Example usage for java.util LinkedHashSet addAll

Introduction

This page collects usage examples for java.util.LinkedHashSet.addAll, drawn from open-source projects.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
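
To make this contract concrete, here is a minimal, self-contained sketch (the class name and values are ours, not taken from any of the projects below). It shows that addAll returns true only when the set actually changed, silently skips elements that are already present, and preserves insertion order:

import java.util.Arrays;
import java.util.LinkedHashSet;

public class LinkedHashSetAddAllDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<>(Arrays.asList("alpha", "beta"));

        // true: at least one element ("gamma") was not already present
        boolean changed = names.addAll(Arrays.asList("beta", "gamma"));

        System.out.println(changed); // true
        System.out.println(names);   // [alpha, beta, gamma] - duplicate "beta" skipped, insertion order kept

        // false: every element is already present, so the set is unchanged
        System.out.println(names.addAll(Arrays.asList("alpha", "gamma"))); // false
    }
}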

Usage

From source file:net.doubledoordev.backend.server.Server.java

public Collection<String> getPossibleJarnames() {
    LinkedHashSet<String> names = new LinkedHashSet<>();
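    // Note: File.list(FilenameFilter) returns null if the folder is not a directory or an I/O error occurs,
    // in which case Arrays.asList would throw a NullPointerException; production code should guard for that.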
    names.addAll(Arrays.asList(folder.list(ACCEPT_FORGE_FILTER)));
    names.addAll(Arrays.asList(folder.list(ACCEPT_MINECRAFT_SERVER_FILTER)));
    names.addAll(Arrays.asList(folder.list(ACCEPT_ALL_JAR_FILTER)));
    return names;
}

From source file:org.alfresco.bm.dataload.rm.unfiled.ScheduleUnfiledRecordLoaders.java

/**
 * Obtains all unfiled record folders underneath the specified parent folder, plus the parent folder itself.
 *
 * @param parentFolder the parent folder to collect unfiled record folders from
 * @return all unfiled record folders underneath the specified parent folder, plus the parent folder itself
 */
private Set<FolderData> getUnfiledRecordFolders(FolderData parentFolder) {
    LinkedHashSet<FolderData> result = new LinkedHashSet<FolderData>();
    int skip = 0;
    int limit = 100;
    List<FolderData> directChildren = new ArrayList<FolderData>();
    List<FolderData> childFolders = fileFolderService.getChildFolders(UNFILED_CONTEXT, parentFolder.getPath(),
            skip, limit);
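    // Page through the child folders 'limit' entries at a time until an empty page is returned.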
    while (childFolders.size() > 0) {
        directChildren.addAll(childFolders);
        skip += limit;
        childFolders = fileFolderService.getChildFolders(UNFILED_CONTEXT, parentFolder.getPath(), skip, limit);
    }
    if (directChildren.size() > 0) {
        for (FolderData childFolder : directChildren) {
            result.addAll(getUnfiledRecordFolders(childFolder));
        }
    }
    result.add(parentFolder);
    return result;
}

From source file:net.hillsdon.reviki.search.impl.LuceneSearcher.java

public Set<SearchMatch> search(final String queryString, final boolean provideExtracts,
        final boolean singleWiki) throws IOException, QuerySyntaxException {
    if (_dir == null || queryString == null || queryString.trim().length() == 0) {
        return Collections.emptySet();
    }
    return doReadOperation(new ReadOperation<Set<SearchMatch>>() {
        public Set<SearchMatch> execute(final IndexReader reader, final Searcher searcher,
                final Analyzer analyzer) throws IOException, ParseException {
            LinkedHashSet<SearchMatch> results = new LinkedHashSet<SearchMatch>();
            // Prefer path, then title, then content matches (match equality is on page name)
            for (String field : ALL_SEARCH_FIELDS) {
                if (field.equals(FIELD_PATH_LOWER)) {
                    final Query query = new WildcardQuery(
                            new Term(FIELD_PATH_LOWER, "*" + queryString.toLowerCase() + "*"));
                    results.addAll(doQuery(reader, analyzer, searcher, field, provideExtracts, query));
                } else {
                    results.addAll(query(reader, analyzer, searcher, field, queryString, provideExtracts));
                }
            }
            return orderResults(results);
        }
    }, !singleWiki);
}

From source file:org.jahia.services.content.PublicationInfo.java

private void getAllReferences(LinkedHashSet<PublicationInfo> uuids, LinkedList<PublicationInfoNode> nodes,
        Set<PublicationInfoNode> processedNodes) {
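    // Iterative breadth-first traversal: 'nodes' is the work queue and 'processedNodes' prevents revisiting shared nodes.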
    nodes.add(root);
    processedNodes.add(root);

    PublicationInfoNode node = nodes.poll();
    while (node != null) {
        for (PublicationInfoNode infoNode : node.getChildren()) {
            if (!processedNodes.contains(infoNode)) {
                nodes.add(infoNode);
                processedNodes.add(infoNode);
            }
        }
        for (PublicationInfo refInfo : node.getReferences()) {
            if (!processedNodes.contains(refInfo.getRoot())) {
                refInfo.getAllReferences(uuids, nodes, processedNodes);
            }
        }
        uuids.addAll(node.getReferences());
        node = nodes.poll();
    }
}

From source file:com.datatorrent.stram.StramClient.java

/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
        //TODO restrict the security check to only check if security is enabled for webservices.
        if (UserGroupInformation.isSecurityEnabled()) {
            defaultClasses = DATATORRENT_SECURITY_CLASSES;
        } else {
            defaultClasses = DATATORRENT_CLASSES;
        }
    } else {
        throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
        localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    //GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    //GetClusterNodesResponse clusterNodesResp = rmClient.clientRM.getClusterNodes(clusterNodesReq);
    //LOG.info("Got Cluster node info from ASM");
    //for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //           + ", nodeId=" + node.getNodeId()
    //           + ", nodeAddress" + node.getHttpAddress()
    //           + ", nodeRackName" + node.getRackName()
    //           + ", nodeNumContainers" + node.getNumContainers()
    //           + ", nodeHealthStatus" + node.getHealthReport());
    //}
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
        dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
        //appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation token
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
        } finally {
            fs.close();
        }

        new ClientRMHelper(yarnClient, conf).addRMDelegationToken(tokenRenewer, credentials);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
        Path appsBasePath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
        Path appPath;
        String configuredAppPath = dag.getValue(LogicalPlan.APPLICATION_PATH);
        if (configuredAppPath == null) {
            appPath = new Path(appsBasePath, appId.toString());
        } else {
            appPath = new Path(configuredAppPath);
        }
        String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

        LOG.info("libjars: {}", libJarsCsv);
        dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
        LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, libJarsCsv, localResources,
                fs);

        if (archives != null) {
            String[] localFiles = archives.split(",");
            String archivesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("archives: {}", archivesCsv);
            dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.ARCHIVE, archivesCsv,
                    localResources, fs);
        }

        if (files != null) {
            String[] localFiles = files.split(",");
            String filesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("files: {}", filesCsv);
            dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, filesCsv, localResources,
                    fs);
        }

        dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
        StorageAgent agent = dag.getAttributes().get(OperatorContext.STORAGE_AGENT);
        if (agent != null && agent instanceof StorageAgent.ApplicationAwareStorageAgent) {
            ((StorageAgent.ApplicationAwareStorageAgent) agent).setApplicationAttributes(dag.getAttributes());
        }

        if (dag.getAttributes()
                .get(OperatorContext.STORAGE_AGENT) == null) { /* which would be the most likely case */
            Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
            // use conf client side to pickup any proxy settings from dt-site.xml
            dag.setAttribute(OperatorContext.STORAGE_AGENT,
                    new AsyncFSStorageAgent(checkpointPath.toString(), conf));
        }

        if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
            dag.setAttribute(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
        }

        // Set the log4j properties if needed
        if (!log4jPropFile.isEmpty()) {
            Path log4jSrc = new Path(log4jPropFile);
            Path log4jDst = new Path(appPath, "log4j.props");
            fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
            FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
            LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
            log4jRsrc.setType(LocalResourceType.FILE);
            log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
            log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
            log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
            log4jRsrc.setSize(log4jFileStatus.getLen());
            localResources.put("log4j.properties", log4jRsrc);
        }

        if (originalAppId != null) {
            Path origAppPath = new Path(appsBasePath, this.originalAppId);
            LOG.info("Restart from {}", origAppPath);
            copyInitialState(origAppPath);
        }

        // push logical plan to DFS location
        Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
        FSDataOutputStream outStream = fs.create(cfgDst, true);
        LogicalPlan.write(this.dag, outStream);
        outStream.close();

        Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
        outStream = fs.create(launchConfigDst, true);
        conf.writeXml(outStream);
        outStream.close();
        LaunchContainerRunnable.addFileToLocalResources(LogicalPlan.SER_FILE_NAME, fs.getFileStatus(cfgDst),
                LocalResourceType.FILE, localResources);

        // Set local resource info into app master container launch context
        amContainer.setLocalResources(localResources);

        // Set the necessary security tokens as needed
        //amContainer.setContainerTokens(containerToken);
        // Set the env variables to be setup in the env where the application master will be run
        LOG.info("Set the environment for the application master");
        Map<String, String> env = new HashMap<String, String>();

        // Add application jar(s) location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar(s)
        // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
        //StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
        StringBuilder classPathEnv = new StringBuilder("./*");
        String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
        for (String c : StringUtils.isBlank(classpath) ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
                : classpath.split(",")) {
            if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
                // SPOI-2501
                continue;
            }
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }
        env.put("CLASSPATH", classPathEnv.toString());
        // propagate to replace node managers user name (effective in non-secure mode)
        env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

        amContainer.setEnvironment(env);

        // Set the necessary command to execute the application master
        ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

        // Set java executable command
        LOG.info("Setting up app master command");
        vargs.add(javaCmd);
        if (dag.isDebug()) {
            vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
        }
        // Set Xmx based on am memory size
        // default heap size 75% of total memory
        if (dag.getMasterJVMOptions() != null) {
            vargs.add(dag.getMasterJVMOptions());
        }
        Path tmpDir = new Path(ApplicationConstants.Environment.PWD.$(),
                YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR);
        vargs.add("-Djava.io.tmpdir=" + tmpDir);
        vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
        vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
        vargs.add("-XX:HeapDumpPath=" + System.getProperty("java.io.tmpdir") + "/dt-heap-" + appId.getId()
                + ".bin");
        vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
        vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
        vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
        if (dag.isDebug()) {
            vargs.add("-Dlog4j.debug=true");
        }

        String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
        if (loggersLevel != null) {
            vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
        }
        vargs.add(StreamingAppMaster.class.getName());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder(9 * vargs.size());
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        LOG.info("Completed setting up app master command " + command.toString());
        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(amMemory);
        appContext.setResource(capability);

        // Service data is a binary blob that can be passed to the application
        // Not needed in this scenario
        // amContainer.setServiceData(serviceData);
        appContext.setAMContainerSpec(amContainer);

        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(queueName);

        // Submit the application to the applications manager
        // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
        // Ignore the response as either a valid response object is returned on success
        // or an exception thrown to denote some form of a failure
        String specStr = Objects.toStringHelper("Submitting application: ")
                .add("name", appContext.getApplicationName()).add("queue", appContext.getQueue())
                .add("user", UserGroupInformation.getLoginUser()).add("resource", appContext.getResource())
                .toString();
        LOG.info(specStr);
        if (dag.isDebug()) {
            //LOG.info("Full submission context: " + appContext);
        }
        yarnClient.submitApplication(appContext);
    } finally {
        fs.close();
    }
}

From source file:org.apache.geronimo.mavenplugins.car.AbstractCarMojo.java

protected LinkedHashSet<DependencyType> toDependencies(List<Dependency> explicitDependencies,
        UseMavenDependencies useMavenDependencies, boolean includeImport)
        throws InvalidDependencyVersionException, ArtifactResolutionException, ProjectBuildingException,
        MojoExecutionException {
    List<DependencyType> dependencyTypes = new ArrayList<DependencyType>();
    for (Dependency dependency : explicitDependencies) {
        dependencyTypes.add(dependency.toDependencyType());
    }
    LinkedHashSet<DependencyType> dependencies = new LinkedHashSet<DependencyType>();

    if (useMavenDependencies == null || !useMavenDependencies.isValue()) {
        dependencies.addAll(dependencyTypes);
        localDependencies = new HashSet<Artifact>();
        for (DependencyType dependency : dependencies) {
            localDependencies.add(geronimoToMavenArtifact(dependency.toArtifact()));
        }
    } else {
        Map<String, DependencyType> explicitDependencyMap = new HashMap<String, DependencyType>();
        for (DependencyType dependency : dependencyTypes) {
            explicitDependencyMap.put(getKey(dependency), dependency);
        }

        getDependencies(project, useMavenDependencies.isUseTransitiveDependencies());
        for (Artifact entry : localDependencies) {
            dependencies.add(toDependencyType(entry, explicitDependencyMap,
                    useMavenDependencies.isIncludeVersion(), includeImport));
        }
    }

    return dependencies;
}

From source file:org.openmrs.web.taglib.FormatTag.java

/**
 * Filters a list of encounter providers according to the global property
 * that determines the encounter roles whose providers should be displayed.
 *
 * @param encounterProviders the encounter providers to filter.
 * @return the filtered encounter providers.
 */
private Set<Provider> getDisplayEncounterProviders(Map<EncounterRole, Set<Provider>> encounterProviders) {
    String encounterRoles = Context.getAdministrationService()
            .getGlobalProperty(OpenmrsConstants.GP_DASHBOARD_PROVIDER_DISPLAY_ENCOUNTER_ROLES, null);

    if (StringUtils.isEmpty(encounterRoles)) {

        // We do not filter if the user has not yet set the global property.
        LinkedHashSet<Provider> allProviders = new LinkedHashSet<Provider>();

        for (Set<Provider> providers : encounterProviders.values()) {
            allProviders.addAll(providers);
        }

        return allProviders;
    }

    return filterProviders(encounterProviders, trimStringArray(encounterRoles.split(",")));
}

From source file:org.sonatype.plugin.nexus.testenvironment.AbstractEnvironmentMojo.java

@SuppressWarnings("unchecked")
private Collection<Artifact> getNonTransitivePlugins(Set<Artifact> projectArtifacts)
        throws MojoExecutionException {
    Collection<Artifact> deps = new LinkedHashSet<Artifact>();

    for (Artifact artifact : projectArtifacts) {
        Artifact pomArtifact = artifactFactory.createArtifact(artifact.getGroupId(), artifact.getArtifactId(),
                artifact.getVersion(), artifact.getClassifier(), "pom");
        Set<Artifact> result;
        try {
            MavenProject pomProject = mavenProjectBuilder.buildFromRepository(pomArtifact, remoteRepositories,
                    localRepository);

            Set<Artifact> artifacts = pomProject.createArtifacts(artifactFactory, null, null);
            artifacts = filterOutSystemDependencies(artifacts);
            ArtifactResolutionResult arr = resolver.resolveTransitively(artifacts, pomArtifact, localRepository,
                    remoteRepositories, artifactMetadataSource, null);
            result = arr.getArtifacts();
        } catch (Exception e) {
            throw new MojoExecutionException("Failed to resolve non-transitive deps " + e.getMessage(), e);
        }

        LinkedHashSet<Artifact> plugins = new LinkedHashSet<Artifact>();
        plugins.addAll(filtterArtifacts(result, getFilters(null, null, "nexus-plugin", null)));
        plugins.addAll(filtterArtifacts(result, getFilters(null, null, "zip", "bundle")));

        // Recurse to pick up plugins that the discovered plugins themselves depend on.
        plugins.addAll(getNonTransitivePlugins(plugins));

        if (!plugins.isEmpty()) {
            getLog().debug("Adding non-transitive dependencies for: " + artifact + " -\n"
                    + plugins.toString().replace(',', '\n'));
        }

        deps.addAll(plugins);
    }

    return deps;
}

From source file:org.apache.ws.scout.registry.BusinessQueryManagerImpl.java

/**
 * Gets the RegistryObjects owned by the caller. The objects
 * are returned as their concrete type (e.g. Organization, User, etc.).
 *
 *  TODO - need to figure out what the set is. This is just to get some
 *  basic functionality
 *
 * @return BulkResponse
 * @throws JAXRException
 */
public BulkResponse getRegistryObjects() throws JAXRException {
    String types[] = { LifeCycleManager.ORGANIZATION, LifeCycleManager.SERVICE };

    LinkedHashSet<Object> c = new LinkedHashSet<Object>();

    for (int i = 0; i < types.length; i++) {
        try {
            BulkResponse bk = getRegistryObjects(types[i]);

            if (bk.getCollection() != null) {
                c.addAll(bk.getCollection());
            }
        } catch (JAXRException e) {
            log.debug("ignore - just a problem with that type? " + e.getMessage(), e);
        }
    }

    return new BulkResponseImpl(c);
}

From source file:org.alfresco.bm.dataload.rm.fileplan.ScheduleRecordLoaders.java

/**
 * Helper method that initializes the record folders that can receive loaded unfiled records.
 * It also calculates the number of records to add to the initialized record folders.
 */
private void calculateListOfEmptyFolders() {
    if (mapOfRecordsPerRecordFolder == null) {
        mapOfRecordsPerRecordFolder = new LinkedHashMap<FolderData, Integer>();
        List<FolderData> recordFoldersThatNeedRecords = new ArrayList<FolderData>();
        if (paths == null || paths.size() == 0) {
            // get the existing file plan folder structure
            recordFoldersThatNeedRecords.addAll(initialiseFoldersToExistingStructure(RECORD_FOLDER_CONTEXT));
        } else {
            LinkedHashSet<FolderData> structureFromExistentProvidedPaths = new LinkedHashSet<FolderData>();
            for (String path : paths) {
                if (!path.startsWith("/")) {
                    path = "/" + path;
                }
                // check whether the path is an existing record category
                FolderData folder = fileFolderService.getFolder(RECORD_CATEGORY_CONTEXT,
                        RECORD_CONTAINER_PATH + path);
                if (folder == null) // not a category; check whether it is an existing record folder
                {
                    folder = fileFolderService.getFolder(RECORD_FOLDER_CONTEXT, RECORD_CONTAINER_PATH + path);
                }
                if (folder != null) // the folder exists
                {
                    structureFromExistentProvidedPaths.addAll(getRecordFolders(folder));
                } else {
                    try {
                        folder = createFolder(path);
                        recordFoldersThatNeedRecords.add(folder);
                    } catch (Exception e) {
                        // something went wrong creating the current path structure; not all required paths will be created
                    }
                }
            }
            // add record folders from existent paths
            if (structureFromExistentProvidedPaths.size() > 0) {
                recordFoldersThatNeedRecords.addAll(structureFromExistentProvidedPaths);
            }
            // configured paths did not exist in the db and creation failed for all of them;
            // fall back to the existing structure in this case
            if (recordFoldersThatNeedRecords.size() == 0) {
                recordFoldersThatNeedRecords
                        .addAll(initialiseFoldersToExistingStructure(RECORD_FOLDER_CONTEXT));
            }
        }
        if (recordFoldersThatNeedRecords.size() > 0) {
            mapOfRecordsPerRecordFolder = distributeNumberOfRecords(recordFoldersThatNeedRecords,
                    recordsNumber);
        }
    }
}