Example usage for java.util LinkedHashSet toArray

Introduction

On this page you can find example usage of java.util.LinkedHashSet.toArray.

Prototype

<T> T[] toArray(T[] a);

Document

Returns an array containing all of the elements in this set; the runtime type of the returned array is that of the specified array. If the set fits in the specified array, it is returned therein; otherwise, a new array is allocated with the runtime type of the specified array and the size of this set.
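
Below is a minimal, self-contained sketch (class and variable names are illustrative). Because LinkedHashSet preserves insertion order, the returned array reflects the order in which elements were added:

import java.util.Arrays;
import java.util.LinkedHashSet;

public class LinkedHashSetToArrayExample {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<String>();
        names.add("alpha");
        names.add("beta");
        names.add("alpha"); // duplicate; the set ignores it

        // Passing a zero-length array lets toArray allocate a String[] of the correct size.
        String[] asArray = names.toArray(new String[0]);
        System.out.println(Arrays.toString(asArray)); // prints [alpha, beta]
    }
}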

Usage

From source file:org.apache.tajo.engine.planner.LogicalPlanner.java
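
This visitor gathers Target objects in a LinkedHashSet to keep a deterministic, insertion-ordered target list, then materializes it with targets.toArray(new Target[targets.size()]) before calling setTargets.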

public TableSubQueryNode visitTableSubQuery(PlanContext context, Stack<Expr> stack, TablePrimarySubQuery expr)
        throws PlanningException {
    QueryBlock block = context.queryBlock;

    QueryBlock childBlock = context.plan.getBlock(context.plan.getBlockNameByExpr(expr.getSubQuery()));
    PlanContext newContext = new PlanContext(context, childBlock);
    LogicalNode child = visit(newContext, new Stack<Expr>(), expr.getSubQuery());
    TableSubQueryNode subQueryNode = context.queryBlock.getNodeFromExpr(expr);
    context.plan.connectBlocks(childBlock, context.queryBlock, BlockType.TableSubQuery);
    subQueryNode.setSubQuery(child);

    // Add additional expressions required in upper nodes.
    Set<String> newlyEvaluatedExprs = TUtil.newHashSet();
    for (NamedExpr rawTarget : block.namedExprsMgr.getAllNamedExprs()) {
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, subQueryNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprs.add(rawTarget.getAlias()); // newly added expr
            }
        } catch (VerifyException ve) {
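            // The expression cannot be evaluated at this relation; leave it for upper nodes.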
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, subQueryNode, newlyEvaluatedExprs);

    for (String newAddedExpr : newlyEvaluatedExprs) {
        targets.add(block.namedExprsMgr.getTarget(newAddedExpr, true));
    }

    subQueryNode.setTargets(targets.toArray(new Target[targets.size()]));

    return subQueryNode;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java
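
This join visitor collects the projected targets in a LinkedHashSet and converts them to a pre-sized Target[] for node.setTargets.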

public LogicalNode visitJoin(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, JoinNode node,
        Stack<LogicalNode> stack) throws PlanningException {
    Context newContext = new Context(context);

    String joinQualReference = null;
    if (node.hasJoinQual()) {
        for (EvalNode eachQual : AlgebraicUtil.toConjunctiveNormalFormArray(node.getJoinQual())) {
            if (eachQual instanceof BinaryEval) {
                BinaryEval binaryQual = (BinaryEval) eachQual;

                for (int i = 0; i < 2; i++) {
                    EvalNode term = binaryQual.getChild(i);
                    pushDownIfComplexTermInJoinCondition(newContext, eachQual, term);
                }
            }
        }

        joinQualReference = newContext.addExpr(node.getJoinQual());
        newContext.addNecessaryReferences(node.getJoinQual());
    }

    String[] referenceNames = null;
    if (node.hasTargets()) {
        referenceNames = new String[node.getTargets().length];
        int i = 0;
        for (Iterator<Target> it = getFilteredTarget(node.getTargets(), context.requiredSet); it.hasNext();) {
            Target target = it.next();
            referenceNames[i++] = newContext.addExpr(target);
        }
    }

    stack.push(node);
    LogicalNode left = visit(newContext, plan, block, node.getLeftChild(), stack);
    LogicalNode right = visit(newContext, plan, block, node.getRightChild(), stack);
    stack.pop();

    Schema merged = SchemaUtil.merge(left.getOutSchema(), right.getOutSchema());

    node.setInSchema(merged);

    if (node.hasJoinQual()) {
        Target target = context.targetListMgr.getTarget(joinQualReference);
        if (newContext.targetListMgr.isEvaluated(joinQualReference)) {
            throw new PlanningException(
                    "Join condition must be evaluated in the proper Join Node: " + joinQualReference);
        } else {
            node.setJoinQual(target.getEvalTree());
            newContext.targetListMgr.markAsEvaluated(target);
        }
    }

    LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
    for (Iterator<String> it = getFilteredReferences(context.targetListMgr.getNames(), context.requiredSet); it
            .hasNext();) {
        String referenceName = it.next();
        Target target = context.targetListMgr.getTarget(referenceName);

        if (context.targetListMgr.isEvaluated(referenceName)) {
            Target fieldReference = new Target(new FieldEval(target.getNamedColumn()));
            if (LogicalPlanner.checkIfBeEvaluatedAtJoin(block, fieldReference.getEvalTree(), node,
                    stack.peek().getType() != NodeType.JOIN)) {
                projectedTargets.add(fieldReference);
            }
        } else if (LogicalPlanner.checkIfBeEvaluatedAtJoin(block, target.getEvalTree(), node,
                stack.peek().getType() != NodeType.JOIN)) {
            projectedTargets.add(target);
            context.targetListMgr.markAsEvaluated(target);
        }
    }

    node.setTargets(projectedTargets.toArray(new Target[projectedTargets.size()]));
    LogicalPlanner.verifyProjectedFields(block, node);
    return node;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java
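
The scan visitor follows the same pattern: accumulate evaluable targets in a LinkedHashSet, then hand them to the node as an array.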

public LogicalNode visitScan(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, ScanNode node,
        Stack<LogicalNode> stack) throws PlanningException {

    Context newContext = new Context(context);

    Target[] targets;
    if (node.hasTargets()) {
        targets = node.getTargets();
    } else {
        targets = PlannerUtil.schemaToTargets(node.getTableSchema());
    }

    LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
    for (Iterator<Target> it = getFilteredTarget(targets, newContext.requiredSet); it.hasNext();) {
        Target target = it.next();
        newContext.addExpr(target);
    }

    for (Iterator<Target> it = context.targetListMgr.getFilteredTargets(newContext.requiredSet); it
            .hasNext();) {
        Target target = it.next();

        if (LogicalPlanner.checkIfBeEvaluatedAtRelation(block, target.getEvalTree(), node)) {
            projectedTargets.add(target);
            newContext.targetListMgr.markAsEvaluated(target);
        }
    }

    node.setTargets(projectedTargets.toArray(new Target[projectedTargets.size()]));
    LogicalPlanner.verifyProjectedFields(block, node);
    return node;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java
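
A near-identical variant of the previous visitor, here for partitioned table scans.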

@Override
public LogicalNode visitPartitionedTableScan(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block,
        PartitionedTableScanNode node, Stack<LogicalNode> stack) throws PlanningException {

    Context newContext = new Context(context);

    Target[] targets;
    if (node.hasTargets()) {
        targets = node.getTargets();
    } else {
        targets = PlannerUtil.schemaToTargets(node.getOutSchema());
    }

    LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
    for (Iterator<Target> it = getFilteredTarget(targets, newContext.requiredSet); it.hasNext();) {
        Target target = it.next();
        newContext.addExpr(target);
    }

    for (Iterator<Target> it = context.targetListMgr.getFilteredTargets(newContext.requiredSet); it
            .hasNext();) {
        Target target = it.next();

        if (LogicalPlanner.checkIfBeEvaluatedAtRelation(block, target.getEvalTree(), node)) {
            projectedTargets.add(target);
            newContext.targetListMgr.markAsEvaluated(target);
        }
    }

    node.setTargets(projectedTargets.toArray(new Target[projectedTargets.size()]));
    LogicalPlanner.verifyProjectedFields(block, node);
    return node;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java
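
The table-subquery visitor of the same rewrite rule again finishes with projectedTargets.toArray(new Target[projectedTargets.size()]).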

@Override
public LogicalNode visitTableSubQuery(Context upperContext, LogicalPlan plan, LogicalPlan.QueryBlock block,
        TableSubQueryNode node, Stack<LogicalNode> stack) throws PlanningException {
    Context childContext = new Context(plan, upperContext.requiredSet);
    stack.push(node);
    LogicalNode child = super.visitTableSubQuery(childContext, plan, block, node, stack);
    node.setSubQuery(child);
    stack.pop();

    Target[] targets;
    if (node.hasTargets()) {
        targets = node.getTargets();
    } else {
        targets = PlannerUtil.schemaToTargets(node.getOutSchema());
    }

    LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
    for (Iterator<Target> it = getFilteredTarget(targets, upperContext.requiredSet); it.hasNext();) {
        Target target = it.next();
        upperContext.addExpr(target);
    }

    for (Iterator<Target> it = upperContext.targetListMgr.getFilteredTargets(upperContext.requiredSet); it
            .hasNext();) {
        Target target = it.next();

        if (LogicalPlanner.checkIfBeEvaluatedAtRelation(block, target.getEvalTree(), node)) {
            projectedTargets.add(target);
            upperContext.targetListMgr.markAsEvaluated(target);
        }
    }

    node.setTargets(projectedTargets.toArray(new Target[projectedTargets.size()]));
    LogicalPlanner.verifyProjectedFields(block, node);
    return node;
}

From source file:org.apache.tajo.plan.LogicalPlanner.java
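
visitRelation builds field targets from the relation plus any newly evaluated expressions, then converts the ordered set to an array.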

@Override
public ScanNode visitRelation(PlanContext context, Stack<Expr> stack, Relation expr) throws TajoException {
    QueryBlock block = context.queryBlock;

    ScanNode scanNode = block.getNodeFromExpr(expr);
    updatePhysicalInfo(context, scanNode.getTableDesc());

    // Find expressions which can be evaluated at this relation node.
    // Besides column references, additional expressions used in the select list, where clause, and order-by
    // clauses can be evaluated here. Their reference names are kept in newlyEvaluatedExprsReferences.
    Set<String> newlyEvaluatedExprsReferences = new LinkedHashSet<String>();
    for (Iterator<NamedExpr> iterator = block.namedExprsMgr.getIteratorForUnevaluatedExprs(); iterator
            .hasNext();) {
        NamedExpr rawTarget = iterator.next();
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, scanNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprsReferences.add(rawTarget.getAlias()); // newly added expr
            }
        } catch (UndefinedColumnException ve) {
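            // The column cannot be resolved at this relation; leave the expression for upper nodes.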
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, scanNode,
            newlyEvaluatedExprsReferences);

    // The fact that some expr is included in newlyEvaluatedExprsReferences means that it has already been
    // evaluated. So, we get the raw expression and then create a target.
    for (String reference : newlyEvaluatedExprsReferences) {
        NamedExpr referent = block.namedExprsMgr.getNamedExpr(reference);
        EvalNode evalNode = exprAnnotator.createEvalNode(context, referent.getExpr(),
                NameResolvingMode.RELS_ONLY);
        targets.add(new Target(evalNode, reference));
    }

    scanNode.setTargets(targets.toArray(new Target[targets.size()]));

    verifyProjectedFields(block, scanNode);
    return scanNode;
}

From source file:org.apache.tajo.plan.LogicalPlanner.java
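
A helper that applies the same collect-then-toArray pattern to a TableSubQueryNode.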

private void setTargetOfTableSubQuery(PlanContext context, QueryBlock block, TableSubQueryNode subQueryNode)
        throws TajoException {
    // Add additional expressions required in upper nodes.
    Set<String> newlyEvaluatedExprs = TUtil.newHashSet();
    for (NamedExpr rawTarget : block.namedExprsMgr.getAllNamedExprs()) {
        try {
            EvalNode evalNode = exprAnnotator.createEvalNode(context, rawTarget.getExpr(),
                    NameResolvingMode.RELS_ONLY);
            if (checkIfBeEvaluatedAtRelation(block, evalNode, subQueryNode)) {
                block.namedExprsMgr.markAsEvaluated(rawTarget.getAlias(), evalNode);
                newlyEvaluatedExprs.add(rawTarget.getAlias()); // newly added expr
            }
        } catch (UndefinedColumnException ve) {
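            // The column cannot be resolved at this relation; leave the expression for upper nodes.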
        }
    }

    // Assume that each unique expr is evaluated once.
    LinkedHashSet<Target> targets = createFieldTargetsFromRelation(block, subQueryNode, newlyEvaluatedExprs);

    for (String newAddedExpr : newlyEvaluatedExprs) {
        targets.add(block.namedExprsMgr.getTarget(newAddedExpr, true));
    }

    subQueryNode.setTargets(targets.toArray(new Target[targets.size()]));
}

From source file:org.codehaus.mojo.webminifier.WebMinifierMojo.java
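
This Maven MOJO deduplicates the concatenated JS target files with a LinkedHashSet and copies them into a pre-allocated File[] via toArray.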

/**
 * Main entry point for the MOJO.
 * 
 * @throws MojoExecutionException if there's a problem in the normal course of execution.
 * @throws MojoFailureException if there's a problem with the MOJO itself.
 */
public void execute() throws MojoExecutionException, MojoFailureException {
    // Start off by copying all files over. We'll ultimately remove the js files that we don't need from there, and
    // create new ones in there (same goes for css files and anything else we minify).

    FileUtils.deleteQuietly(destinationFolder);
    try {
        FileUtils.copyDirectory(sourceFolder, destinationFolder);
    } catch (IOException e) {
        throw new MojoExecutionException("Cannot copy file to target folder", e);
    }

    // Process each HTML source file and concatenate into unminified output scripts
    int minifiedCounter = 0;

    // If a split point already exists on disk then we've been through the minification process. As
    // minification can be expensive, we would like to avoid performing it multiple times, so storing
    // a set of what we've previously minified enables us to skip that work.
    Set<File> existingConcatenatedJsResources = new HashSet<File>();
    Set<File> consumedJsResources = new HashSet<File>();

    for (String targetHTMLFile : getArrayOfTargetHTMLFiles()) {
        File targetHTML = new File(destinationFolder, targetHTMLFile);

        // Parse HTML file and locate SCRIPT elements
        DocumentResourceReplacer replacer;
        try {
            replacer = new DocumentResourceReplacer(targetHTML);
        } catch (SAXException e) {
            throw new MojoExecutionException("Problem reading html document", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem opening html document", e);
        }

        List<File> jsResources = replacer.findJSResources();

        if (jsSplitPoints == null) {
            jsSplitPoints = new Properties();
        }

        File concatenatedJsResource = null;

        URI destinationFolderUri = destinationFolder.toURI();

        // Split the js resources into two lists: one containing all external dependencies, the other containing
        // project sources. We do this so that project sources can be minified without the dependencies (libraries
        // generally don't need to distribute the dependencies).
        int jsDependencyProjectResourcesIndex;

        if (splitDependencies) {
            List<File> jsDependencyResources = new ArrayList<File>(jsResources.size());
            List<File> jsProjectResources = new ArrayList<File>(jsResources.size());
            for (File jsResource : jsResources) {
                String jsResourceUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
                File jsResourceFile = new File(projectSourceFolder, jsResourceUri);
                if (jsResourceFile.exists()) {
                    jsProjectResources.add(jsResource);
                } else {
                    jsDependencyResources.add(jsResource);
                }
            }

            // Re-constitute the js resource list from dependency resources + project resources and note the index
            // in the list that represents the start of project sources in the list. We need this information later.
            jsDependencyProjectResourcesIndex = jsDependencyResources.size();

            jsResources = jsDependencyResources;
            jsResources.addAll(jsProjectResources);
        } else {
            jsDependencyProjectResourcesIndex = 0;
        }

        // Walk backwards through the script declarations and note what files will map to what split point.
        Map<File, File> jsResourceTargetFiles = new LinkedHashMap<File, File>(jsResources.size());
        ListIterator<File> jsResourcesIter = jsResources.listIterator(jsResources.size());

        boolean splittingDependencies = false;

        while (jsResourcesIter.hasPrevious()) {
            int jsResourceIterIndex = jsResourcesIter.previousIndex();
            File jsResource = jsResourcesIter.previous();

            String candidateSplitPointNameUri = destinationFolderUri.relativize(jsResource.toURI()).toString();
            String splitPointName = (String) jsSplitPoints.get(candidateSplitPointNameUri);

            // If we do not have a split point name and the resource is a dependency of this project, i.e. it is
            // not within our src/main folder, then we give it its own split point. Factoring out dependencies
            // into their own split point is a useful thing to do and will always be required when building
            // libraries.
            if (splitDependencies && splitPointName == null && !splittingDependencies) {
                if (jsResourceIterIndex < jsDependencyProjectResourcesIndex) {
                    splitPointName = Integer.valueOf(++minifiedCounter).toString();
                    splittingDependencies = true;
                }
            }

            // If we have no name and we've not been in here before, then assign an initial name based on a number.
            if (splitPointName == null && concatenatedJsResource == null) {
                splitPointName = Integer.valueOf(++minifiedCounter).toString();
            }

            // We have a new split name so use it for this file and upwards in the script statements until we
            // either hit another split point or there are no more script statements.
            if (splitPointName != null) {
                concatenatedJsResource = new File(destinationFolder, splitPointName + ".js");

                // Note that we've previously created this.
                if (concatenatedJsResource.exists()) {
                    existingConcatenatedJsResources.add(concatenatedJsResource);
                }
            }

            jsResourceTargetFiles.put(jsResource, concatenatedJsResource);
        }

        for (File jsResource : jsResources) {
            concatenatedJsResource = jsResourceTargetFiles.get(jsResource);
            if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                // Concatenate input file onto output resource file
                try {
                    concatenateFile(jsResource, concatenatedJsResource);
                } catch (IOException e) {
                    throw new MojoExecutionException("Problem concatenating JS files", e);
                }

                // Finally, remove the JS resource from the target folder as it is no longer required (we've
                // concatenated it).
                consumedJsResources.add(jsResource);
            }
        }

        // Reduce the list of js resource target files to a distinct set
        LinkedHashSet<File> concatenatedJsResourcesSet = new LinkedHashSet<File>(
                jsResourceTargetFiles.values());
        File[] concatenatedJsResourcesArray = new File[concatenatedJsResourcesSet.size()];
        concatenatedJsResourcesSet.toArray(concatenatedJsResourcesArray);
        List<File> concatenatedJsResources = Arrays.asList(concatenatedJsResourcesArray);

        // Minify the concatenated JS resource files

        if (jsCompressorType != JsCompressorType.NONE) {
            List<File> minifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();

                File minifiedJSResource;
                try {
                    String uri = concatenatedJsResource.toURI().toString();
                    int i = uri.lastIndexOf(".js");
                    String minUri;
                    if (i > -1) {
                        minUri = uri.substring(0, i) + "-min.js";
                    } else {
                        minUri = uri;
                    }
                    minifiedJSResource = FileUtils.toFile(new URL(minUri));
                } catch (MalformedURLException e) {
                    throw new MojoExecutionException("Problem determining file URL", e);
                }

                minifiedJSResources.add(minifiedJSResource);

                // If we've not actually performed the minification before... then do so. This is the expensive bit
                // so we like to avoid it if we can.
                if (!existingConcatenatedJsResources.contains(concatenatedJsResource)) {
                    boolean warningsFound;
                    try {
                        warningsFound = minifyJSFile(concatenatedJsResource, minifiedJSResource);
                    } catch (IOException e) {
                        throw new MojoExecutionException("Problem reading/writing JS", e);
                    }

                    logCompressionRatio(minifiedJSResource.getName(), concatenatedJsResource.length(),
                            minifiedJSResource.length());

                    // If there were warnings then the user may want to manually invoke the compressor for further
                    // investigation.
                    if (warningsFound) {
                        getLog().warn("Warnings were found. " + concatenatedJsResource
                                + " is available for your further investigations.");
                    }
                }
            }

            // Update source references
            replacer.replaceJSResources(destinationFolder, targetHTML, minifiedJSResources);
        } else {
            List<File> unminifiedJSResources = new ArrayList<File>(concatenatedJsResources.size());

            ListIterator<File> concatenatedJsResourcesIter = concatenatedJsResources
                    .listIterator(concatenatedJsResources.size());
            while (concatenatedJsResourcesIter.hasPrevious()) {
                concatenatedJsResource = concatenatedJsResourcesIter.previous();
                unminifiedJSResources.add(concatenatedJsResource);
            }

            replacer.replaceJSResources(destinationFolder, targetHTML, unminifiedJSResources);
            getLog().info("Concatenated resources with no compression");
        }

        // Write HTML file to output dir
        try {
            replacer.writeHTML(targetHTML, encoding);
        } catch (TransformerException e) {
            throw new MojoExecutionException("Problem transforming html", e);
        } catch (IOException e) {
            throw new MojoExecutionException("Problem writing html", e);
        }

    }

    // Clean up including the destination folder recursively where directories have nothing left in them.
    for (File consumedJsResource : consumedJsResources) {
        consumedJsResource.delete();
    }
    removeEmptyFolders(destinationFolder);
}

From source file:org.fusesource.meshkeeper.distribution.remoting.AbstractRemotingClient.java
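
Here toArray fills a pre-allocated Class<?>[] with the collected service interfaces before exporting them.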

private final <T> T exportInternal(T obj, String multicastAddress, Class<?>... serviceInterfaces)
        throws Exception {
    LinkedHashSet<Class<?>> interfaces = new LinkedHashSet<Class<?>>();
    if (serviceInterfaces == null || serviceInterfaces.length == 0) {
        collectDistributableInterfaces(obj.getClass(), interfaces);
    } else {
        for (Class<?> serviceInterface : serviceInterfaces) {
            validateInterface(serviceInterface);
            interfaces.add(serviceInterface);
        }
    }

    // If the only interface is the Distributable interface itself, then we're
    // just trying to export the class:
    if (interfaces.size() == 0 || (interfaces.size() == 1 && interfaces.contains(Distributable.class))) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Exporting " + obj.getClass() + " with no service interfaces");
        }
        return (T) exportInterfaces(obj, multicastAddress, (Class<?>[]) null);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Exporting " + obj.getClass() + " as: " + interfaces);
    }

    Class<?>[] distributable = new Class<?>[interfaces.size()];
    interfaces.toArray(distributable);
    return (T) exportInterfaces(obj, multicastAddress, distributable);
}

From source file:org.mskcc.cbio.importer.converter.internal.ConverterImpl.java
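
This converter accumulates case IDs in a LinkedHashSet and passes toArray(new String[0]), letting the method allocate a String[] of the right size.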

/**
 * Generates case lists for the given portal.
 *
 * @param portal the portal name
 * @throws Exception
 */
@Override
public void generateCaseLists(String portal) throws Exception {

    if (LOG.isInfoEnabled()) {
        LOG.info("generateCaseLists()");
    }

    // check args
    if (portal == null) {
        throw new IllegalArgumentException("portal must not be null");
    }

    // get portal metadata
    PortalMetadata portalMetadata = config.getPortalMetadata(portal).iterator().next();
    if (portalMetadata == null) {
        if (LOG.isInfoEnabled()) {
            LOG.info("convertData(), cannot find PortalMetadata, returning");
        }
        return;
    }

    // get CaseListMetadata
    Collection<CaseListMetadata> caseListMetadatas = config.getCaseListMetadata(Config.ALL);

    // iterate over all cancer studies
    for (CancerStudyMetadata cancerStudyMetadata : config.getCancerStudyMetadata(portalMetadata.getName())) {
        // iterate over case lists
        for (CaseListMetadata caseListMetadata : caseListMetadatas) {
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), processing cancer study: " + cancerStudyMetadata
                        + ", case list: " + caseListMetadata.getCaseListFilename());
            }
            // how many staging files are we working with?
            String[] stagingFilenames = null;
            // setup union/intersection bools
            boolean unionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            boolean intersectionCaseList = caseListMetadata.getStagingFilenames()
                    .contains(CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            // union (like all cases)
            if (unionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_UNION_DELIMITER);
            }
            // intersection (like complete or cna-seq)
            else if (intersectionCaseList) {
                stagingFilenames = caseListMetadata.getStagingFilenames()
                        .split("\\" + CaseListMetadata.CASE_LIST_INTERSECTION_DELIMITER);
            }
            // just a single staging file
            else {
                stagingFilenames = new String[] { caseListMetadata.getStagingFilenames() };
            }
            if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), stagingFilenames: "
                        + java.util.Arrays.toString(stagingFilenames));
            }
            // this is the set we will pass to writeCaseListFile
            LinkedHashSet<String> caseSet = new LinkedHashSet<String>();
            // this indicates the number of staging files processed -
            // used to verify that an intersection should be written
            int numStagingFilesProcessed = 0;
            for (String stagingFilename : stagingFilenames) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), processing stagingFile: " + stagingFilename);
                }
                // compute the case set
                List<String> caseList = fileUtils.getCaseListFromStagingFile(caseIDs, portalMetadata,
                        cancerStudyMetadata, stagingFilename);
                // we may not have this datatype in study
                if (caseList.size() == 0) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info("generateCaseLists(), stagingFileHeader is empty: " + stagingFilename
                                + ", skipping...");
                    }
                    continue;
                }
                // intersection 
                if (intersectionCaseList) {
                    if (caseSet.isEmpty()) {
                        caseSet.addAll(caseList);
                    } else {
                        caseSet.retainAll(caseList);
                    }
                }
                // otherwise union or single staging (treat the same)
                else {
                    caseSet.addAll(caseList);
                }
                ++numStagingFilesProcessed;
            }
            // write the case list file (don't make empty case lists)
            if (caseSet.size() > 0) {
                if (LOG.isInfoEnabled()) {
                    LOG.info("generateCaseLists(), calling writeCaseListFile()...");
                }
                // do not write out complete cases file unless we've processed all the files required
                if (intersectionCaseList && (numStagingFilesProcessed != stagingFilenames.length)) {
                    if (LOG.isInfoEnabled()) {
                        LOG.info(
                                "generateCaseLists(), number of staging files processed != number staging files required for cases_complete.txt, skipping call to writeCaseListFile()...");
                    }
                    continue;
                }
                fileUtils.writeCaseListFile(portalMetadata, cancerStudyMetadata, caseListMetadata,
                        caseSet.toArray(new String[0]));
            } else if (LOG.isInfoEnabled()) {
                LOG.info("generateCaseLists(), caseSet.size() <= 0, skipping call to writeCaseListFile()...");
            }
            // if union, write out the cancer study metadata file
            if (caseSet.size() > 0 && caseListMetadata.getCaseListFilename().equals(ALL_CASES_FILENAME)) {
                if (LOG.isInfoEnabled()) {
                    LOG.info(
                            "generateCaseLists(), processed all cases list, we can now update cancerStudyMetadata file()...");
                }
                fileUtils.writeCancerStudyMetadataFile(portalMetadata, cancerStudyMetadata, caseSet.size());
            }
        }
    }

}
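
A note on the two idioms seen above: most call sites pre-size the destination array, as in targets.toArray(new Target[targets.size()]), while the last example passes toArray(new String[0]) and lets the method allocate an array of the correct runtime type and size. Both are correct; the zero-length form is simpler and, on modern JVMs, is generally at least as fast.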