Example usage for java.util Set.clear()

A list of usage examples for java.util Set.clear()

Introduction

On this page you can find example usages for java.util Set.clear().

Prototype

void clear();

Document

Removes all of the elements from this set (optional operation).
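
Example

Below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) showing the basic behavior of clear(): the set is emptied in place and remains usable afterwards, which is the pattern most of the examples below rely on when they reuse a set across loop iterations.

import java.util.HashSet;
import java.util.Set;

public class SetClearExample {
    public static void main(String[] args) {
        Set<String> names = new HashSet<String>();
        names.add("alpha");
        names.add("beta");
        System.out.println(names.size());    // prints 2

        // Remove every element; the set itself stays allocated and usable.
        names.clear();
        System.out.println(names.isEmpty()); // prints true

        // Typical reuse pattern: fill the same set again instead of
        // allocating a new one.
        names.add("gamma");
        System.out.println(names.size());    // prints 1
    }
}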

Usage

From source file:org.apache.flex.compiler.clients.ASC.java

/**
 * Compile the given source files. Each source file has its own symbol table.
 *
 * @param workspace workspace
 * @param sourceFilenames source filenames
 * @throws InterruptedException compiler thread error
 * @return true if compiled without problems
 */
private boolean compileSourceFiles(final Workspace workspace, final List<String> sourceFilenames)
        throws InterruptedException {
    boolean success = true;
    long startTime = System.nanoTime();
    int problemCount = 0;

    //  Set up a problem query object to check the result of the compilation.
    //  Some problems found aren't ever relevant to ASC, and some depend on 
    //  the switches passed on the command line.
    problemQuery = new ProblemQuery();
    problemQuery.setShowProblemByClass(MultipleExternallyVisibleDefinitionsProblem.class, false);
    problemQuery.setShowProblemByClass(UnfoundPropertyProblem.class, false);
    problemQuery.setShowStrictSemantics(useStaticSemantics());
    problemQuery.setShowWarnings(getShowWarnings());

    // process source AS3 files
    Set<ICompilationUnit> mainUnits = new LinkedHashSet<ICompilationUnit>(getSourceFilenames().size());
    final HashMap<ICompilationUnit, Integer> unitOrdering = new HashMap<ICompilationUnit, Integer>();

    ASCProject applicationProject = createProject(workspace, problemQuery);

    // Add any problems from parsing config vars supplied on the command line
    List<ICompilerProblem> configProblems = new ArrayList<ICompilerProblem>();
    applicationProject.collectProblems(configProblems);
    problemQuery.addAll(configProblems);

    int i = 0;
    for (final String sourceFilename : sourceFilenames) {
        // If we are not merging then create a new project
        // and set the compilation units.
        if (i > 0 && !getMergeABCs()) {
            applicationProject = createProject(workspace, problemQuery);
            mainUnits.clear();
            unitOrdering.clear();
            problemQuery.clear();
        }

        final IFileSpecification sourceFileSpec = new FileSpecification(sourceFilename);
        workspace.fileAdded(sourceFileSpec);
        final ICompilationUnit cu = ASCompilationUnit.createMainCompilationUnitForASC(applicationProject,
                sourceFileSpec, this);
        mainUnits.add(cu);
        unitOrdering.put(cu, unitOrdering.size());

        // add compilation unit to project
        applicationProject.addCompilationUnit(cu);
        applicationProject.updatePublicAndInternalDefinitions(Collections.singletonList(cu));

        // The logic that re-parses a garbage-collected syntax tree does not
        // know about the files included with the -in option, so we'll pin
        // the syntax tree here so we know we will never need to re-parse
        // the syntax tree for the root compilation unit.
        rootedSyntaxTrees.add(cu.getSyntaxTreeRequest().get().getAST());

        // syntax errors
        for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
            final ICompilerProblem[] problems = compilationUnit.getSyntaxTreeRequest().get().getProblems();
            problemQuery.addAll(problems);
        }

        //  Parse trees
        if (getShowParseTrees()) {
            final String outputSyntaxFilename = FilenameUtils.removeExtension(sourceFilename).concat(".p");
            try {
                PrintWriter syntaxFile = new PrintWriter(outputSyntaxFilename);
                final IASNode ast = cu.getSyntaxTreeRequest().get().getAST();
                if (ast instanceof FileNode) {
                    // Parse the full tree and add the new problems found in the
                    // function bodies into the problem collection.
                    final FileNode fileNode = (FileNode) ast;
                    final ImmutableSet<ICompilerProblem> skeletonProblems = ImmutableSet
                            .copyOf(fileNode.getProblems());
                    fileNode.populateFunctionNodes();
                    final ImmutableSet<ICompilerProblem> allProblems = ImmutableSet
                            .copyOf(fileNode.getProblems());

                    // Only add newly found problems. Otherwise, there will be
                    // duplicates in "problemQuery".
                    final SetView<ICompilerProblem> difference = Sets.difference(allProblems, skeletonProblems);
                    problemQuery.addAll(difference);
                }

                syntaxFile.println(ast);
                syntaxFile.flush();
                syntaxFile.close();
            } catch (FileNotFoundException e) {
                problemQuery.add(new FileWriteProblem(e));
            }
        }

        // output
        // For the merged case, wait until the last source file.
        // For the non-merged case, output each source file individually.
        if (!getMergeABCs() || (getMergeABCs() && (i == sourceFilenames.size() - 1))) {

            // Let's start up all the compilation units to try and get more threads generating code
            // at the same time.
            for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
                compilationUnit.startBuildAsync(TargetType.SWF);
            }

            //  Run the resolveRefs() logic for as long as it's relevant.
            for (final ICompilationUnit compilationUnit : applicationProject.getCompilationUnits()) {
                final ICompilerProblem[] problems = compilationUnit.getOutgoingDependenciesRequest().get()
                        .getProblems();
                problemQuery.addAll(problems);
            }

            String outputFileBaseName = FilenameUtils.getBaseName(sourceFilename);
            String outputDirectoryName = FilenameUtils.getFullPath(sourceFilename);

            // Apply user specified basename and output directory. The
            // basename is only changed if ABCs are merged, since each ABC
            // needs a unique filename.
            if (getMergeABCs() && getOutputBasename() != null)
                outputFileBaseName = getOutputBasename();

            final String specifiedOutputDirectory = getOutputDirectory();
            if (!Strings.isNullOrEmpty(specifiedOutputDirectory))
                outputDirectoryName = normalizeDirectoryName(specifiedOutputDirectory);

            // Output to either a SWF or ABC file.
            if (isGenerateSWF()) {
                final boolean swfBuilt = generateSWF(outputDirectoryName, outputFileBaseName,
                        applicationProject, mainUnits, sourceFilename, problemQuery, startTime);
                if (!swfBuilt)
                    success = false;
            } else {
                Collection<ICompilationUnit> units = mainUnits;
                if (getMergeABCs()) {
                    // Run the topological sort to figure out which order to output the ABCs in.
                    // Falls back to command-line order rather than a filename-based lexical sort
                    // in cases where there are no real dependencies between the scripts.
                    units = applicationProject.getDependencyGraph().topologicalSort(mainUnits,
                            new Comparator<ICompilationUnit>() {
                                @Override
                                public int compare(ICompilationUnit o1, ICompilationUnit o2) {
                                    return (unitOrdering.containsKey(o2) ? unitOrdering.get(o2) : 0)
                                            - (unitOrdering.containsKey(o1) ? unitOrdering.get(o1) : 0);
                                }
                            });
                    Collection<ICompilationUnit> sourceUnits = new ArrayList<ICompilationUnit>(
                            mainUnits.size());
                    for (ICompilationUnit unit : units) {
                        // The dependency graph will put all CompilationUnits in the results, but
                        // we only want the CUs for the source files, since the imports should not be merged
                        // into the resulting ABC
                        if (mainUnits.contains(unit)) {
                            sourceUnits.add(unit);
                        }
                    }
                    units = sourceUnits;
                }
                final boolean abcBuilt = generateABCFile(outputDirectoryName, outputFileBaseName,
                        applicationProject, units, sourceFilename, problemQuery, startTime);
                if (!abcBuilt)
                    success = false;
            }

            //*************************************
            // Report problems.
            //

            // let's make a categorizer, so we can differentiate errors and warnings
            CompilerProblemCategorizer compilerProblemCategorizer = new CompilerProblemCategorizer();
            problemFormatter = new WorkspaceProblemFormatter(workspace, compilerProblemCategorizer);
            ProblemPrinter printer = new ProblemPrinter(problemFormatter, err);
            problemCount += printer.printProblems(problemQuery.getFilteredProblems());

            startTime = System.nanoTime();
        }
        i++;
    }

    // If there were problems, print out the summary
    if (problemCount > 0) {
        Collection<ICompilerProblem> errors = new ArrayList<ICompilerProblem>();
        Collection<ICompilerProblem> warnings = new ArrayList<ICompilerProblem>();
        problemQuery.getErrorsAndWarnings(errors, warnings);

        int errorCount = errors.size();
        int warningCount = warnings.size();

        if (errorCount == 1) {
            err.println();
            err.println("1 error found");
        } else if (errorCount > 1) {
            err.println();
            err.println(errorCount + " errors found");
        }

        if (warningCount == 1) {
            err.println();
            err.println("1 warning found");
        } else if (warningCount > 1) {
            err.println();
            err.println(warningCount + " warnings found");
        }

        if (success && (errorCount > 0)) {
            success = false;
        }
    }

    return success;
}

From source file:com.netflix.nicobar.core.module.ScriptModuleLoaderTest.java

@Test
public void testReloadWithUpdatedDepdencies() throws Exception {
    // original graph: A->B->C->D
    long originalCreateTime = 1000;
    Set<ScriptArchive> updateArchives = new HashSet<ScriptArchive>();
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("A")
            .addCompilerPluginId("mockPlugin").addModuleDependency("B").build(), originalCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("B")
            .addCompilerPluginId("mockPlugin").addModuleDependency("C").build(), originalCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("C")
            .addCompilerPluginId("mockPlugin").addModuleDependency("D").build(), originalCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(
            new ScriptModuleSpec.Builder("D").addCompilerPluginId("mockPlugin").build(), originalCreateTime));

    ScriptModuleListener mockListener = createMockListener();
    ScriptModuleLoader moduleLoader = new ScriptModuleLoader.Builder().addListener(mockListener)
            .addPluginSpec(new ScriptCompilerPluginSpec.Builder("mockPlugin")
                    .withPluginClassName(MockScriptCompilerPlugin.class.getName()).build())
            .build();

    when(MOCK_COMPILER.shouldCompile(Mockito.any(ScriptArchive.class))).thenReturn(true);
    when(MOCK_COMPILER.compile(Mockito.any(ScriptArchive.class), Mockito.any(JBossModuleClassLoader.class),
            Mockito.any(Path.class))).thenReturn(Collections.<Class<?>>emptySet());
    moduleLoader.updateScriptArchives(updateArchives);

    // validate that they were compiled in reverse dependency order
    InOrder orderVerifier = inOrder(mockListener);
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("D", originalCreateTime),
            (ScriptModule) Mockito.isNull());
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("C", originalCreateTime),
            (ScriptModule) Mockito.isNull());
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("B", originalCreateTime),
            (ScriptModule) Mockito.isNull());
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("A", originalCreateTime),
            (ScriptModule) Mockito.isNull());
    orderVerifier.verifyNoMoreInteractions();

    // updated graph: D->C->B->A
    updateArchives.clear();
    long updatedCreateTime = 2000;
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("D")
            .addCompilerPluginId("mockPlugin").addModuleDependency("C").build(), updatedCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("C")
            .addCompilerPluginId("mockPlugin").addModuleDependency("B").build(), updatedCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(new ScriptModuleSpec.Builder("B")
            .addCompilerPluginId("mockPlugin").addModuleDependency("A").build(), updatedCreateTime));
    updateArchives.add(new TestDependecyScriptArchive(
            new ScriptModuleSpec.Builder("A").addCompilerPluginId("mockPlugin").build(), updatedCreateTime));

    moduleLoader.updateScriptArchives(updateArchives);

    // validate that they were compiled in the updated reverse dependency order
    orderVerifier = inOrder(mockListener);
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("A", updatedCreateTime),
            moduleEquals("A", originalCreateTime));
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("B", updatedCreateTime),
            moduleEquals("B", originalCreateTime));
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("C", updatedCreateTime),
            moduleEquals("C", originalCreateTime));
    orderVerifier.verify(mockListener).moduleUpdated(moduleEquals("D", updatedCreateTime),
            moduleEquals("D", originalCreateTime));
    orderVerifier.verifyNoMoreInteractions();

    // validate the post-condition of the module database
    assertEquals(moduleLoader.getScriptModule("A").getCreateTime(), updatedCreateTime);
    assertEquals(moduleLoader.getScriptModule("B").getCreateTime(), updatedCreateTime);
    assertEquals(moduleLoader.getScriptModule("C").getCreateTime(), updatedCreateTime);
    assertEquals(moduleLoader.getScriptModule("D").getCreateTime(), updatedCreateTime);
    assertEquals(moduleLoader.getAllScriptModules().size(), 4);
}

From source file:de.erdesignerng.visual.jgraph.JGraphEditor.java

private List<Set<Table>> buildHierarchy(Model aModel) {
    // Try to build a hierarchy
    List<Set<Table>> theLayers = new ArrayList<>();
    Set<Table> theCurrentLayer = new HashSet<>();
    Set<Table> theAlreadyKnown = new HashSet<>();
    for (Table theTable : aModel.getTables()) {
        boolean isTopLevel = true;
        List<Relation> theRelations = aModel.getRelations().getExportedKeysFor(theTable);
        if (theRelations.size() == 0) {
            isTopLevel = true;
        } else {
            for (Relation theRelation : theRelations) {
                if (theRelation.getImportingTable() != theTable) {
                    isTopLevel = false;
                }
            }
        }
        if (isTopLevel) {
            theCurrentLayer.add(theTable);
            theAlreadyKnown.add(theTable);
        }
    }

    // Top Level components
    theLayers.add(theCurrentLayer);

    Set<Table> theTablesToSearch = new HashSet<>();
    theTablesToSearch.addAll(theCurrentLayer);
    while (theTablesToSearch.size() > 0) {
        theCurrentLayer = new HashSet<>();
        for (Table theTable : theTablesToSearch) {
            for (Relation theRelation : aModel.getRelations().getForeignKeysFor(theTable)) {
                if (theRelation.getExportingTable() != theTable
                        && !theAlreadyKnown.contains(theRelation.getExportingTable())) {
                    theCurrentLayer.add(theRelation.getExportingTable());
                    theAlreadyKnown.add(theRelation.getExportingTable());
                }
            }
        }
        if (theCurrentLayer.size() > 0) {

            Set<Table> theTablesToRemove = new HashSet<>();

            for (Table theTable : theCurrentLayer) {
                boolean isUsedInSameLayer = false;
                for (Relation theRelation : aModel.getRelations().getExportedKeysFor(theTable)) {
                    if (theRelation.getImportingTable() != theTable
                            && theCurrentLayer.contains(theRelation.getImportingTable())) {
                        isUsedInSameLayer = true;
                    }
                }
                if (isUsedInSameLayer) {
                    theTablesToRemove.add(theTable);
                }
            }

            theCurrentLayer.removeAll(theTablesToRemove);
            theAlreadyKnown.removeAll(theTablesToRemove);

            theLayers.add(theCurrentLayer);
            theTablesToSearch = theCurrentLayer;
        } else {
            theTablesToSearch.clear();
        }
    }
    return theLayers;
}

From source file:org.apache.sqoop.mapreduce.CombineFileInputFormat.java

/**
 * Return all the splits in the specified set of paths
 */
private void getMoreSplits(JobContext job, Path[] paths, long maxSize, long minSizeNode, long minSizeRack,
        List<InputSplit> splits) throws IOException {
    Configuration conf = job.getConfiguration();

    // all blocks for all the files in input set
    OneFileInfo[] files;

    // mapping from a rack name to the list of blocks it has
    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    // mapping from a block to the nodes on which it has replicas
    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    // mapping from a node to the list of blocks that it contains
    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    files = new OneFileInfo[paths.length];
    if (paths.length == 0) {
        return;
    }

    // populate all the blocks for all files
    long totLength = 0;
    for (int i = 0; i < paths.length; i++) {
        files[i] = new OneFileInfo(paths[i], conf, isSplitable(job, paths[i]), rackToBlocks, blockToNodes,
                nodeToBlocks, rackToNodes, maxSize);
        totLength += files[i].getLength();
    }

    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    Set<String> nodes = new HashSet<String>();
    long curSplitSize = 0;

    // process all nodes and create splits that are local
    // to a node.
    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> one = iter.next();
        nodes.add(one.getKey());
        List<OneBlockInfo> blocksInNode = one.getValue();

        // for each block, copy it into validBlocks. Delete it from
        // blockToNodes so that the same block does not appear in
        // two different splits.
        for (OneBlockInfo oneblock : blocksInNode) {
            if (blockToNodes.containsKey(oneblock)) {
                validBlocks.add(oneblock);
                blockToNodes.remove(oneblock);
                curSplitSize += oneblock.length;

                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if (maxSize != 0 && curSplitSize >= maxSize) {
                    // create an input split and add it to the splits array
                    addCreatedSplit(splits, nodes, validBlocks);
                    curSplitSize = 0;
                    validBlocks.clear();
                }
            }
        }
        // if there were any blocks left over and their combined size is
        // larger than minSplitNode, then combine them into one split.
        // Otherwise add them back to the unprocessed pool. It is likely
        // that they will be combined with other blocks from the
        // same rack later on.
        if (minSizeNode != 0 && curSplitSize >= minSizeNode) {
            // create an input split and add it to the splits array
            addCreatedSplit(splits, nodes, validBlocks);
        } else {
            for (OneBlockInfo oneblock : validBlocks) {
                blockToNodes.put(oneblock, oneblock.hosts);
            }
        }
        validBlocks.clear();
        nodes.clear();
        curSplitSize = 0;
    }

    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these
    // overflow blocks will be combined into splits.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();

    // Process all racks over and over again until there is no more work to do.
    while (blockToNodes.size() > 0) {

        // Create one split for this rack before moving over to the next rack.
        // Come back to this rack after creating a single split for each of the
        // remaining racks.
        // Process one rack location at a time, Combine all possible blocks that
        // reside on this rack as one split. (constrained by minimum and maximum
        // split size).

        // iterate over all racks
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
                .hasNext();) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();

            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            boolean createdSplit = false;
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;

                    // if the accumulated split size exceeds the maximum, then
                    // create this split.
                    if (maxSize != 0 && curSplitSize >= maxSize) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(splits, getHosts(racks), validBlocks);
                        createdSplit = true;
                        break;
                    }
                }
            }

            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    // otherwise, store these blocks into overflow data structure
                    addCreatedSplit(splits, getHosts(racks), validBlocks);
                } else {
                    // There were a few blocks in this rack that
                    // remained to be processed. Keep them in 'overflow' block list.
                    // These will be combined later.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Process all overflow blocks
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;

        // This might cause an existing rack location to be re-added,
        // but it should be ok.
        for (int i = 0; i < oneblock.racks.length; i++) {
            racks.add(oneblock.racks[i]);
        }

        // if the accumulated split size exceeds the maximum, then
        // create this split.
        if (maxSize != 0 && curSplitSize >= maxSize) {
            // create an input split and add it to the splits array
            addCreatedSplit(splits, getHosts(racks), validBlocks);
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    // Process any remaining blocks, if any.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(splits, getHosts(racks), validBlocks);
    }
}

From source file:org.apache.jackrabbit.oak.plugins.index.lucene.IndexCopierTest.java

@Test
public void failureInDelete() throws Exception {
    final Set<String> testFiles = new HashSet<String>();
    Directory baseDir = new CloseSafeDir() {
        @Override
        public void deleteFile(String name) throws IOException {
            if (testFiles.contains(name)) {
                throw new IOException("Not allowed to delete " + name);
            }
            super.deleteFile(name);
        }
    };

    IndexDefinition defn = new IndexDefinition(root, builder.getNodeState());
    IndexCopier c1 = new RAMIndexCopier(baseDir, sameThreadExecutor(), getWorkDir());

    Directory r1 = new RAMDirectory();

    byte[] t1 = writeFile(r1, "t1");
    byte[] t2 = writeFile(r1, "t2");

    Directory w1 = c1.wrapForRead("/foo", defn, r1);
    readAndAssert(w1, "t1", t1);
    readAndAssert(w1, "t2", t2);

    // t1 and t2 should now be present locally (in the base dir that backs the local copy)
    assertTrue(baseDir.fileExists("t1"));
    assertTrue(baseDir.fileExists("t2"));

    Directory r2 = new CloseSafeDir();
    copy(r1, r2);
    r2.deleteFile("t1");

    Directory w2 = c1.wrapForRead("/foo", defn, r2);

    // Close would trigger removal of files which are not present in the remote
    testFiles.add("t1");
    w2.close();

    assertEquals(1, c1.getFailedToDeleteFiles().size());
    IndexCopier.LocalIndexFile testFile = c1.getFailedToDeleteFiles().values().iterator().next();

    assertEquals(1, testFile.getDeleteAttemptCount());
    assertEquals(IOUtils.humanReadableByteCount(t1.length), c1.getGarbageSize());
    assertEquals(1, c1.getGarbageDetails().length);

    Directory w3 = c1.wrapForRead("/foo", defn, r2);
    w3.close();
    assertEquals(2, testFile.getDeleteAttemptCount());

    // Now let the file be deleted
    testFiles.clear();

    Directory w4 = c1.wrapForRead("/foo", defn, r2);
    w4.close();

    //No pending deletes left
    assertEquals(0, c1.getFailedToDeleteFiles().size());
}

From source file:hydrograph.ui.propertywindow.widgets.customwidgets.operational.TransformDialog.java

private void setIsOperationInputFieldDuplicate() {
    if (!transformMapping.getMappingSheetRows().isEmpty()) {
        Set<FilterProperties> set = null;
        List<MappingSheetRow> mappingSheetRows = transformMapping.getMappingSheetRows();
        for (MappingSheetRow mappingSheetRow : mappingSheetRows) {
            set = new HashSet<FilterProperties>(mappingSheetRow.getInputFields());
            if (set.size() < mappingSheetRow.getInputFields().size()) {
                isOperationInputFieldDuplicate = true;
                break;
            }
        }
        if (set != null) {
            set.clear();
        }
    }
}

From source file:tr.edu.gsu.nerwip.recognition.internal.modelless.subee.Subee.java

/**
 * Handles the name of the person described in the processed article. For this matter,
 * we consider the article title and name, as well as the first sentence, which generally
 * starts with the full name of the person.
 *
 * @param article 
 *       Article to process.
 * @return
 *       List of possible entities based on the analysis of the article title and name.
 * 
 * @throws ClientProtocolException
 *       Problem while accessing Freebase.
 * @throws ParseException
 *       Problem while accessing Freebase.
 * @throws IOException
 *       Problem while accessing Freebase.
 * @throws org.json.simple.parser.ParseException
 *       Problem while accessing Freebase.
 */
private List<AbstractEntity<?>> processMainName(Article article)
        throws ClientProtocolException, ParseException, IOException, org.json.simple.parser.ParseException {
    logger.increaseOffset();
    List<AbstractEntity<?>> result = new ArrayList<AbstractEntity<?>>();
    String rawText = article.getRawText();

    // init candidate strings with article name and title 
    Set<String> candidateStrings = new TreeSet<String>();
    String articleTitle = article.getTitle();
    //debug
    //if(articleTitle.equals("Alfred Lothar Wegener"))
    //   System.out.print("");
    logger.log("Article title: " + articleTitle);
    candidateStrings.add(articleTitle);
    String articleName = article.getName();
    logger.log("Article name: " + articleName);
    articleName = articleName.replace('_', ' ').trim();
    candidateStrings.add(articleName);

    // process the beginning of the first sentence
    // we look for the string before the first parenthesis (usually containing birth info)
    // if there's none, we just ignore this potential information source
    Pattern p = Pattern.compile("^[^\\.]+?\\(");
    Matcher m = p.matcher(rawText);
    if (m.find()) {
        int startPos = m.start();
        if (startPos == 0) {
            int endPos = m.end();
            String persName = rawText.substring(0, endPos - 1);
            persName = persName.trim();
            int wordCount = persName.length() - persName.replaceAll(" ", "").length();
            if (wordCount > 6)
                logger.log(
                        "Not able to extract person name from first sentence (too many words before the parenthesis): \""
                                + rawText.substring(0, 75) + "\"");
            else {
                logger.log("Person name: " + persName);
                candidateStrings.add(persName);
            }
        }
    } else
        logger.log("Not able to extract person name from first sentence (can't find the parenthesis): \""
                + rawText.substring(0, 75) + "\"");

    // possibly remove double quotes (especially for the nicknames)
    List<String> nickFull = new ArrayList<String>();
    Set<String> copy = new TreeSet<String>(candidateStrings);
    candidateStrings.clear();
    for (String candidateString : copy) {
        if (candidateString.contains("\"")) {
            nickFull.add(candidateString);
            candidateString = candidateString.replaceAll("\"", "");
        }
        candidateStrings.add(candidateString);
    }

    // possibly remove an indication in parenthesis at the end (especially for the titles)
    copy = new TreeSet<String>(candidateStrings);
    candidateStrings.clear();
    for (String candidateString : copy) {
        if (candidateString.endsWith(")")) {
            String temp[] = candidateString.split("\\(");
            candidateString = temp[0].trim();
        }
        candidateStrings.add(candidateString);
    }

    // add the last name alone; only with the preceding word; only with the 2 preceding words, etc.
    copy = new TreeSet<String>(candidateStrings);
    for (String candidateString : copy) {
        String split[] = candidateString.split(" ");
        for (int i = split.length - 1; i >= 0; i--) {
            String temp = "";
            for (int j = i; j < split.length; j++)
                temp = temp + split[j] + " ";
            temp = temp.trim();
            candidateStrings.add(temp);
        }
    }

    // add very first and very last names (for more than 2 words)
    copy = new TreeSet<String>(candidateStrings);
    for (String candidateString : copy) {
        String split[] = candidateString.split(" ");
        if (split.length > 2) {
            String temp = split[0] + " " + split[split.length - 1];
            candidateStrings.add(temp);
        }
    }

    // add variants with initials instead of firstnames
    copy = new TreeSet<String>(candidateStrings);
    for (String candidateString : copy) {
        String split[] = candidateString.split(" ");
        if (split.length > 1) {
            String initials1 = "";
            String initials2 = "";
            for (int i = 0; i < split.length - 1; i++) {
                initials1 = initials1 + split[i].substring(0, 1).toUpperCase(Locale.ENGLISH) + ". ";
                initials2 = initials2 + split[i].substring(0, 1).toUpperCase(Locale.ENGLISH) + ".";
            }
            initials1 = initials1 + split[split.length - 1];
            initials2 = initials2 + " " + split[split.length - 1];
            candidateStrings.add(initials1);
            candidateStrings.add(initials2);
        }
    }

    // add the original version of the nicknames
    candidateStrings.addAll(nickFull);

    // look for similar strings in the text
    for (String expr : candidateStrings) {
        String escapedStr = Pattern.quote(expr);
        p = Pattern.compile("\\b" + escapedStr + "\\b");
        m = p.matcher(rawText);
        while (m.find()) {
            int startPos = m.start();
            int endPos = m.end();
            String valueStr = m.group();
            AbstractEntity<?> ent = AbstractEntity.build(EntityType.PERSON, startPos, endPos,
                    RecognizerName.SUBEE, valueStr);
            result.add(ent);
        }
    }

    if (result.isEmpty())
        logger.log("WARNING: title not found at all in the text, which is unusual");

    logger.decreaseOffset();
    return result;
}

From source file:org.apache.hadoop.yarn.client.cli.TestYarnCLI.java

@Test
public void testListClusterNodes() throws Exception {
    List<NodeReport> nodeReports = new ArrayList<NodeReport>();
    nodeReports.addAll(getNodeReports(1, NodeState.NEW));
    nodeReports.addAll(getNodeReports(2, NodeState.RUNNING));
    nodeReports.addAll(getNodeReports(1, NodeState.UNHEALTHY));
    nodeReports.addAll(getNodeReports(1, NodeState.DECOMMISSIONED));
    nodeReports.addAll(getNodeReports(1, NodeState.REBOOTED));
    nodeReports.addAll(getNodeReports(1, NodeState.LOST));

    NodeCLI cli = new NodeCLI();
    cli.setClient(client);
    cli.setSysOutPrintStream(sysOut);

    Set<NodeState> nodeStates = new HashSet<NodeState>();
    nodeStates.add(NodeState.NEW);
    NodeState[] states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    int result = cli.run(new String[] { "-list", "-states", "NEW" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    PrintWriter pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    String nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.RUNNING);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "RUNNING" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:2");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    result = cli.run(new String[] { "-list" });
    assertEquals(0, result);
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    result = cli.run(new String[] { "-list", "-showDetails" });
    assertEquals(0, result);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:2");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.println("Detailed Node Information :");
    pw.println("\tConfigured Resources : <memory:0, vCores:0, gpus:0>");
    pw.println("\tAllocated Resources : <memory:0, vCores:0, gpus:0>");
    pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
    pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
    pw.println("\tNode-Labels : ");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.println("Detailed Node Information :");
    pw.println("\tConfigured Resources : <memory:0, vCores:0, gpus:0>");
    pw.println("\tAllocated Resources : <memory:0, vCores:0, gpus:0>");
    pw.println("\tResource Utilization by Node : PMem:2048 MB, VMem:4096 MB, VCores:8.0");
    pw.println("\tResource Utilization by Containers : PMem:1024 MB, VMem:2048 MB, VCores:4.0");
    pw.println("\tNode-Labels : ");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.UNHEALTHY);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "UNHEALTHY" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.DECOMMISSIONED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "DECOMMISSIONED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.REBOOTED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "REBOOTED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(7)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.LOST);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "LOST" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:1");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    nodeStates.add(NodeState.NEW);
    nodeStates.add(NodeState.RUNNING);
    nodeStates.add(NodeState.LOST);
    nodeStates.add(NodeState.REBOOTED);
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-states", "NEW,RUNNING,LOST,REBOOTED" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:5");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    nodeStates.clear();
    for (NodeState s : NodeState.values()) {
        nodeStates.add(s);
    }
    states = nodeStates.toArray(new NodeState[0]);
    when(client.getNodeReports(states)).thenReturn(getNodeReports(nodeReports, nodeStates));
    result = cli.run(new String[] { "-list", "-All" });
    assertEquals(0, result);
    verify(client).getNodeReports(states);
    baos = new ByteArrayOutputStream();
    pw = new PrintWriter(baos);
    pw.println("Total Nodes:7");
    pw.print("         Node-Id\t     Node-State\tNode-Http-Address\t");
    pw.println("Number-of-Running-Containers");
    pw.print("         host0:0\t            NEW\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host1:0\t        RUNNING\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t      UNHEALTHY\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t DECOMMISSIONED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t       REBOOTED\t       host1:8888\t");
    pw.println("                           0");
    pw.print("         host0:0\t           LOST\t       host1:8888\t");
    pw.println("                           0");
    pw.close();
    nodesReportStr = baos.toString("UTF-8");
    Assert.assertEquals(nodesReportStr, sysOutStream.toString());
    verify(sysOut, times(10)).write(any(byte[].class), anyInt(), anyInt());

    sysOutStream.reset();
    result = cli.run(new String[] { "-list", "-states", "InvalidState" });
    assertEquals(-1, result);
}

From source file:fr.landel.utils.assertor.AssertorIterableTest.java

/**
 * Test method for {@link AssertorIterable#contains}.
 *
 * @throws IOException
 *             if the iterable does not contain the expected elements
 */
@Test
public void testContainsIterable() throws IOException {
    final String el1 = "element1";
    final String el2 = "element2";

    final Set<String> set1 = new HashSet<>();
    final Set<String> set2 = new HashSet<>();
    final Set<String> set3 = new HashSet<>();
    set1.add(el1);
    set2.add(el1);
    set3.add(el2);

    Assertor.that(set1).containsAll(set2).orElseThrow("iterable doesn't contain the list %s*");
    assertFalse(Assertor.that(set1).containsAll(set3).isOK());
    Assertor.that(set1).containsAny(set2).orElseThrow("iterable doesn't contain the list %s*");

    Assertor.that(set1, EnumAnalysisMode.STREAM).containsAll(set2)
            .orElseThrow("iterable doesn't contain the list %s*");
    assertFalse(Assertor.that(set1, EnumAnalysisMode.STREAM).containsAll(set3).isOK());
    Assertor.that(set1, EnumAnalysisMode.STREAM).containsAny(set2)
            .orElseThrow("iterable doesn't contain the list %s*");

    Assertor.that(set1, EnumAnalysisMode.PARALLEL).containsAll(set2)
            .orElseThrow("iterable doesn't contain the list %s*");
    assertFalse(Assertor.that(set1, EnumAnalysisMode.PARALLEL).containsAll(set3).isOK());
    Assertor.that(set1, EnumAnalysisMode.PARALLEL).containsAny(set2)
            .orElseThrow("iterable doesn't contain the list %s*");

    set2.add(el2);
    Assertor.that(set1).containsAny(set2).orElseThrow("iterable doesn't contain the list %s*");

    assertException(() -> {
        Assertor.that(set1).containsAll(set2).orElseThrow("iterable doesn't contain the list %2$s*");
        fail(ERROR);
    }, IllegalArgumentException.class, "iterable doesn't contain the list " + set2.toString());

    assertException(() -> {
        Assertor.that(set1).containsAll(set2).orElseThrow(new IOException(), true);
        fail(ERROR);
    }, IOException.class);

    assertException(() -> {
        Assertor.that(set1).containsAll((Iterable<String>) null).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "neither iterables can be null or empty");

    assertException(() -> {
        Assertor.that(set1).containsAny((Iterable<String>) null).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "neither iterables can be null or empty");

    set1.clear();

    assertException(() -> {
        Assertor.that(set1).containsAll(set2).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class);

    assertException(() -> {
        Assertor.that(set1).containsAll(set2).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "neither iterables can be null or empty");

    assertException(() -> {
        Assertor.that((Iterable<String>) null).contains(el1).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "the iterable cannot be null or empty");

    assertException(() -> {
        Assertor.that((Iterable<String>) null).containsAny(set2).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "neither iterables can be null or empty");

    assertException(() -> {
        Assertor.that(set1).containsAll((Iterable<String>) null).orElseThrow();
        fail(ERROR);
    }, IllegalArgumentException.class, "neither iterables can be null or empty");

    set1.add(null);
    Assertor.that(set1).contains(null).orElseThrow();
}

From source file:com.ikanow.aleph2.analytics.hadoop.assets.UpdatedCombineFileInputFormat.java

@VisibleForTesting
void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks, Map<OneBlockInfo, String[]> blockToNodes,
        Map<String, List<OneBlockInfo>> rackToBlocks, long totLength, long maxSize, long minSizeNode,
        long minSizeRack, List<InputSplit> splits) {
    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    long curSplitSize = 0;

    int totalNodes = nodeToBlocks.size();
    long totalLength = totLength;

    Multiset<String> splitsPerNode = HashMultiset.create();
    Set<String> completedNodes = new HashSet<String>();

    while (true) {
        // it is allowed for maxSize to be 0. Disable smoothing load for such cases

        // process all nodes and create splits that are local to a node. Generate
        // one split per node iteration, and walk over nodes multiple times to
        // distribute the splits across nodes. 
        for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
                .hasNext();) {
            Map.Entry<String, Set<OneBlockInfo>> one = iter.next();

            String node = one.getKey();

            // Skip the node if it has previously been marked as completed.
            if (completedNodes.contains(node)) {
                continue;
            }

            Set<OneBlockInfo> blocksInCurrentNode = one.getValue();

            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            Iterator<OneBlockInfo> oneBlockIter = blocksInCurrentNode.iterator();
            while (oneBlockIter.hasNext()) {
                OneBlockInfo oneblock = oneBlockIter.next();

                // Remove all blocks which may already have been assigned to other
                // splits.
                if (!blockToNodes.containsKey(oneblock)) {
                    oneBlockIter.remove();
                    continue;
                }

                validBlocks.add(oneblock);
                blockToNodes.remove(oneblock);
                curSplitSize += oneblock.length;

                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if (maxSize != 0 && curSplitSize >= maxSize) {
                    // create an input split and add it to the splits array
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    curSplitSize = 0;

                    splitsPerNode.add(node);

                    // Remove entries from blocksInNode so that we don't walk these
                    // again.
                    blocksInCurrentNode.removeAll(validBlocks);
                    validBlocks.clear();

                    // Done creating a single split for this node. Move on to the next
                    // node so that splits are distributed across nodes.
                    break;
                }

            }
            if (validBlocks.size() != 0) {
                // This implies that the last few blocks (or all in case maxSize=0)
                // were not part of a split. The node is complete.

                // if there were any blocks left over and their combined size is
                // larger than minSplitNode, then combine them into one split.
                // Otherwise add them back to the unprocessed pool. It is likely
                // that they will be combined with other blocks from the
                // same rack later on.
                // This condition also kicks in when max split size is not set. All
                // blocks on a node will be grouped together into a single split.
                if (minSizeNode != 0 && curSplitSize >= minSizeNode && splitsPerNode.count(node) == 0) {
                    // haven't created any split on this machine. so its ok to add a
                    // smaller one for parallelism. Otherwise group it in the rack for
                    // balanced size create an input split and add it to the splits
                    // array
                    addCreatedSplit(splits, Collections.singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    splitsPerNode.add(node);
                    // Remove entries from blocksInNode so that we don't walk this again.
                    blocksInCurrentNode.removeAll(validBlocks);
                    // The node is done. This was the last set of blocks for this node.
                } else {
                    // Put the unplaced blocks back into the pool for later rack-allocation.
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                }
                validBlocks.clear();
                curSplitSize = 0;
                completedNodes.add(node);
            } else { // No in-flight blocks.
                if (blocksInCurrentNode.size() == 0) {
                    // Node is done. All blocks were fit into node-local splits.
                    completedNodes.add(node);
                } // else Run through the node again.
            }
        }

        // Check if node-local assignments are complete.
        if (completedNodes.size() == totalNodes || totalLength == 0) {
            // All nodes have been walked over and marked as completed or all blocks
            // have been assigned. The rest should be handled via rackLock assignment.
            LOG.info("DEBUG: Terminated node allocation with : CompletedNodes: " + completedNodes.size()
                    + ", size left: " + totalLength);
            break;
        }
    }

    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these 
    // overflow blocks will be combined into splits.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();

    // Process all racks over and over again until there is no more work to do.
    while (blockToNodes.size() > 0) {

        // Create one split for this rack before moving over to the next rack. 
        // Come back to this rack after creating a single split for each of the 
        // remaining racks.
        // Process one rack location at a time, Combine all possible blocks that
        // reside on this rack as one split. (constrained by minimum and maximum
        // split size).

        // iterate over all racks 
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
                .hasNext();) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();

            // for each block, copy it into validBlocks. Delete it from 
            // blockToNodes so that the same block does not appear in 
            // two different splits.
            boolean createdSplit = false;
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;

                    // if the accumulated split size exceeds the maximum, then 
                    // create this split.
                    if (maxSize != 0 && curSplitSize >= maxSize) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(splits, getHosts(racks), validBlocks);
                        createdSplit = true;
                        break;
                    }
                }
            }

            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    // otherwise, store these blocks into overflow data structure
                    addCreatedSplit(splits, getHosts(racks), validBlocks);
                } else {
                    // There were a few blocks in this rack that 
                    // remained to be processed. Keep them in 'overflow' block list. 
                    // These will be combined later.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Process all overflow blocks
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;

        // This might cause an existing rack location to be re-added,
        // but it should be ok.
        for (int i = 0; i < oneblock.racks.length; i++) {
            racks.add(oneblock.racks[i]);
        }

        // if the accumulated split size exceeds the maximum, then 
        // create this split.
        if (maxSize != 0 && curSplitSize >= maxSize) {
            // create an input split and add it to the splits array
            addCreatedSplit(splits, getHosts(racks), validBlocks);
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    // Process any remaining blocks, if any.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(splits, getHosts(racks), validBlocks);
    }
}