Example usage for java.util HashMap remove

List of usage examples for java.util HashMap remove

Introduction

On this page you can find usage examples for java.util.HashMap.remove.

Prototype

public V remove(Object key) 

Document

Removes the mapping for the specified key from this map if present.
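
Before the real-world examples, a minimal sketch of the basic contract (not taken from the examples below): remove returns the value previously associated with the key, or null when no mapping existed. Note that null is also returned when the key was explicitly mapped to null, so the return value alone cannot distinguish those two cases.

import java.util.HashMap;

public class HashMapRemoveDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> map = new HashMap<>();
        map.put("a", 1);

        Integer removed = map.remove("a"); // 1: the value that was mapped
        Integer absent = map.remove("b");  // null: no mapping existed for "b"

        System.out.println(removed);       // 1
        System.out.println(absent);        // null
        System.out.println(map.isEmpty()); // true
    }
}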

Usage

From source file:com.comcast.freeflow.core.FreeFlowContainer.java

public LayoutChangeset getViewChanges(HashMap<Object, FreeFlowItem> oldFrames,
        HashMap<Object, FreeFlowItem> newFrames, boolean moveEvenIfSame) {

    // cleanupViews();
    LayoutChangeset change = new LayoutChangeset();

    if (oldFrames == null) {
        markAdapterDirty = false;
        for (FreeFlowItem freeflowItem : newFrames.values()) {
            change.addToAdded(freeflowItem);
        }

        return change;
    }

    if (markAdapterDirty) {
        markAdapterDirty = false;
        for (FreeFlowItem freeflowItem : newFrames.values()) {
            change.addToAdded(freeflowItem);
        }

        for (FreeFlowItem freeflowItem : oldFrames.values()) {
            change.addToDeleted(freeflowItem);
        }

        return change;
    }

    Iterator<?> it = newFrames.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<?, ?> m = (Map.Entry<?, ?>) it.next();
        FreeFlowItem freeflowItem = (FreeFlowItem) m.getValue();

        if (oldFrames.get(m.getKey()) != null) {

            FreeFlowItem old = oldFrames.remove(m.getKey());
            freeflowItem.view = old.view;

            // if (moveEvenIfSame || !old.compareRect(((FreeFlowItem)
            // m.getValue()).frame)) {

            if (moveEvenIfSame || !old.frame.equals(((FreeFlowItem) m.getValue()).frame)) {

                change.addToMoved(freeflowItem, getActualFrame(freeflowItem));
            }
        } else {
            change.addToAdded(freeflowItem);
        }

    }

    for (FreeFlowItem freeflowItem : oldFrames.values()) {
        change.addToDeleted(freeflowItem);
    }

    frames = newFrames;

    return change;
}
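
This example leans on remove to diff two maps: every key present in both maps is consumed from oldFrames, so whatever is still left in oldFrames at the end is exactly the set of deletions. A stripped-down sketch of that pattern, with illustrative names that are not part of the FreeFlow API:

import java.util.*;

public class MapDiff {
    // Entries only in newMap -> added; in both but changed -> moved; left over in oldMap -> deleted.
    public static <K, V> void diff(Map<K, V> oldMap, Map<K, V> newMap,
            List<V> added, List<V> moved, List<V> deleted) {
        oldMap = new HashMap<>(oldMap);              // copy so the caller's map is untouched
        for (Map.Entry<K, V> e : newMap.entrySet()) {
            V old = oldMap.remove(e.getKey());       // consume the matching old entry, if any
            if (old == null) {
                added.add(e.getValue());             // key appears only in the new map
            } else if (!old.equals(e.getValue())) {
                moved.add(e.getValue());             // present in both, but the value changed
            }
        }
        deleted.addAll(oldMap.values());             // unmatched leftovers were deleted
    }
}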

From source file:org.aspectj.org.eclipse.jdt.internal.core.search.matching.MatchLocator.java

public static SearchDocument[] addWorkingCopies(SearchPattern pattern, SearchDocument[] indexMatches,
        org.aspectj.org.eclipse.jdt.core.ICompilationUnit[] copies, SearchParticipant participant) {
    if (copies == null)
        return indexMatches;
    // working copies take precedence over corresponding compilation units
    HashMap workingCopyDocuments = workingCopiesThatCanSeeFocus(copies, pattern, participant);
    if (workingCopyDocuments.size() == 0)
        return indexMatches;
    SearchDocument[] matches = null;
    int length = indexMatches.length;
    for (int i = 0; i < length; i++) {
        SearchDocument searchDocument = indexMatches[i];
        if (searchDocument.getParticipant() == participant) {
            SearchDocument workingCopyDocument = (SearchDocument) workingCopyDocuments
                    .remove(searchDocument.getPath());
            if (workingCopyDocument != null) {
                if (matches == null) {
                    System.arraycopy(indexMatches, 0, matches = new SearchDocument[length], 0, length);
                }
                matches[i] = workingCopyDocument;
            }
        }
    }
    if (matches == null) { // no working copy
        matches = indexMatches;
    }
    int remainingWorkingCopiesSize = workingCopyDocuments.size();
    if (remainingWorkingCopiesSize != 0) {
        System.arraycopy(matches, 0, matches = new SearchDocument[length + remainingWorkingCopiesSize], 0,
                length);
        Iterator iterator = workingCopyDocuments.values().iterator();
        int index = length;
        while (iterator.hasNext()) {
            matches[index++] = (SearchDocument) iterator.next();
        }
    }
    return matches;
}
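
Here remove does double duty: a single call both asks whether a working copy document exists for the path and, if so, pulls it out of the map, so the loop at the end only appends the working copies that matched no index result. A minimal sketch of the look-up-and-delete idiom (hypothetical data):

import java.util.HashMap;

public class LookupAndDelete {
    public static void main(String[] args) {
        HashMap<String, String> workingCopies = new HashMap<>();
        workingCopies.put("/p/A.java", "working copy of A");

        // One call both answers "is there a working copy for this path?"
        // and takes it out of the map so it is not appended again later.
        String copy = workingCopies.remove("/p/A.java");
        if (copy != null) {
            System.out.println("use " + copy);
        }
        System.out.println("remaining: " + workingCopies.size()); // 0
    }
}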

From source file:pe.gob.mef.gescon.web.ui.ConsultaMB.java

public String advanceSearch() {
    HashMap filter = new HashMap();
    try {
        if (CollectionUtils.isEmpty(this.getListaCategoriaFiltro())) {
            CategoriaService catservice = (CategoriaService) ServiceFinder.findBean("CategoriaService");
            this.setListaCategoriaFiltro(catservice.getCategoriasActived());
        }
        for (Categoria c : this.getListaCategoriaFiltro()) {
            if (c.getNcategoriaid().toString().equals(this.getCategoria())) {
                this.setSelectedCategoriaFiltro(c);
                break;
            }
        }
        this.setSelectedTipoConocimiento(new ArrayList<String>());
        if (StringUtils.isNotBlank(this.getTipoConocimiento())) {
            this.getSelectedTipoConocimiento().add(this.getTipoConocimiento());
        }
        filter.put("fCategoria", this.getCategoriesFilter());
        filter.put("fFromDate", this.getFechaInicio());
        filter.put("fToDate", this.getFechaFin());
        filter.put("fType", this.getTypesFilter());
        if (StringUtils.isNotBlank(this.getSearchText())) {
            HashMap map = Indexador.search(this.getSearchText());
            filter.put("fCodesBL", (String) map.get("codesBL"));
            filter.put("fCodesPR", (String) map.get("codesPR"));
            filter.put("fCodesC", (String) map.get("codesC"));
            filter.put("fText", this.getSearchText().trim());
        } else {
            filter.remove("fCodesBL");
            filter.remove("fCodesPR");
            filter.remove("fCodesC");
            filter.remove("fText");
        }
        filter.put("order", this.getOrdenpor());
        ConsultaService service = (ConsultaService) ServiceFinder.findBean("ConsultaService");
        this.setListaConsulta(service.getQueryFilter(filter));
        if (CollectionUtils.isEmpty(this.getListaCategoriaFiltro())) {
            CategoriaService catservice = (CategoriaService) ServiceFinder.findBean("CategoriaService");
            this.setListaCategoriaFiltro(catservice.getCategoriasActived());
            createTree(this.getListaCategoriaFiltro());
            this.setListaBreadCrumb(new ArrayList<Categoria>());
        }
        if (CollectionUtils.isEmpty(this.getSelectedTipoConocimiento())) {
            TipoConocimientoService tcservice = (TipoConocimientoService) ServiceFinder
                    .findBean("TipoConocimientoService");
            this.setListaTipoConocimientoFiltro(tcservice.getTipoConocimientos());
        }
    } catch (Exception e) {
        e.getMessage();
        e.printStackTrace();
    }
    return "/pages/consulta?faces-redirect=true";
}
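
Note that removing a key the map never contained is safe: remove simply returns null and leaves the map unchanged. In this example the removes in the else branch are effectively no-ops, because filter was created empty a few lines earlier, but they make it explicit that those keys must be absent. A quick sketch of that behaviour:

import java.util.HashMap;

public class RemoveAbsentKey {
    public static void main(String[] args) {
        HashMap<String, String> filter = new HashMap<>();
        // Removing a key that was never added is a harmless no-op.
        System.out.println(filter.remove("fText")); // null
        System.out.println(filter.size());          // 0
    }
}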

From source file:org.hyperic.hq.appdef.server.session.ServerManagerImpl.java

/**
 * Update server types
 * 
 */
public void updateServerTypes(String plugin, ServerTypeInfo[] infos) throws VetoException, NotFoundException {
    // First, put all of the infos into a Hash
    HashMap<String, ServerTypeInfo> infoMap = new HashMap<String, ServerTypeInfo>();
    for (int i = 0; i < infos.length; i++) {
        String name = infos[i].getName();
        ServerTypeInfo sinfo = infoMap.get(name);

        if (sinfo == null) {
            // first time we've seen this type
            // clone it in case we have to update the platforms
            infoMap.put(name, (ServerTypeInfo) infos[i].clone());
        } else {
            // already seen this type; just update the platforms.
            // this allows server types of the same name to support
            // different families of platforms in the plugins.
            String[] platforms = (String[]) ArrayUtil.merge(sinfo.getValidPlatformTypes(),
                    infos[i].getValidPlatformTypes(), new String[0]);
            sinfo.setValidPlatformTypes(platforms);
        }
    }

    Collection<ServerType> curServers = serverTypeDAO.findByPlugin(plugin);

    AuthzSubject overlord = authzSubjectManager.getOverlordPojo();

    for (ServerType serverType : curServers) {

        String serverName = serverType.getName();
        ServerTypeInfo sinfo = (ServerTypeInfo) infoMap.remove(serverName);

        if (sinfo == null) {
            deleteServerType(serverType, overlord, resourceGroupManager, resourceManager);
        } else {
            String curDesc = serverType.getDescription();
            Collection<PlatformType> curPlats = serverType.getPlatformTypes();
            String newDesc = sinfo.getDescription();
            String[] newPlats = sinfo.getValidPlatformTypes();
            boolean updatePlats;

            log.debug("Updating ServerType: " + serverName);

            if (!newDesc.equals(curDesc)) {
                serverType.setDescription(newDesc);
            }

            // See if we need to update the supported platforms
            updatePlats = newPlats.length != curPlats.size();
            if (updatePlats == false) {
                // Ensure that the lists are the same
                for (PlatformType pLocal : curPlats) {

                    int j;

                    for (j = 0; j < newPlats.length; j++) {
                        if (newPlats[j].equals(pLocal.getName()))
                            break;
                    }
                    if (j == newPlats.length) {
                        updatePlats = true;
                        break;
                    }
                }
            }

            if (updatePlats == true) {
                findAndSetPlatformType(newPlats, serverType);
            }
        }
    }

    // Now create the left-overs
    for (ServerTypeInfo sinfo : infoMap.values()) {
        createServerType(sinfo, plugin);
    }
}
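
The method is a reconciliation idiom built around remove: the desired state is loaded into a HashMap, each existing record consumes its entry via remove (null means the record should be deleted, non-null means update), and whatever survives in the map afterwards has no existing counterpart and must be created. A compact sketch under hypothetical names:

import java.util.*;

public class Reconcile {
    public static void reconcile(Map<String, String> desired, List<String> existing) {
        Map<String, String> pending = new HashMap<>(desired);
        for (String name : existing) {
            String info = pending.remove(name);        // consume the desired entry for this record
            if (info == null) {
                System.out.println("delete " + name);  // exists, but no longer desired
            } else {
                System.out.println("update " + name);  // present on both sides
            }
        }
        for (String name : pending.keySet()) {         // never matched an existing record
            System.out.println("create " + name);
        }
    }
}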

From source file:StorageEngineClient.CombineFileInputFormat.java

private void processsplit(JobConf job, Map.Entry<String, List<OneBlockInfo>> one,
        HashMap<OneBlockInfo, String[]> blockToNodes, long maxSize, long minSizeNode, long minSizeRack,
        List<CombineFileSplit> splits, String type) {
    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    ArrayList<String> nodes = new ArrayList<String>();
    long curSplitSize = 0;
    if (type.equals("node"))
        nodes.add(one.getKey());

    List<OneBlockInfo> blocks = null;
    if (!type.equals("all")) {
        blocks = one.getValue();
    } else {
        blocks = new ArrayList<OneBlockInfo>();
        blocks.addAll(blockToNodes.keySet());
    }

    OneBlockInfo[] blocksInNodeArr = blocks.toArray(new OneBlockInfo[blocks.size()]);
    if (job.getBoolean("hive.merge.inputfiles.sort", true)) {
        Arrays.sort(blocksInNodeArr, new Comparator<OneBlockInfo>() {
            @Override
            public int compare(OneBlockInfo o1, OneBlockInfo o2) {
                return (int) (o2.length - o1.length);
            }
        });
    }

    if (job.getBoolean("hive.merge.inputfiles.rerange", false)) {

        Random r = new Random(123456);
        OneBlockInfo tmp = null;
        for (int i = 0; i < blocksInNodeArr.length; i++) {
            int idx = r.nextInt(blocksInNodeArr.length);
            tmp = blocksInNodeArr[i];
            blocksInNodeArr[i] = blocksInNodeArr[idx];
            blocksInNodeArr[idx] = tmp;
        }
    }

    int maxFileNumPerSplit = job.getInt("hive.merge.inputfiles.maxFileNumPerSplit", 1000);

    for (int i = 0; i < blocksInNodeArr.length; i++) {
        if (blockToNodes.containsKey(blocksInNodeArr[i])) {
            if (!type.equals("node")) {
                nodes.clear();
            }

            curSplitSize = blocksInNodeArr[i].length;
            validBlocks.clear();
            validBlocks.add(blocksInNodeArr[i]);
            blockToNodes.remove(blocksInNodeArr[i]);
            if (maxSize != 0 && curSplitSize >= maxSize) {
                addCreatedSplit(job, splits, nodes, validBlocks);
            } else {
                int filenum = 1;
                for (int j = i + 1; j < blocksInNodeArr.length; j++) {
                    if (blockToNodes.containsKey(blocksInNodeArr[j])) {
                        long size1 = blocksInNodeArr[j].length;
                        if (maxSize != 0 && curSplitSize + size1 <= maxSize) {
                            curSplitSize += size1;
                            filenum++;
                            validBlocks.add(blocksInNodeArr[j]);
                            blockToNodes.remove(blocksInNodeArr[j]);
                            if (!type.equals("node"))
                                for (int k = 0; k < blocksInNodeArr[j].hosts.length; k++) {
                                    nodes.add(blocksInNodeArr[j].hosts[k]);
                                }
                        }
                        if (filenum >= maxFileNumPerSplit) {
                            break;
                        }
                    }
                }
                if (minSizeNode != 0 && curSplitSize >= minSizeNode) {
                    addCreatedSplit(job, splits, nodes, validBlocks);
                } else {
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                    break;
                }
            }
        }
    }
}
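
Throughout this method blockToNodes serves as the pool of still-unassigned blocks: containsKey asks whether a block is unclaimed, remove claims it for the split being assembled, and put returns blocks to the pool when the split comes out below minSizeNode. A toy sketch of that claim-and-return discipline (hypothetical data):

import java.util.*;

public class BlockPool {
    public static void main(String[] args) {
        // The pool of blocks not yet assigned to any split.
        HashMap<String, String[]> blockToNodes = new HashMap<>();
        blockToNodes.put("b1", new String[] { "h1" });
        blockToNodes.put("b2", new String[] { "h2" });

        List<String> split = new ArrayList<>();
        for (String block : new ArrayList<>(blockToNodes.keySet())) { // copy: we mutate the pool
            if (blockToNodes.containsKey(block)) {           // still unclaimed?
                String[] hosts = blockToNodes.remove(block); // claim it for this split
                split.add(block);
                // If the split later turns out too small, the block goes back:
                // blockToNodes.put(block, hosts);
            }
        }
        System.out.println(split.size() + " claimed, " + blockToNodes.size() + " left in pool");
    }
}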

From source file:edu.csupomona.nlp.tool.crawler.Facebook.java

/**
 * Get all the Pages match the searching keyword.
 * @param keyword               Keyword for search
 * @param onlyVerified          Return only verified pages (currently NA)
 * @return                      HashMap of Pages
 * @throws JSONException
 */
public HashMap<String, Page> getPages(String keyword, boolean onlyVerified) throws JSONException {
    HashMap<String, Page> fullPages = new HashMap<>();
    int totalLikes = 0;
    try {
        // search pages according to keyword
        ResponseList<Page> pages = fb_.searchPages(keyword);
        System.out.println(pages.size());
        int idx = 0;
        for (Page page : pages) {
            if (onlyVerified) {
                // TOTALLY GAVE UP DUE TO UNKNOWN REASON OF UNABLE TO 
                // ACCESS FQL WITH APP ACCESS TOKEN OR USER ACCESS TOKEN
                // is_verified field is only accessible through FQL
                //                    String query = "select is_verified from page where page_id=" 
                //                            + page.getId();
                //                    JSONObject json = fb_.executeFQL(query).getJSONObject(0);
                //                    boolean isVerified = json.getBoolean("is_verified");
                //                
                //                    // reduce speed
                //                    pause(1);
                //                    
                //                    if (!isVerified)
                //                        continue;
            }

            // retrieve full information of the page 
            Page fullPage = fb_.getPage(page.getId());

            fullPages.put(fullPage.getId(), fullPage);

            // records number of likes
            totalLikes += fullPage.getLikes();

            // to reduce speed
            //                pause(1);

            System.out.println(idx++);
        }
    } catch (FacebookException ex) {
        Logger.getLogger(Facebook.class.getName()).log(Level.SEVERE, null, ex);
    }

    // post processing. only keep pages with number of likes above average
    if (fullPages.isEmpty()) {
        return fullPages; // guard against divide-by-zero when the search returned nothing
    }
    int average = totalLikes / fullPages.size();
    System.out.println("Average=" + average);
    List<String> removePageIds = new ArrayList<>();
    for (String pageId : fullPages.keySet())
        if (fullPages.get(pageId).getLikes() < average) {
            System.out.println("RM: " + fullPages.get(pageId).getName() + " [L="
                    + fullPages.get(pageId).getLikes().toString() + "]");
            removePageIds.add(pageId);
        }

    for (String pageId : removePageIds)
        fullPages.remove(pageId);

    return fullPages;
}
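
The two-phase delete at the end is deliberate: calling fullPages.remove inside the loop over fullPages.keySet() would throw ConcurrentModificationException, so the ids are collected first and removed in a second pass. Since Java 8 the values view offers a one-pass alternative; a sketch, not part of the original code:

import java.util.HashMap;

public class PruneBelowAverage {
    public static void main(String[] args) {
        HashMap<String, Integer> likes = new HashMap<>();
        likes.put("pageA", 10);
        likes.put("pageB", 200);
        likes.put("pageC", 90);

        int average = 100; // computed elsewhere

        // entrySet/values views allow removal through their own iterator,
        // and removeIf wraps that, so the map can be pruned in one pass.
        likes.values().removeIf(count -> count < average);

        System.out.println(likes.keySet()); // [pageB]
    }
}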

From source file:org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.java

public static Map<VariantContext.Type, List<VariantContext>> separateVariantContextsByType(
        final Collection<VariantContext> VCs) {
    if (VCs == null) {
        throw new IllegalArgumentException("VCs cannot be null.");
    }

    final HashMap<VariantContext.Type, List<VariantContext>> mappedVCs = new HashMap<>();
    for (final VariantContext vc : VCs) {
        VariantContext.Type vcType = vc.getType();

        // look at previous variant contexts of different type. If:
        // a) otherVC has alleles which are subset of vc, remove otherVC from its list and add otherVC to vc's list
        // b) vc has alleles which are subset of otherVC. Then, add vc to otherVC's type list (rather, do nothing since vc will be added automatically to its list)
        // c) neither: do nothing, just add vc to its own list
        boolean addtoOwnList = true;
        for (final VariantContext.Type type : VariantContext.Type.values()) {
            if (type.equals(vcType))
                continue;

            if (!mappedVCs.containsKey(type))
                continue;

            List<VariantContext> vcList = mappedVCs.get(type);
            for (int k = 0; k < vcList.size(); k++) {
                VariantContext otherVC = vcList.get(k);
                if (allelesAreSubset(otherVC, vc)) {
                    // otherVC has a type different than vc and its alleles are a subset of vc: remove otherVC from its list and add it to vc's type list
                    vcList.remove(k);
                    // avoid having empty lists
                    if (vcList.isEmpty())
                        mappedVCs.remove(type);
                    if (!mappedVCs.containsKey(vcType))
                        mappedVCs.put(vcType, new ArrayList<VariantContext>());
                    mappedVCs.get(vcType).add(otherVC);
                    break;
                } else if (allelesAreSubset(vc, otherVC)) {
                    // vc has a type different than otherVC and its alleles are a subset of VC: add vc to otherVC's type list and don't add to its own
                    mappedVCs.get(type).add(vc);
                    addtoOwnList = false;
                    break;
                }
            }
        }
        if (addtoOwnList) {
            if (!mappedVCs.containsKey(vcType))
                mappedVCs.put(vcType, new ArrayList<VariantContext>());
            mappedVCs.get(vcType).add(vc);
        }
    }

    return mappedVCs;
}
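
Two different removes meet in this loop: vcList.remove(k) is List.remove(int index), while mappedVCs.remove(type) is the HashMap method documented on this page, used here to preserve the invariant that the map never maps a type to an empty list. A sketch of that invariant-keeping idiom:

import java.util.*;

public class RemoveEmptyBucket {
    public static void main(String[] args) {
        HashMap<String, List<Integer>> buckets = new HashMap<>();
        buckets.computeIfAbsent("snp", k -> new ArrayList<>()).add(42);

        List<Integer> bucket = buckets.get("snp");
        bucket.remove(0);                 // List.remove(int): removes by index
        if (bucket.isEmpty()) {
            buckets.remove("snp");        // HashMap.remove: drop the now-empty bucket
        }
        System.out.println(buckets.containsKey("snp")); // false
    }
}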

From source file:org.sakaiproject.tool.assessment.ui.bean.author.ItemAuthorBean.java

private List prepareItemAttachment(ItemDataIfc item, boolean isEditPendingAssessmentFlow) {
    ToolSession session = SessionManager.getCurrentToolSession();
    if (session.getAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS) != null) {

        Set attachmentSet = new HashSet();
        if (item != null) {
            attachmentSet = item.getItemAttachmentSet();
        }
        HashMap map = getResourceIdHash(attachmentSet);
        ArrayList newAttachmentList = new ArrayList();

        AssessmentService assessmentService = new AssessmentService();
        String protocol = ContextUtil.getProtocol();

        List refs = (List) session.getAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS);
        if (refs != null && refs.size() > 0) {
            Reference ref;

            for (int i = 0; i < refs.size(); i++) {
                ref = (Reference) refs.get(i);
                String resourceId = ref.getId();
                if (map.get(resourceId) == null) {
                    // new attachment, add 
                    log.debug("**** ref.Id=" + ref.getId());
                    log.debug("**** ref.name="
                            + ref.getProperties().getProperty(ref.getProperties().getNamePropDisplayName()));
                    ItemAttachmentIfc newAttach = assessmentService.createItemAttachment(item, ref.getId(),
                            ref.getProperties().getProperty(ref.getProperties().getNamePropDisplayName()),
                            protocol, isEditPendingAssessmentFlow);
                    newAttachmentList.add(newAttach);
                } else {
                    // attachment already exist, let's add it to new list and
                    // check it off from map
                    newAttachmentList.add((ItemAttachmentIfc) map.get(resourceId));
                    map.remove(resourceId);
                }
            }
        }

        session.removeAttribute(FilePickerHelper.FILE_PICKER_ATTACHMENTS);
        session.removeAttribute(FilePickerHelper.FILE_PICKER_CANCEL);
        return newAttachmentList;
    } else if (item == null) {
        return new ArrayList();
    } else
        return item.getItemAttachmentList();
}
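
In the else branch, map.get(resourceId) confirms the attachment exists and map.remove(resourceId) then checks it off. Since remove itself returns the previous value, the pair can be fused into a single call; a sketch of the condensed form (hypothetical data):

import java.util.HashMap;

public class CheckOff {
    public static void main(String[] args) {
        HashMap<String, String> existing = new HashMap<>();
        existing.put("res-1", "attachment for res-1");

        // remove returns the previous value, so one call replaces
        // the get-then-remove pair: non-null means "already attached".
        String attach = existing.remove("res-1");
        if (attach != null) {
            System.out.println("reuse " + attach);
        } else {
            System.out.println("create a new attachment");
        }
    }
}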

From source file:StorageEngineClient.CombineFileInputFormat.java

private void getMoreSplits(JobConf job, Path[] paths1, long maxSize, long minSizeNode, long minSizeRack,
        List<CombineFileSplit> splits) throws IOException, NullGzFileException {
    if (paths1.length == 0) {
        return;
    }

    Path[] paths = paths1;
    ArrayList<Path> splitable = new ArrayList<Path>();
    ArrayList<Path> unsplitable = new ArrayList<Path>();
    for (int i = 0; i < paths1.length; i++) {
        if (isSplitable(paths1[i].getFileSystem(job), paths1[i])) {
            splitable.add(paths1[i]);
        } else {
            unsplitable.add(paths1[i]);
        }
    }
    if (unsplitable.size() != 0) {
        paths = new Path[splitable.size()];
        splitable.toArray(paths);
    }

    OneFileInfo[] files;

    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    files = new OneFileInfo[paths.length];

    long totLength = 0;
    for (int i = 0; i < paths.length; i++) {
        files[i] = new OneFileInfo(paths[i], job, rackToBlocks, blockToNodes, nodeToBlocks);
        totLength += files[i].getLength();
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onenode = iter.next();
        this.processsplit(job, onenode, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "node");
    }

    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> onerack = iter.next();
        this.processsplit(job, onerack, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "rack");
    }

    this.processsplit(job, null, blockToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");

    int maxFileNumPerSplit = job.getInt("hive.merge.inputfiles.maxFileNumPerSplit", 1000);

    HashSet<OneBlockInfo> hs = new HashSet<OneBlockInfo>();
    while (blockToNodes.size() > 0) {
        ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
        List<String> nodes = new ArrayList<String>();
        int filenum = 0;
        hs.clear();
        for (OneBlockInfo blockInfo : blockToNodes.keySet()) {
            validBlocks.add(blockInfo);
            filenum++;
            for (String host : blockInfo.hosts) {
                nodes.add(host);
            }
            hs.add(blockInfo);
            if (filenum >= maxFileNumPerSplit) {
                break;
            }
        }
        for (OneBlockInfo blockInfo : hs) {
            blockToNodes.remove(blockInfo);
        }
        this.addCreatedSplit(job, splits, nodes, validBlocks);
    }

    if (unsplitable.size() != 0) {

        HashMap<OneBlockInfo, String[]> fileToNodes = new HashMap<OneBlockInfo, String[]>();

        for (Path path : unsplitable) {
            FileSystem fs = path.getFileSystem(job);
            FileStatus stat = fs.getFileStatus(path);
            long len = fs.getFileStatus(path).getLen();
            BlockLocation[] locations = path.getFileSystem(job).getFileBlockLocations(stat, 0, len);
            if (locations.length == 0) {
                console.printError("The file " + path.toUri().toString() + " may be empty, please check it!");
                throw new NullGzFileException(
                        "The file " + path.toUri().toString() + " may be empty, please check it!");
            }

            LOG.info("unsplitable file:" + path.toUri().toString() + " length:" + len);

            OneBlockInfo oneblock = new OneBlockInfo(path, 0, len, locations[0].getHosts(),
                    locations[0].getTopologyPaths());
            fileToNodes.put(oneblock, locations[0].getHosts());
        }

        this.processsplitForUnsplit(job, null, fileToNodes, maxSize, minSizeNode, minSizeRack, splits, "all");
    }
}
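
The temporary HashSet hs in the drain loop exists because removing from blockToNodes while iterating its keySet would throw ConcurrentModificationException: blocks are collected during iteration and removed afterwards. The second loop is equivalent to blockToNodes.keySet().removeAll(hs), since the key set is a live view that writes removals through to the map; a sketch:

import java.util.*;

public class DrainByBatch {
    public static void main(String[] args) {
        HashMap<String, Integer> blockToNodes = new HashMap<>();
        blockToNodes.put("b1", 1);
        blockToNodes.put("b2", 2);
        blockToNodes.put("b3", 3);

        Set<String> claimed = new HashSet<>();
        for (String block : blockToNodes.keySet()) {
            claimed.add(block);             // collect while iterating; no mutation yet
            if (claimed.size() >= 2) {
                break;                      // batch limit, like maxFileNumPerSplit
            }
        }
        blockToNodes.keySet().removeAll(claimed); // the key set is a live view of the map
        System.out.println(blockToNodes.size());  // 1
    }
}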

From source file:org.apache.hadoop.mapred.lib.CombineFileInputFormat.java

/**
 * Return all the splits in the specified set of paths
 */
private void getMoreSplits(JobConf job, Collection<LocatedFileStatus> stats, long maxSize, long minSizeNode,
        long minSizeRack, long maxNumBlocksPerSplit, List<CombineFileSplit> splits) throws IOException {

    // all blocks for all the files in input set
    OneFileInfo[] files;

    // mapping from a rack name to the list of blocks it has
    HashMap<String, List<OneBlockInfo>> rackToBlocks = new HashMap<String, List<OneBlockInfo>>();

    // mapping from a block to the nodes on which it has replicas
    HashMap<OneBlockInfo, String[]> blockToNodes = new HashMap<OneBlockInfo, String[]>();

    // mapping from a node to the list of blocks that it contains
    HashMap<String, List<OneBlockInfo>> nodeToBlocks = new HashMap<String, List<OneBlockInfo>>();

    if (stats.isEmpty()) {
        return;
    }
    files = new OneFileInfo[stats.size()];

    // populate all the blocks for all files
    long totLength = 0;
    int fileIndex = 0;
    for (LocatedFileStatus oneStatus : stats) {
        files[fileIndex] = new OneFileInfo(oneStatus, job,
                isSplitable(FileSystem.get(job), oneStatus.getPath()), rackToBlocks, blockToNodes, nodeToBlocks,
                rackToNodes, maxSize);
        totLength += files[fileIndex].getLength();
        fileIndex++;
    }

    // Sort the blocks on each node from biggest to smallest by size to
    // encourage more node-local single block splits
    sortBlocksBySize(nodeToBlocks);

    ArrayList<OneBlockInfo> validBlocks = new ArrayList<OneBlockInfo>();
    Set<String> nodes = new HashSet<String>();
    long curSplitSize = 0;

    // process all nodes and create splits that are local
    // to a node.
    for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = nodeToBlocks.entrySet().iterator(); iter
            .hasNext();) {

        Map.Entry<String, List<OneBlockInfo>> one = iter.next();
        nodes.add(one.getKey());
        List<OneBlockInfo> blocksInNode = one.getValue();

        // for each block, copy it into validBlocks. Delete it from
        // blockToNodes so that the same block does not appear in
        // two different splits.
        for (OneBlockInfo oneblock : blocksInNode) {
            if (blockToNodes.containsKey(oneblock)) {
                validBlocks.add(oneblock);
                blockToNodes.remove(oneblock);
                curSplitSize += oneblock.length;

                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if ((maxSize != 0 && curSplitSize >= maxSize)
                        || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
                    // create an input split and add it to the splits array
                    // if only one block, add all the node replicas
                    if (validBlocks.size() == 1) {
                        Set<String> blockLocalNodes = new HashSet<String>(
                                Arrays.asList(validBlocks.get(0).hosts));
                        addCreatedSplit(job, splits, blockLocalNodes, validBlocks);
                        addStatsForSplitType(SplitType.SINGLE_BLOCK_LOCAL, curSplitSize, blockLocalNodes.size(),
                                validBlocks.size());
                    } else {
                        addCreatedSplit(job, splits, nodes, validBlocks);
                        addStatsForSplitType(SplitType.NODE_LOCAL, curSplitSize, nodes.size(),
                                validBlocks.size());
                    }
                    curSplitSize = 0;
                    validBlocks.clear();
                }
            }
        }
        // if there were any blocks left over and their combined size is
        // larger than minSplitNode, then combine them into one split.
        // Otherwise add them back to the unprocessed pool. It is likely
        // that they will be combined with other blocks from the same rack later on.
        if (minSizeNode != 0 && curSplitSize >= minSizeNode) {
            // create an input split and add it to the splits array
            addCreatedSplit(job, splits, nodes, validBlocks);
            addStatsForSplitType(SplitType.NODE_LOCAL_LEFTOVER, curSplitSize, nodes.size(), validBlocks.size());
        } else {
            for (OneBlockInfo oneblock : validBlocks) {
                blockToNodes.put(oneblock, oneblock.hosts);
            }
        }
        validBlocks.clear();
        nodes.clear();
        curSplitSize = 0;
    }

    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these overflow
    // blocks will be combined into splits.
    ArrayList<OneBlockInfo> overflowBlocks = new ArrayList<OneBlockInfo>();
    Set<String> racks = new HashSet<String>();

    // Process all racks over and over again until there is no more work to do.
    boolean noRacksMadeSplit = false;
    while (blockToNodes.size() > 0) {

        // Create one split for this rack before moving over to the next rack.
        // Come back to this rack after creating a single split for each of the
        // remaining racks.
        // Process one rack location at a time, Combine all possible blocks that
        // reside on this rack as one split. (constrained by minimum and maximum
        // split size).

        // Iterate over all racks.  Add to the overflow blocks only if at least
        // one pass over all the racks was completed without adding any splits
        long splitsAddedOnAllRacks = 0;
        for (Iterator<Map.Entry<String, List<OneBlockInfo>>> iter = rackToBlocks.entrySet().iterator(); iter
                .hasNext();) {

            Map.Entry<String, List<OneBlockInfo>> one = iter.next();
            racks.add(one.getKey());
            List<OneBlockInfo> blocks = one.getValue();

            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            boolean createdSplit = false;
            for (OneBlockInfo oneblock : blocks) {
                if (blockToNodes.containsKey(oneblock)) {
                    validBlocks.add(oneblock);
                    blockToNodes.remove(oneblock);
                    curSplitSize += oneblock.length;

                    // if the accumulated split size exceeds the maximum, then
                    // create this split.
                    if ((maxSize != 0 && curSplitSize >= maxSize)
                            || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
                        // create an input split and add it to the splits array
                        addCreatedSplit(job, splits, getHosts(racks), validBlocks);
                        addStatsForSplitType(SplitType.RACK_LOCAL, curSplitSize, getHosts(racks).size(),
                                validBlocks.size());
                        createdSplit = true;
                        ++splitsAddedOnAllRacks;
                        break;
                    }
                }
            }

            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.clear();
                racks.clear();
                continue;
            }

            if (!validBlocks.isEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    // otherwise, store these blocks into overflow data structure
                    addCreatedSplit(job, splits, getHosts(racks), validBlocks);
                    addStatsForSplitType(SplitType.RACK_LOCAL_LEFTOVER, curSplitSize, getHosts(racks).size(),
                            validBlocks.size());
                    ++splitsAddedOnAllRacks;
                } else if (!noRacksMadeSplit) {
                    // Add the blocks back if a pass on all rack found at least one
                    // split or this is the first pass
                    for (OneBlockInfo oneblock : validBlocks) {
                        blockToNodes.put(oneblock, oneblock.hosts);
                    }
                } else {
                    // There were a few blocks in this rack that remained to be processed.
                    // Keep them in 'overflow' block list. These will be combined later.
                    overflowBlocks.addAll(validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }

        if (splitsAddedOnAllRacks == 0) {
            noRacksMadeSplit = true;
        }
    }

    assert blockToNodes.isEmpty();
    assert curSplitSize == 0;
    assert validBlocks.isEmpty();
    assert racks.isEmpty();

    // Process all overflow blocks
    for (OneBlockInfo oneblock : overflowBlocks) {
        validBlocks.add(oneblock);
        curSplitSize += oneblock.length;

        // This might cause an existing rack location to be re-added,
        // but it should be OK because racks is a Set.
        for (int i = 0; i < oneblock.racks.length; i++) {
            racks.add(oneblock.racks[i]);
        }

        // if the accumulated split size exceeds the maximum, then
        // create this split.
        if ((maxSize != 0 && curSplitSize >= maxSize)
                || (maxNumBlocksPerSplit > 0 && validBlocks.size() >= maxNumBlocksPerSplit)) {
            // create an input split and add it to the splits array
            addCreatedSplit(job, splits, getHosts(racks), validBlocks);
            addStatsForSplitType(SplitType.OVERFLOW, curSplitSize, getHosts(racks).size(), validBlocks.size());
            curSplitSize = 0;
            validBlocks.clear();
            racks.clear();
        }
    }

    // Process any remaining blocks, if any.
    if (!validBlocks.isEmpty()) {
        addCreatedSplit(job, splits, getHosts(racks), validBlocks);
        addStatsForSplitType(SplitType.OVERFLOW_LEFTOVER, curSplitSize, getHosts(racks).size(),
                validBlocks.size());
    }
}
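
Both the node and rack loops guard each remove with containsKey. Because remove reports the previous mapping, the pair can be fused into one call, which also hashes the key only once; this is safe here since the map never stores null values. A sketch:

import java.util.HashMap;

public class TestAndRemove {
    public static void main(String[] args) {
        HashMap<String, String[]> blockToNodes = new HashMap<>();
        blockToNodes.put("block-0", new String[] { "host1", "host2" });

        // containsKey + remove fused into one call: a non-null result means the
        // block was still unassigned and has now been claimed. Safe here because
        // the map never stores null values.
        String[] hosts = blockToNodes.remove("block-0");
        if (hosts != null) {
            System.out.println("claimed block replicated on " + hosts.length + " hosts");
        }
    }
}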