Example usage for java.util Map.Entry get

Introduction

On this page you can find example usages of java.util Map.Entry together with the get method, collected from real source files.

Prototype

V get(Object key);

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
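
Before looking at the project sources below, here is a minimal, self-contained sketch of the pattern most of these examples rely on: iterating a map's entry set via Map.Entry and calling get, either on the map itself or on a nested value. The class name, keys, and SQL strings are invented for illustration and do not come from the projects listed on this page.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MapEntryGetExample {
    public static void main(String[] args) {
        // A small map whose values are themselves lists, as in the examples below.
        Map<String, List<String>> statements = new HashMap<String, List<String>>();
        statements.put("insert_target_p", Arrays.asList("INSERT INTO target_p SELECT * FROM source"));
        statements.put("insert_target_r", Arrays.asList("INSERT INTO target_r SELECT * FROM source"));

        // Iterate the entry set: getKey()/getValue() read each mapping,
        // and get(...) on the value pulls out an element of the list.
        for (Map.Entry<String, List<String>> e : statements.entrySet()) {
            String proc = e.getKey();
            String firstStatement = e.getValue().get(0);
            System.out.println(proc + " -> " + firstStatement);
        }

        // Map.get(Object key) returns the mapped value, or null if there is no mapping.
        List<String> missing = statements.get("no_such_key");
        System.out.println("lookup of an absent key returns: " + missing);
    }
}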

Usage

From source file:org.voltdb.regressionsuites.TestInsertIntoSelectSuite.java

public void testInsertIntoSelectGeneratedProcs() throws Exception {
    Set<Map.Entry<String, List<String>>> allEntries = mapOfAllGeneratedStatements().entrySet();
    System.out.println("\n\nRUNNING testInsertIntoSelectGeneratedProcs with " + allEntries.size()
            + " stored procedures\n\n");

    final Client client = getClient();
    initializeTables(client);

    for (long partitioningValue = 4; partitioningValue < 11; partitioningValue++) {
        for (Map.Entry<String, List<String>> e : allEntries) {
            clearTargetTables(client);

            // The strategy here is:
            //   Insert rows via stored procedure that invokes INSERT INTO ... <some_query>.
            //   Select the inserted rows back, compare with the table produced by <some_query>,
            //     verify the tables are equal.
            //   Do the same verification with ad hoc SQL.

            String proc = e.getKey();
            boolean needsParams = (numberOfParametersNeeded(proc) > 0);

            String prefix = "Assertion failed running stored procedure " + proc + ": ";

            // insert rows with stored procedure
            ClientResponse resp;
            if (needsParams) {
                resp = client.callProcedure(proc, partitioningValue);
            } else {
                resp = client.callProcedure(proc);
            }
            assertEquals(prefix + "procedure call failed", ClientResponse.SUCCESS, resp.getStatus());
            VoltTable insertResult = resp.getResults()[0];
            insertResult.advanceRow();

            // make sure we actually inserted something
            long numRowsInserted = insertResult.getLong(0);

            // fetch the rows we just inserted
            if (proc.contains("target_p")) {
                resp = client.callProcedure("get_all_target_p_rows");
            } else {
                resp = client.callProcedure("get_all_target_r_rows");
            }
            assertEquals(prefix + "could not fetch rows of target table", ClientResponse.SUCCESS,
                    resp.getStatus());
            VoltTable actualRows = resp.getResults()[0];

            if (needsParams) {
                resp = client.callProcedure("verify_" + proc, partitioningValue);
            } else {
                resp = client.callProcedure("verify_" + proc);
            }
            // Fetch the rows we expect to have inserted
            assertEquals(prefix + "could not verify rows of target table", ClientResponse.SUCCESS,
                    resp.getStatus());
            VoltTable expectedRows = resp.getResults()[0];

            assertTablesAreEqual(prefix, expectedRows, actualRows);
            int actualNumRows = actualRows.getRowCount();
            assertEquals(prefix + "insert statement returned " + numRowsInserted + " but only " + actualNumRows
                    + " rows selected from target table", actualNumRows, numRowsInserted);

            // Now try the corresponding ad hoc statement
            String adHocQuery = e.getValue().get(0);
            prefix = "Assertion failed running ad hoc SQL: " + adHocQuery;
            clearTargetTables(client);

            // insert rows with the ad hoc statement
            if (needsParams) {
                resp = client.callProcedure("@AdHoc", adHocQuery, partitioningValue);
            } else {
                resp = client.callProcedure("@AdHoc", adHocQuery);
            }
            assertEquals(prefix + "ad hoc statement failed", ClientResponse.SUCCESS, resp.getStatus());
            insertResult = resp.getResults()[0];
            insertResult.advanceRow();

            numRowsInserted = insertResult.getLong(0);

            // fetch the rows we just inserted
            if (proc.contains("target_p")) {
                resp = client.callProcedure("get_all_target_p_rows");
            } else {
                resp = client.callProcedure("get_all_target_r_rows");
            }
            assertEquals(prefix + "could not fetch rows of target table", ClientResponse.SUCCESS,
                    resp.getStatus());
            actualRows = resp.getResults()[0];

            expectedRows.resetRowPosition();
            assertTablesAreEqual(prefix, expectedRows, actualRows);
            actualNumRows = actualRows.getRowCount();
            assertEquals(prefix + "insert statement returned " + numRowsInserted + " but only " + actualNumRows
                    + " rows selected from target table", actualNumRows, numRowsInserted);
        }
    }
}

From source file:edu.ku.brc.specify.tasks.subpane.qb.QueryBldrPane.java

/**
 * @return
 */
public static Vector<QueryFieldPanel> getQueryFieldPanelsForMapping(
        final QueryFieldPanelContainerIFace container, Set<SpQueryField> fields, final TableTree tblTree,
        final Hashtable<String, TableTree> ttHash, final Component saveBtn, SpExportSchemaMapping schemaMapping,
        List<String> missingFlds, Map<String, Vector<MappedFieldInfo>> autoMaps) {
    Vector<QueryFieldPanel> result = new Vector<QueryFieldPanel>();
    //Need to change columnDefStr if mapMode...
    //result.add(bldQueryFieldPanel(this, null, null, getColumnDefStr(), saveBtn));
    result.add(new QueryFieldPanel(container, null, container.getColumnDefStr(), saveBtn, null, schemaMapping,
            null));

    Vector<SpExportSchemaItem> sis = new Vector<SpExportSchemaItem>();
    if (schemaMapping.getSpExportSchema() != null) {
        sis.addAll(schemaMapping.getSpExportSchema().getSpExportSchemaItems());
    }
    Collections.sort(sis, new Comparator<SpExportSchemaItem>() {

        /* (non-Javadoc)
         * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
         */
        @Override
        public int compare(SpExportSchemaItem o1, SpExportSchemaItem o2) {
            return o1.getFieldName().compareTo(o2.getFieldName());
        }

    });
    for (SpExportSchemaItem schemaItem : sis) {
        //System.out.println("SchemaItem: " + schemaItem.getFieldName());
        boolean autoMapped = false;
        SpQueryField fld = getQueryFieldMapping(schemaMapping, schemaItem);
        FieldQRI fieldQRI = null;
        if (fld == null && autoMaps != null) {
            Vector<MappedFieldInfo> mappedTos = autoMaps.get(schemaItem.getFieldName().toLowerCase());

            if (mappedTos != null) {
                for (MappedFieldInfo mappedTo : mappedTos) {
                    fieldQRI = getFieldQRI(tblTree, mappedTo.getFieldName(), mappedTo.isRel(),
                            mappedTo.getStringId(), getTableIds(mappedTo.getTableIds()), 0, ttHash);
                    if (fieldQRI != null) {
                        if (!fieldQRI.isFieldHidden()) {
                            autoMapped = true;
                        } else {
                            fieldQRI = null;
                        }
                        break;
                    }
                }
            }
            //result.add(new QueryFieldPanel(container, null, 
            //   container.getColumnDefStr(), saveBtn, fld, schemaMapping, schemaItem));
        } else if (fld != null) {
            fieldQRI = getFieldQRI(tblTree, fld.getFieldName(), fld.getIsRelFld() != null && fld.getIsRelFld(),
                    fld.getStringId(), getTableIds(fld.getTableList()), 0, ttHash);
        }
        if (fieldQRI != null) {
            QueryFieldPanel newPanel = new QueryFieldPanel(container, fieldQRI, container.getColumnDefStr(),
                    saveBtn, fld, schemaMapping, schemaItem);
            newPanel.setAutoMapped(autoMapped);
            result.add(newPanel);
            fieldQRI.setIsInUse(true);
            if (fieldQRI.isFieldHidden() && !container.isPromptMode() && !container.isForSchemaExport()) {
                UIRegistry.showLocalizedMsg("QB_FIELD_HIDDEN_TITLE", "QB_FIELD_HIDDEN_SHOULD_REMOVE",
                        fieldQRI.getTitle());
            }
        } else if (fld != null) {
            log.error("Couldn't find [" + fld.getFieldName() + "] [" + fld.getTableList() + "]");
            if (!container.isForSchemaExport() && !container.isPromptMode()) {
                for (SpQueryField field : fields) {
                    //ain't superstitious but checking ids in case 
                    //fld and field are different java objects
                    if (field.getId().equals(fld.getId())) {
                        SpExportSchemaItemMapping mappingForField = null;
                        for (SpExportSchemaItemMapping m : schemaMapping.getMappings()) {
                            if (m.getQueryField() != null && field.getId().equals(m.getQueryField().getId())) {
                                mappingForField = m;
                                break;
                            }
                        }
                        if (mappingForField != null) {
                            schemaMapping.getMappings().remove(mappingForField);
                            mappingForField.setExportSchemaItem(null);
                            mappingForField.setExportSchemaMapping(null);
                            mappingForField.setQueryField(null);
                        }
                        fields.remove(field);
                        field.setQuery(null);
                        fld.setQuery(null);
                        break;
                    }
                }
            }
            if (missingFlds != null) {
                String fldText = fld.getColumnAlias() != null ? fld.getColumnAlias() : fld.getFieldName();
                missingFlds.add(fldText);
            }
        }
    }

    List<SpQueryField> toRemove = new ArrayList<SpQueryField>();

    //add 'auto-mapped' fields not mapped to a concept
    if (autoMaps != null && fields.size() == 0 /* a new mapping */) {
        int cnt = 0;
        for (Map.Entry<String, Vector<MappedFieldInfo>> me : autoMaps.entrySet()) {
            if (me.getKey().startsWith("Unmapped:")) {
                MappedFieldInfo fi = me.getValue().get(0);
                SpQueryField fld = new SpQueryField();
                fld.initialize();

                fld.setIsNot(false);
                fld.setAlwaysFilter(false);
                fld.setIsPrompt(true);
                fld.setIsRelFld(false);
                fld.setSortType(Byte.valueOf("0"));
                fld.setPosition(Short.valueOf(String.valueOf(result.size() - 1 + cnt++)));

                fld.setSpQueryFieldId(-1);
                fld.setIsDisplay(false);
                fld.setOperStart(fi.getOperator());
                fld.setFieldName(fi.getFieldName());
                fld.setStringId(fi.getStringId());
                fld.setTableList(fi.getTableIds());
                fld.setContextTableIdent(fi.getContextTableId());

                fields.add(fld);
                toRemove.add(fld);
            }
        }

    }

    //now add un-mapped fields
    for (SpQueryField fld : fields) {
        //int insertAt = 0;
        if (fld.getMapping() == null || fld.getMapping().getExportSchemaItem() == null) {
            FieldQRI fieldQRI = getFieldQRI(tblTree, fld.getFieldName(),
                    fld.getIsRelFld() != null && fld.getIsRelFld(), fld.getStringId(),
                    getTableIds(fld.getTableList()), 0, ttHash);
            if (fieldQRI != null) {
                //                 result.insertElementAt(new QueryFieldPanel(container, fieldQRI, 
                //                        container.getColumnDefStr(), saveBtn, fld, null, true), insertAt++);
                QueryFieldPanel newQfp = new QueryFieldPanel(container, fieldQRI, container.getColumnDefStr(),
                        saveBtn, fld, schemaMapping, null);
                result.add(newQfp);
                fieldQRI.setIsInUse(true);
                if (fld.getSpQueryFieldId() == -1) {
                    newQfp.setAutoMapped(true);
                    newQfp.setQueryFieldForAutomapping(null);
                }
                if (fieldQRI.isFieldHidden() && !container.isPromptMode() && !container.isForSchemaExport()) {
                    UIRegistry.showLocalizedMsg("QB_FIELD_HIDDEN_TITLE", "QB_FIELD_HIDDEN_SHOULD_REMOVE",
                            fieldQRI.getTitle());
                }
            } else {
                log.error("Couldn't find [" + fld.getFieldName() + "] [" + fld.getTableList() + "]");
                if (!container.isForSchemaExport() && !container.isPromptMode()) {
                    for (SpQueryField field : fields) {
                        //ain't superstitious but checking ids in case 
                        //fld and field are different java objects
                        if (field.getId().equals(fld.getId())) {
                            SpExportSchemaItemMapping mappingForField = null;
                            for (SpExportSchemaItemMapping m : schemaMapping.getMappings()) {
                                if (m.getQueryField() != null
                                        && field.getId().equals(m.getQueryField().getId())) {
                                    mappingForField = m;
                                    break;
                                }
                            }
                            if (mappingForField != null) {
                                schemaMapping.getMappings().remove(mappingForField);
                                mappingForField.setExportSchemaItem(null);
                                mappingForField.setExportSchemaMapping(null);
                                mappingForField.setQueryField(null);
                            }
                            toRemove.add(field);
                            field.setQuery(null);
                            fld.setQuery(null);
                            break;
                        }
                    }
                }
                if (missingFlds != null) {
                    missingFlds.add(fld.getColumnAlias());
                }
            }
        }
    }
    for (SpQueryField f : toRemove) {
        fields.remove(f);
    }

    // now add placeHolder panel for adding new condition
    //result.add(new QueryFieldPanel(container, null, 
    //      container.getColumnDefStr(), saveBtn, null, null, null, true));

    //now sort on queryfield position
    Collections.sort(result, new Comparator<QueryFieldPanel>() {

        /* (non-Javadoc)
         * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object)
         */
        @Override
        public int compare(QueryFieldPanel o1, QueryFieldPanel o2) {
            SpQueryField f1 = o1.getQueryField();
            SpQueryField f2 = o2.getQueryField();
            if (f1 != null && f2 != null) {
                return f1.getPosition().compareTo(f2.getPosition());
            } else if (f1 != null) {
                return 1;
            } else if (f2 != null) {
                return -1;
            } else
                return 0;
        }

    });

    return result;
}

From source file:org.wso2.carbon.is.migration.client.MigrateFrom520to530.java

public boolean migrateClaimData() {
    List<Claim> claims = new ArrayList<>();
    Connection umConnection = null;

    PreparedStatement loadDialectsStatement = null;
    PreparedStatement loadMappedAttributeStatement;

    PreparedStatement updateRole = null;

    ResultSet dialects = null;
    ResultSet claimResultSet;
    StringBuilder report = new StringBuilder();
    report.append(
            "---------------------------------- WSO2 Identity Server 5.3.0 claim Migration Report -----------------------------------------\n \n");

    report.append(
            "\n\n------------------------------------------------- Validating Existing Claims----------------------------------------------\n \n");

    //Whether the validation succeeded. If not, additional claims will be created so that it can succeed.
    boolean isSuccess = true;
    //Counter used to number entries in the error report.
    int count = 1;
    try {
        umConnection = umDataSource.getConnection();

        umConnection.setAutoCommit(false);

        loadDialectsStatement = umConnection.prepareStatement(SQLConstants.LOAD_CLAIM_DIALECTS);
        dialects = loadDialectsStatement.executeQuery();

        //This is used for validating multiple mapped attributes in each dialect. Format: dialectURL -->
        // MappedAttribute --> List of claim URIs. If any mapped attribute corresponds to multiple claim
        // URIs, the validation should be false.
        Map<String, Map<String, List<String>>> data = new HashMap<>();

        while (dialects.next()) {
            //Keep the list of claim URI against domain Qualified Mapped Attribute
            Map<String, List<String>> mappedAttributes = new HashMap<>();

            int dialectId = dialects.getInt("UM_ID");
            String dialectUri = dialects.getString("UM_DIALECT_URI");
            int tenantId = dialects.getInt("UM_TENANT_ID");

            loadMappedAttributeStatement = umConnection.prepareStatement(SQLConstants.LOAD_MAPPED_ATTRIBUTE);
            loadMappedAttributeStatement.setInt(1, dialectId);
            claimResultSet = loadMappedAttributeStatement.executeQuery();

            //Read all records in UM_CLAIM one by one
            while (claimResultSet.next()) {
                List<String> claimURIs;
                String attribute = claimResultSet.getString("UM_MAPPED_ATTRIBUTE");
                String claimURI = claimResultSet.getString("UM_CLAIM_URI");
                String displayTag = claimResultSet.getString("UM_DISPLAY_TAG");
                String description = claimResultSet.getString("UM_DESCRIPTION");
                String mappedAttributeDomain = claimResultSet.getString("UM_MAPPED_ATTRIBUTE_DOMAIN");
                String regEx = claimResultSet.getString("UM_REG_EX");
                int supportedByDefault = claimResultSet.getInt("UM_SUPPORTED");
                int required = claimResultSet.getInt("UM_REQUIRED");
                int displayOrder = claimResultSet.getInt("UM_DISPLAY_ORDER");
                int readOnly = claimResultSet.getInt("UM_READ_ONLY");

                boolean isRequired = required == 1;
                boolean isSupportedByDefault = supportedByDefault == 1;
                boolean isReadOnly = readOnly == 1;

                Claim claimDTO = new Claim(claimURI, displayTag, description, regEx, isSupportedByDefault,
                        isRequired, displayOrder, isReadOnly, tenantId, dialectUri);
                if (claims.contains(claimDTO)) {
                    for (Claim claim : claims) {
                        if (claim.equals(claimDTO)) {
                            MappedAttribute mappedAttribute = new MappedAttribute(attribute,
                                    mappedAttributeDomain);
                            claim.getAttributes().add(mappedAttribute);
                            break;
                        }
                    }
                } else {
                    MappedAttribute mappedAttribute = new MappedAttribute(attribute, mappedAttributeDomain);
                    List<MappedAttribute> mappedAttributesList = claimDTO.getAttributes();
                    mappedAttributesList.add(mappedAttribute);
                    claimDTO.setAttributes(mappedAttributesList);
                    claims.add(claimDTO);
                }

                String domainQualifiedAttribute;
                if (StringUtils.isBlank(mappedAttributeDomain)) {
                    domainQualifiedAttribute = attribute;
                } else {
                    domainQualifiedAttribute = mappedAttributeDomain + "/" + attribute;
                }
                if (mappedAttributes.get(domainQualifiedAttribute) != null) {
                    claimURIs = mappedAttributes.get(domainQualifiedAttribute);
                } else {
                    claimURIs = new ArrayList<>();
                }

                claimURIs.add(claimURI);
                mappedAttributes.put(domainQualifiedAttribute, claimURIs);
            }

            //get the tenant qualified dialect URL
            dialectUri = dialectUri + "@" + IdentityTenantUtil.getTenantDomain(tenantId);
            data.put(dialectUri, mappedAttributes);
        }

        //This is used to keep the mapped attributes in each dialect in each tenant.
        // Format is tenantDomain:dialectURL --> List of Mapped Attributes. If any remote dialect has a mapped
        // attribute which does not match any of the local claims' mapped attributes, the validation should be
        // false.
        Map<String, Map<String, List<String>>> tenantDialectMappedAttributes = new HashMap<>();

        for (Map.Entry<String, Map<String, List<String>>> entry : data.entrySet()) {

            //This is used to keep the mapped attributes against dialect URI
            Map<String, List<String>> dialectMappedAttributes = new HashMap<>();

            List<String> attributes = new ArrayList<>();
            String dialect = entry.getKey();
            String[] split = dialect.split("@");
            //separate the dialect URL and tenant domain from domain qualified dialect URL
            dialect = split[0];
            String tenantDomain = split[1];
            if (tenantDialectMappedAttributes.get(tenantDomain) != null) {
                dialectMappedAttributes = tenantDialectMappedAttributes.get(tenantDomain);
            }

            if (dialectMappedAttributes.get(dialect) != null) {
                attributes = dialectMappedAttributes.get(dialect);
            }

            if (entry.getValue() != null) {
                for (Map.Entry<String, List<String>> claimEntry : entry.getValue().entrySet()) {
                    String mappedAttribute = claimEntry.getKey();
                    attributes.add(mappedAttribute.trim());
                    if (claimEntry.getValue() != null && claimEntry.getValue().size() > 1) {
                        isSuccess = false;

                        report.append(count + ")  Duplicate Mapped Attribute found for dialect :" + dialect
                                + " | Mapped Attribute :" + mappedAttribute + " | " + "Relevant Claims : "
                                + claimEntry.getValue() + " | Tenant Domain :" + tenantDomain);
                        report.append("\n\n");
                        if (log.isDebugEnabled()) {
                            log.debug("Duplicate Mapped Attribute found for dialect :" + dialect
                                    + " | Mapped Attribute :" + mappedAttribute + " | " + "Relevant Claims : "
                                    + claimEntry.getValue() + " | Tenant Domain :" + tenantDomain);
                        }

                        count++;
                    }
                }

                dialectMappedAttributes.put(entry.getKey().replace("@" + tenantDomain, ""), attributes);
                tenantDialectMappedAttributes.put(tenantDomain, dialectMappedAttributes);
            }
        }

        //If a remote dialect's mapped attribute has no match among the relevant tenant's local dialect
        // mapped attributes, a new mapping must be created in the local dialect. This variable keeps the
        // new local claim URIs which need to be added in each tenant. Format is tenantDomain --> Set of
        // mapped attributes.
        Map<String, Set<String>> claimsToAddMap = new HashMap<>();
        if (tenantDialectMappedAttributes != null) {
            for (Map.Entry<String, Map<String, List<String>>> entry : tenantDialectMappedAttributes
                    .entrySet()) {
                String tenantDomain = entry.getKey();
                Set<String> claimsToAdd = new HashSet<>();
                if (claimsToAddMap.get(tenantDomain) != null) {
                    claimsToAdd = claimsToAddMap.get(tenantDomain);
                }

                if (entry.getValue() != null) {
                    List<String> localAttributes = entry.getValue().get(ClaimConstants.LOCAL_CLAIM_DIALECT_URI);
                    for (Map.Entry<String, List<String>> dialect : entry.getValue().entrySet()) {
                        if (!ClaimConstants.LOCAL_CLAIM_DIALECT_URI.equalsIgnoreCase(dialect.getKey())) {
                            List<String> remoteClaimAttributes = dialect.getValue();
                            if (remoteClaimAttributes != null) {
                                for (String remoteClaimAttribute : remoteClaimAttributes) {
                                    if (!localAttributes.contains(remoteClaimAttribute)) {
                                        claimsToAdd.add(remoteClaimAttribute);
                                        isSuccess = false;
                                        report.append("\n\n" + count + ")  Mapped Attribute : "
                                                + remoteClaimAttribute + " in dialect :" + dialect.getKey()
                                                + " is not associated to any of the local claim in tenant domain: "
                                                + tenantDomain);

                                        if (log.isDebugEnabled()) {
                                            log.debug("Mapped Attribute : " + remoteClaimAttribute
                                                    + " in dialect :" + dialect.getKey()
                                                    + " is not associated to any of the local claim in tenant domain: "
                                                    + tenantDomain);
                                        }
                                        count++;
                                    }
                                }
                            }
                        }
                    }
                }
                claimsToAddMap.put(tenantDomain, claimsToAdd);
            }
        }

    } catch (SQLException e) {
        log.error("Error while validating claim management data", e);
    } finally {
        IdentityDatabaseUtil.closeResultSet(dialects);
        IdentityDatabaseUtil.closeStatement(loadDialectsStatement);
        IdentityDatabaseUtil.closeStatement(updateRole);
        IdentityDatabaseUtil.closeConnection(umConnection);
    }

    // Migrating claim Data starts here.
    ClaimManager claimManager = ClaimManager.getInstance();

    if (claims != null) {

        report.append(
                "\n\n------------------------------------------------------------------------------ Claim "
                        + "Migration -------------------------------------------------------------------------------\n \n");
        try {
            // Add Claim Dialects
            claimManager.addClaimDialects(claims, report);

            // Add Local Claims.
            claimManager.addLocalClaims(claims, report);

            // Add External Claims
            claimManager.addExternalClaim(claims, report);
        } catch (ISMigrationException e) {
            log.error("Error while migrating claim data", e);
        }
    }

    if (!isSuccess) {
        PrintWriter out = null;
        try {
            out = new PrintWriter("claim-migration.txt");
            out.println(report.toString());
        } catch (FileNotFoundException e) {
            log.error("Error while creating claim Migration Report", e);
        } finally {
            if (out != null) {
                out.close();
            }
        }
    }
    return isSuccess;
}
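
The migration code above accumulates claim URIs with a get-or-create pattern: look the list up with get, create a new one when the result is null, then put it back (see the mappedAttributes handling). On Java 8 and later the same accumulation can be written more compactly with computeIfAbsent. The snippet below is an illustrative sketch only; the attribute key and claim URI are examples and it is not part of the WSO2 sources.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MappedAttributeAccumulator {
    public static void main(String[] args) {
        Map<String, List<String>> mappedAttributes = new HashMap<>();

        // Equivalent of: claimURIs = map.get(key); if null, create a new list; add; map.put(key, claimURIs);
        String domainQualifiedAttribute = "PRIMARY/uid";                  // example key (illustrative)
        mappedAttributes.computeIfAbsent(domainQualifiedAttribute, k -> new ArrayList<>())
                .add("http://wso2.org/claims/username");                  // example claim URI (illustrative)

        // A mapped attribute pointing at more than one claim URI would fail the validation above.
        for (Map.Entry<String, List<String>> entry : mappedAttributes.entrySet()) {
            boolean duplicate = entry.getValue().size() > 1;
            System.out.println(entry.getKey() + " -> " + entry.getValue() + " (duplicate=" + duplicate + ")");
        }
    }
}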

From source file:edu.ku.brc.specify.tasks.subpane.qb.QueryBldrPane.java

/**
 * Adds qualifiers (TableOrRelationship/Field Title) to query fields where necessary.
 *
 */
protected void qualifyFieldLabels() {
    List<String> labels = new ArrayList<String>(queryFieldItems.size());
    Map<String, List<QueryFieldPanel>> map = new HashMap<String, List<QueryFieldPanel>>();
    for (QueryFieldPanel qfp : queryFieldItems) {
        if (qfp.getFieldQRI() != null && qfp.getFieldTitle() != null) //this means tree levels won't get qualified.
        {
            if (!map.containsKey(qfp.getFieldTitle())) {
                map.put(qfp.getFieldTitle(), new LinkedList<QueryFieldPanel>());
            }
            map.get(qfp.getFieldTitle()).add(qfp);
            labels.add(qfp.getFieldTitle());
        }
    }

    for (Map.Entry<String, List<QueryFieldPanel>> entry : map.entrySet()) {
        if (entry.getValue().size() > 1 || entry.getValue().get(0).isLabelQualified()) {
            for (QueryFieldPanel q : entry.getValue()) {
                labels.remove(entry.getKey());
                labels.add(q.qualifyLabel(labels, entry.getValue().size() == 1));
            }
        }
    }
}

From source file:com.dell.asm.asmcore.asmmanager.app.rest.ServiceTemplateService.java

private static Map<String, String> mapBiosSettings(ServiceTemplateComponent component) {
    Map<String, String> biosSettings = new HashMap<>();
    ServiceTemplateCategory resource = component
            .getTemplateResource(ServiceTemplateSettingIDs.SERVICE_TEMPLATE_SERVER_IDRAC_RESOURCE);
    if (resource != null) {
        for (Map.Entry<String, Map<String, String>> setting : OLD_BIOS_TEMPLATE_MAP.entrySet()) {
            String newName = setting.getKey();
            String oldName = setting.getValue().get("name");
            ServiceTemplateSetting oldSetting = resource.getParameter(oldName);
            if (oldSetting != null) {
                if (setting.getValue().get("remap").equals("true")) {
                    biosSettings.put(newName, setting.getValue().get(oldSetting.getValue()));
                } else {
                    biosSettings.put(newName, oldSetting.getValue());
                }
            }
        }
        return biosSettings;
    }
    return null;
}

From source file:com.alibaba.wasp.master.balancer.DefaultLoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded entityGroups of each server.
 *
 * The load balancing invariant is that all servers are within 1 entityGroup of the
 * average number of entityGroups per server. If the average is an integer number,
 * all servers will be balanced to the average. Otherwise, all servers will
 * have either floor(average) or ceiling(average) entityGroups.
 * 
 * HBASE-3609 Modeled entityGroupsToMove using Guava's MinMaxPriorityQueue so that
 * we can fetch from both ends of the queue. At the beginning, we check
 * whether there was empty entityGroup server just discovered by Master. If so, we
 * alternately choose new / old entityGroups from head / tail of entityGroupsToMove,
 * respectively. This alternation avoids clustering young entityGroups on the newly
 * discovered entityGroup server. Otherwise, we choose new entityGroups from head of
 * entityGroupsToMove.
 * 
 * Another improvement from HBASE-3609 is that we assign entityGroups from
 * entityGroupsToMove to underloaded servers in round-robin fashion. Previously one
 * underloaded server would be filled before we move onto the next underloaded
 * server, leading to clustering of young entityGroups.
 * 
 * Finally, we randomly shuffle underloaded servers so that they receive
 * offloaded entityGroups relatively evenly across calls to balanceCluster().
 * 
 * The algorithm is currently implemented as such:
 * 
 * <ol>
 * <li>Determine the two valid numbers of entityGroups each server should have,
 * <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 * 
 * <li>Iterate down the most loaded servers, shedding entityGroups from each so
 * each server hosts exactly <b>MAX</b> entityGroups. Stop once you reach a server
 * that already has &lt;= <b>MAX</b> entityGroups.
 * <p>
 * Order the entityGroups to move from most recent to least.
 * 
 * <li>Iterate down the least loaded servers, assigning entityGroups so each server
 * has exactly <b>MIN</b> entityGroups. Stop once you reach a server that already
 * has &gt;= <b>MIN</b> entityGroups.
 * 
 * EntityGroups being assigned to underloaded servers are those that were shed in
 * the previous step. It is possible that there were not enough entityGroups shed
 * to fill each underloaded server to <b>MIN</b>. If so we end up with a
 * number of entityGroups required to do so, <b>neededEntityGroups</b>.
 * 
 * It is also possible that we were able to fill each underloaded server but ended up
 * with entityGroups that were unassigned from overloaded servers but that still do
 * not have assignment.
 * 
 * If neither of these conditions hold (no entityGroups needed to fill the
 * underloaded servers, no entityGroups leftover from overloaded servers), we are
 * done and return. Otherwise we handle these cases below.
 * 
 * <li>If <b>neededEntityGroups</b> is non-zero (still have underloaded servers),
 * we iterate the most loaded servers again, shedding a single server from
 * each (this brings them from having <b>MAX</b> entityGroups to having <b>MIN</b>
 * entityGroups).
 * 
 * <li>We now definitely have more entityGroups that need assignment, either from
 * the previous step or from the original shedding from overloaded servers.
 * Iterate the least loaded servers filling each to <b>MIN</b>.
 * 
 * <li>If we still have more entityGroups that need assignment, again iterate the
 * least loaded servers, this time giving each one (filling them to
 * <b>MAX</b>) until we run out.
 * 
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> entityGroups.
 * 
 * In addition, any server hosting &gt;= <b>MAX</b> entityGroups is guaranteed to
 * end up with <b>MAX</b> entityGroups at the end of the balancing. This ensures
 * the minimal number of entityGroups possible are moved.
 * </ol>
 * 
 * TODO: We can at-most reassign the number of entityGroups away from a particular
 * server to be how many they report as most loaded. Should we just keep all
 * assignment in memory? Any objections? Does this mean we need HeapSize on
 * HMaster? Or just careful monitor? (current thinking is we will hold all
 * assignments in memory)
 * 
 * @param clusterMap Map of entityGroupservers and their load/entityGroup information
 *          to a list of their most loaded entityGroups
 * @return a list of entityGroups to be moved, including source and destination, or
 *         null if cluster is already balanced
 */
public List<EntityGroupPlan> balanceCluster(Map<ServerName, List<EntityGroupInfo>> clusterMap) {
    boolean emptyFServerPresent = false;
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    int numServers = cs.getNumServers();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    NavigableMap<ServerAndLoad, List<EntityGroupInfo>> serversByLoad = cs.getServersByLoad();

    int numEntityGroups = cs.getNumEntityGroups();

    if (!this.needsBalance(cs)) {
        // Skipped because no server outside (min,max) range
        float average = cs.getLoadAverage(); // for logging
        LOG.info("Skipping load balancing because balanced cluster; " + "servers=" + numServers + " "
                + "entityGroups=" + numEntityGroups + " average=" + average + " " + "mostloaded="
                + serversByLoad.lastKey().getLoad() + " leastloaded=" + serversByLoad.firstKey().getLoad());
        return null;
    }

    int min = numEntityGroups / numServers;
    int max = numEntityGroups % numServers == 0 ? min : min + 1;

    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numEntityGroups=").append(numEntityGroups)
            .append(", numServers=").append(numServers).append(", max=").append(max).append(", min=")
            .append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<EntityGroupPlan> entityGroupsToMove = MinMaxPriorityQueue.orderedBy(rpComparator)
            .create();
    List<EntityGroupPlan> entityGroupsToReturn = new ArrayList<EntityGroupPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch entityGroups from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<EntityGroupInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int entityGroupCount = sal.getLoad();
        if (entityGroupCount <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<EntityGroupInfo> entityGroups = server.getValue();
        int numToOffload = Math.min(entityGroupCount - max, entityGroups.size());
        // account for the out-of-band entityGroups which were assigned to this server
        // after some other entityGroup server crashed
        Collections.sort(entityGroups, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload;) {
            EntityGroupInfo egInfo = entityGroups.get(i); // fetch from head
            if (fetchFromTail) {
                egInfo = entityGroups.get(entityGroups.size() - 1 - i);
            }
            i++;
            entityGroupsToMove.add(new EntityGroupPlan(egInfo, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
            // fetch in alternate order if there is new entityGroup server
            if (emptyFServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = entityGroupsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededEntityGroups = 0; // number of entityGroups needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    for (Map.Entry<ServerAndLoad, List<EntityGroupInfo>> server : serversByLoad.entrySet()) {
        int entityGroupCount = server.getKey().getLoad();
        if (entityGroupCount >= min) {
            break;
        }
        underloadedServers.put(server.getKey().getServerName(), min - entityGroupCount);
    }
    // number of servers that get new entityGroups
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays
            .asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (entityGroupsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (entityGroupsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;

            addEntityGroupPlan(entityGroupsToMove, fetchFromTail, si, entityGroupsToReturn);
            if (emptyFServerPresent) {
                fetchFromTail = !fetchFromTail;
            }

            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            if (bi == null) {
                bi = new BalanceInfo(0, 0);
                serverBalanceInfo.put(si, bi);
            }
            bi.setNumEntityGroupsAdded(bi.getNumEntityGroupsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededEntityGroups += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededEntityGroups == 0 && entityGroupsToMove.isEmpty()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
                + " entityGroups off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
                + " less loaded servers");
        return entityGroupsToReturn;
    }

    // Need to do a second pass.
    // Either more entityGroups to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededEntityGroups != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<EntityGroupInfo>> server : serversByLoad.descendingMap()
                .entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextEntityGroupForUnload();
            if (idx >= server.getValue().size())
                break;
            EntityGroupInfo entityGroup = server.getValue().get(idx);
            entityGroupsToMove.add(new EntityGroupPlan(entityGroup, server.getKey().getServerName(), null));
            totalNumMoved++;
            if (--neededEntityGroups == 0) {
                // No more entityGroups needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of entityGroups that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<EntityGroupInfo>> server : serversByLoad.entrySet()) {
        int entityGroupCount = server.getKey().getLoad();
        if (entityGroupCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            entityGroupCount += balanceInfo.getNumEntityGroupsAdded();
        }
        if (entityGroupCount >= min) {
            continue;
        }
        int numToTake = min - entityGroupCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < entityGroupsToMove.size()) {
            addEntityGroupPlan(entityGroupsToMove, fetchFromTail, server.getKey().getServerName(),
                    entityGroupsToReturn);
            numTaken++;
            if (emptyFServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
    }

    // If we still have entityGroups to dish out, assign underloaded to max
    if (0 < entityGroupsToMove.size()) {
        for (Map.Entry<ServerAndLoad, List<EntityGroupInfo>> server : serversByLoad.entrySet()) {
            int entityGroupCount = server.getKey().getLoad();
            if (entityGroupCount >= max) {
                break;
            }
            addEntityGroupPlan(entityGroupsToMove, fetchFromTail, server.getKey().getServerName(),
                    entityGroupsToReturn);
            if (emptyFServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
            if (entityGroupsToMove.isEmpty()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (!entityGroupsToMove.isEmpty() || neededEntityGroups != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("entityGroupsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded="
                + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<EntityGroupInfo>> e : clusterMap.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
            + " entityGroups off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return entityGroupsToReturn;
}

From source file:org.apache.hadoop.hbase.master.LoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server.  If the average is an integer number,
 * all servers will be balanced to the average.  Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
 *     server that already has &lt;= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
 *     already has &gt;= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step.  It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server but ended
 *     up with regions that were unassigned from overloaded servers but that
 *     still do not have assignment.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return.  Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single server from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing.  This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can at-most reassign the number of regions away from a particular
 *       server to be how many they report as most loaded.
 *       Should we just keep all assignment in memory?  Any objections?
 *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterState Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<HServerInfo, List<HRegionInfo>> clusterState) {
    long startTime = System.currentTimeMillis();

    // Make a map sorted by load and count regions
    TreeMap<HServerInfo, List<HRegionInfo>> serversByLoad = new TreeMap<HServerInfo, List<HRegionInfo>>(
            new HServerInfo.LoadComparator());
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : clusterState.entrySet()) {
        server.getKey().getLoad().setNumberOfRegions(server.getValue().size());
        numRegions += server.getKey().getLoad().getNumberOfRegions();
        serversByLoad.put(server.getKey(), server.getValue());
    }

    // Check if we even need to do any load balancing
    float average = (float) numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if (serversByLoad.lastKey().getLoad().getNumberOfRegions() <= ceiling
            && serversByLoad.firstKey().getLoad().getNumberOfRegions() >= floor) {
        // Skipped because no server outside (min,max) range
        LOG.info("Skipping load balancing.  servers=" + numServers + " " + "regions=" + numRegions + " average="
                + average + " " + "mostloaded=" + serversByLoad.lastKey().getLoad().getNumberOfRegions()
                + " leastloaded=" + serversByLoad.firstKey().getLoad().getNumberOfRegions());
        return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    List<RegionPlan> regionsToMove = new ArrayList<RegionPlan>();
    int regionidx = 0; // track the index in above list for setting destination

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    Map<HServerInfo, BalanceInfo> serverBalanceInfo = new TreeMap<HServerInfo, BalanceInfo>();
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        HServerInfo serverInfo = server.getKey();
        int regionCount = serverInfo.getLoad().getNumberOfRegions();
        if (regionCount <= max) {
            serverBalanceInfo.put(serverInfo, new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = randomize(server.getValue());
        int numToOffload = Math.min(regionCount - max, regions.size());
        int numTaken = 0;
        for (int i = regions.size() - 1; i >= 0; i--) {
            HRegionInfo hri = regions.get(i);
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, serverInfo, null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
        }
        serverBalanceInfo.put(serverInfo, new BalanceInfo(numToOffload, (-1) * numTaken));
    }

    // Walk down least loaded, filling each to the min
    int serversUnderloaded = 0; // number of servers that get new regions
    int neededRegions = 0; // number of regions needed to bring all up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min) {
            break;
        }
        serversUnderloaded++;
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
        serverBalanceInfo.put(server.getKey(), new BalanceInfo(0, numTaken));
        // If we still want to take some, increment needed
        if (numTaken < numToTake) {
            neededRegions += (numToTake - numTaken);
        }
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionidx == regionsToMove.size()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
                + regionsToMove.size() + " regions off of " + serversOverloaded + " overloaded servers onto "
                + serversUnderloaded + " less loaded servers");
        return regionsToMove;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey(), null));
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad().getNumberOfRegions();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && regionidx < regionsToMove.size()) {
            regionsToMove.get(regionidx).setDestination(server.getKey());
            numTaken++;
            regionidx++;
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (regionidx != regionsToMove.size()) {
        for (Map.Entry<HServerInfo, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad().getNumberOfRegions();
            if (regionCount >= max) {
                break;
            }
            regionsToMove.get(regionidx).setDestination(server.getKey());
            regionidx++;
            if (regionidx == regionsToMove.size()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (regionidx != regionsToMove.size() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionidx=" + regionidx + ", regionsToMove=" + regionsToMove.size() + ", numServers="
                + numServers + ", serversOverloaded=" + serversOverloaded + ", serversUnderloaded="
                + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<HServerInfo, List<HRegionInfo>> e : clusterState.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().getServerName());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + regionsToMove.size()
            + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return regionsToMove;
}
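
The example above walks the map's entries twice: down the most loaded servers to shed regions, then down the least loaded servers to hand the shed regions back out. The following stand-alone sketch (toy names and data, not HBase code) shows the same shed-then-fill pattern with plain java.util collections, reading each entry with getKey()/getValue() and using the MIN=floor(average) / MAX=ceiling(average) targets described in the javadoc of the next example.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ShedAndFillSketch {
    public static void main(String[] args) {
        // Hypothetical server -> hosted regions map; names are illustrative only.
        Map<String, List<String>> regionsByServer = new LinkedHashMap<>();
        regionsByServer.put("rs1", new ArrayList<>(List.of("r1", "r2", "r3", "r4", "r5")));
        regionsByServer.put("rs2", new ArrayList<>(List.of("r6")));
        regionsByServer.put("rs3", new ArrayList<>(List.of("r7", "r8")));

        int numRegions = regionsByServer.values().stream().mapToInt(List::size).sum();
        int numServers = regionsByServer.size();
        int min = numRegions / numServers;                      // floor(average)
        int max = numRegions % numServers == 0 ? min : min + 1; // ceiling(average)

        // Pass 1: shed from overloaded servers down to MAX.
        List<String> toMove = new ArrayList<>();
        for (Map.Entry<String, List<String>> e : regionsByServer.entrySet()) {
            List<String> regions = e.getValue();
            while (regions.size() > max) {
                toMove.add(regions.remove(regions.size() - 1));
            }
        }

        // Pass 2: fill underloaded servers up to MIN with the shed regions.
        for (Map.Entry<String, List<String>> e : regionsByServer.entrySet()) {
            while (e.getValue().size() < min && !toMove.isEmpty()) {
                e.getValue().add(toMove.remove(toMove.size() - 1));
            }
        }

        // Pass 3: any leftovers go to servers that are still below MAX.
        for (Map.Entry<String, List<String>> e : regionsByServer.entrySet()) {
            while (e.getValue().size() < max && !toMove.isEmpty()) {
                e.getValue().add(toMove.remove(toMove.size() - 1));
            }
        }

        regionsByServer.forEach((server, regions) -> System.out.println(server + " -> " + regions));
    }
}

Every server ends within one region of the average, which is the balancing invariant the real balancer maintains.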

From source file:org.apache.hadoop.hbase.master.balancer.DefaultLoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server.  If the average is an integer number,
 * all servers will be balanced to the average.  Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that
 *   we can fetch from both ends of the queue. 
 * At the beginning, we check whether an empty region server was just
 *   discovered by the Master. If so, we alternately choose new / old
 *   regions from head / tail of regionsToMove, respectively. This alternation
 *   avoids clustering young regions on the newly discovered region server.
 *   Otherwise, we choose new regions from head of regionsToMove.
 *
 * Another improvement from HBASE-3609 is that we assign regions from
 *   regionsToMove to underloaded servers in round-robin fashion.
 *   Previously one underloaded server would be filled before we moved on to
 *   the next underloaded server, leading to clustering of young regions.
 *   
 * Finally, we randomly shuffle underloaded servers so that they receive
 *   offloaded regions relatively evenly across calls to balanceCluster().
 *         
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
 *     server that already has &lt;= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
 *     already has &gt;= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step.  It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server to
 *     <b>MIN</b> but still ended up with regions that were shed from overloaded
 *     servers and have no assignment yet.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return.  Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single region from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing.  This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can reassign away from a particular server at most as many regions
 *       as it reports as most loaded.
 *       Should we just keep all assignment in memory?  Any objections?
 *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    boolean emptyRegionServerPresent = false;
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    if (!this.needsBalance(cs))
        return null;

    int numServers = cs.getNumServers();
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
    int numRegions = cs.getNumRegions();
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions).append(", numServers=")
            .append(numServers).append(", max=").append(max).append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create();
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int regionCount = sal.getLoad();
        if (regionCount <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = server.getValue();
        int numToOffload = Math.min(regionCount - max, regions.size());
        // account for the out-of-band regions which were assigned to this server
        // after some other region server crashed 
        Collections.sort(regions, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload;) {
            HRegionInfo hri = regions.get(i); // fetch from head
            if (fetchFromTail) {
                hri = regions.get(regions.size() - 1 - i);
            }
            i++;
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
            // fetch in alternate order if there is new region server
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = regionsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededRegions = 0; // number of regions needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    float average = (float) numRegions / numServers; // for logging
    int maxToTake = numRegions - (int) average;
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        if (maxToTake == 0)
            break; // no more to take
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min && regionCount > 0) {
            continue; // look for other servers which haven't reached min
        }
        int regionsToPut = min - regionCount;
        if (regionsToPut == 0) {
            regionsToPut = 1;
            maxToTake--;
        }
        underloadedServers.put(server.getKey().getServerName(), regionsToPut);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays
            .asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (regionsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;

            addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }

            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            if (bi == null) {
                bi = new BalanceInfo(0, 0);
                serverBalanceInfo.put(si, bi);
            }
            bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededRegions += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionsToMove.isEmpty()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
                + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
                + " less loaded servers");
        return regionsToReturn;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
            totalNumMoved++;
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < regionsToMove.size()) {
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            numTaken++;
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (0 < regionsToMove.size()) {
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad();
            if (regionCount >= max) {
                break;
            }
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
            if (regionsToMove.isEmpty()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (!regionsToMove.isEmpty() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded="
                + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterMap.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
            + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return regionsToReturn;
}
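
HBASE-3609, cited in the javadoc above, models regionsToMove as a Guava MinMaxPriorityQueue precisely so plans can be taken from either end of the queue. The sketch below (assuming Guava is on the classpath; integers stand in for RegionPlan) shows the head/tail alternation that kicks in when an empty region server is present.

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Comparator;

public class MinMaxQueueSketch {
    public static void main(String[] args) {
        MinMaxPriorityQueue<Integer> regionsToMove =
                MinMaxPriorityQueue.orderedBy(Comparator.<Integer>naturalOrder()).create();
        for (int i = 1; i <= 6; i++) {
            regionsToMove.add(i); // stand-ins for RegionPlans ordered by the balancer's comparator
        }

        boolean fetchFromTail = false;
        while (!regionsToMove.isEmpty()) {
            // Alternate ends, as the balancer does when emptyRegionServerPresent is true.
            Integer next = fetchFromTail ? regionsToMove.pollLast() : regionsToMove.pollFirst();
            System.out.println("assign plan " + next);
            fetchFromTail = !fetchFromTail;
        }
    }
}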

From source file:org.apache.asterix.utils.FeedOperations.java

private static JobSpecification combineIntakeCollectJobs(MetadataProvider metadataProvider, Feed feed,
        JobSpecification intakeJob, List<JobSpecification> jobsList, List<FeedConnection> feedConnections,
        String[] intakeLocations) throws AlgebricksException, HyracksDataException {
    JobSpecification jobSpec = new JobSpecification(intakeJob.getFrameSize());

    // copy ingestor
    FeedIntakeOperatorDescriptor firstOp = (FeedIntakeOperatorDescriptor) intakeJob.getOperatorMap()
            .get(new OperatorDescriptorId(0));
    FeedIntakeOperatorDescriptor ingestionOp;
    if (firstOp.getAdaptorFactory() == null) {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorLibraryName(),
                firstOp.getAdaptorFactoryClassName(), firstOp.getAdapterOutputType(),
                firstOp.getPolicyAccessor(), firstOp.getOutputRecordDescriptors()[0]);
    } else {
        ingestionOp = new FeedIntakeOperatorDescriptor(jobSpec, feed, firstOp.getAdaptorFactory(),
                firstOp.getAdapterOutputType(), firstOp.getPolicyAccessor(),
                firstOp.getOutputRecordDescriptors()[0]);
    }
    // create replicator
    ReplicateOperatorDescriptor replicateOp = new ReplicateOperatorDescriptor(jobSpec,
            ingestionOp.getOutputRecordDescriptors()[0], jobsList.size());
    jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), ingestionOp, 0, replicateOp, 0);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, ingestionOp, intakeLocations);
    PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, replicateOp, intakeLocations);
    // Loop over the jobs to copy operators and connections
    Map<OperatorDescriptorId, OperatorDescriptorId> operatorIdMapping = new HashMap<>();
    Map<ConnectorDescriptorId, ConnectorDescriptorId> connectorIdMapping = new HashMap<>();
    Map<OperatorDescriptorId, List<LocationConstraint>> operatorLocations = new HashMap<>();
    Map<OperatorDescriptorId, Integer> operatorCounts = new HashMap<>();
    List<JobId> jobIds = new ArrayList<>();
    FeedMetaOperatorDescriptor metaOp;

    for (int iter1 = 0; iter1 < jobsList.size(); iter1++) {
        FeedConnection curFeedConnection = feedConnections.get(iter1);
        JobSpecification subJob = jobsList.get(iter1);
        operatorIdMapping.clear();
        Map<OperatorDescriptorId, IOperatorDescriptor> operatorsMap = subJob.getOperatorMap();
        FeedConnectionId feedConnectionId = new FeedConnectionId(ingestionOp.getEntityId(),
                feedConnections.get(iter1).getDatasetName());

        FeedPolicyEntity feedPolicyEntity = FeedMetadataUtil.validateIfPolicyExists(
                curFeedConnection.getDataverseName(), curFeedConnection.getPolicyName(),
                metadataProvider.getMetadataTxnContext());

        for (Map.Entry<OperatorDescriptorId, IOperatorDescriptor> entry : operatorsMap.entrySet()) {
            IOperatorDescriptor opDesc = entry.getValue();
            OperatorDescriptorId oldId = opDesc.getOperatorId();
            OperatorDescriptorId opId = null;
            if (opDesc instanceof LSMTreeInsertDeleteOperatorDescriptor
                    && ((LSMTreeInsertDeleteOperatorDescriptor) opDesc).isPrimary()) {
                String operandId = ((LSMTreeInsertDeleteOperatorDescriptor) opDesc).getIndexName();
                metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                        feedPolicyEntity.getProperties(), FeedRuntimeType.STORE, operandId);
                opId = metaOp.getOperatorId();
                opDesc.setOperatorId(opId);
            } else {
                if (opDesc instanceof AlgebricksMetaOperatorDescriptor) {
                    AlgebricksMetaOperatorDescriptor algOp = (AlgebricksMetaOperatorDescriptor) opDesc;
                    IPushRuntimeFactory[] runtimeFactories = algOp.getPipeline().getRuntimeFactories();
                    // Tweak AssignOp to work with messages
                    if (runtimeFactories[0] instanceof AssignRuntimeFactory && runtimeFactories.length > 1) {
                        IConnectorDescriptor connectorDesc = subJob.getOperatorInputMap()
                                .get(opDesc.getOperatorId()).get(0);
                        // anything on the network interface needs to be message compatible
                        if (connectorDesc instanceof MToNPartitioningConnectorDescriptor) {
                            metaOp = new FeedMetaOperatorDescriptor(jobSpec, feedConnectionId, opDesc,
                                    feedPolicyEntity.getProperties(), FeedRuntimeType.COMPUTE, null);
                            opId = metaOp.getOperatorId();
                            opDesc.setOperatorId(opId);
                        }
                    }
                }
                if (opId == null) {
                    opId = jobSpec.createOperatorDescriptorId(opDesc);
                }
            }
            operatorIdMapping.put(oldId, opId);
        }

        // copy connectors
        connectorIdMapping.clear();
        for (Entry<ConnectorDescriptorId, IConnectorDescriptor> entry : subJob.getConnectorMap().entrySet()) {
            IConnectorDescriptor connDesc = entry.getValue();
            ConnectorDescriptorId newConnId;
            if (connDesc instanceof MToNPartitioningConnectorDescriptor) {
                MToNPartitioningConnectorDescriptor m2nConn = (MToNPartitioningConnectorDescriptor) connDesc;
                connDesc = new MToNPartitioningWithMessageConnectorDescriptor(jobSpec,
                        m2nConn.getTuplePartitionComputerFactory());
                newConnId = connDesc.getConnectorId();
            } else {
                newConnId = jobSpec.createConnectorDescriptor(connDesc);
            }
            connectorIdMapping.put(entry.getKey(), newConnId);
        }

        // make connections between operators
        for (Entry<ConnectorDescriptorId, Pair<Pair<IOperatorDescriptor, Integer>, Pair<IOperatorDescriptor, Integer>>> entry : subJob
                .getConnectorOperatorMap().entrySet()) {
            ConnectorDescriptorId newId = connectorIdMapping.get(entry.getKey());
            IConnectorDescriptor connDesc = jobSpec.getConnectorMap().get(newId);
            Pair<IOperatorDescriptor, Integer> leftOp = entry.getValue().getLeft();
            Pair<IOperatorDescriptor, Integer> rightOp = entry.getValue().getRight();
            IOperatorDescriptor leftOpDesc = jobSpec.getOperatorMap().get(leftOp.getLeft().getOperatorId());
            IOperatorDescriptor rightOpDesc = jobSpec.getOperatorMap().get(rightOp.getLeft().getOperatorId());
            if (leftOp.getLeft() instanceof FeedCollectOperatorDescriptor) {
                jobSpec.connect(new OneToOneConnectorDescriptor(jobSpec), replicateOp, iter1, leftOpDesc,
                        leftOp.getRight());
            }
            jobSpec.connect(connDesc, leftOpDesc, leftOp.getRight(), rightOpDesc, rightOp.getRight());
        }

        // prepare for setting partition constraints
        operatorLocations.clear();
        operatorCounts.clear();

        for (Constraint constraint : subJob.getUserConstraints()) {
            LValueConstraintExpression lexpr = constraint.getLValue();
            ConstraintExpression cexpr = constraint.getRValue();
            OperatorDescriptorId opId;
            switch (lexpr.getTag()) {
            case PARTITION_COUNT:
                opId = ((PartitionCountExpression) lexpr).getOperatorDescriptorId();
                operatorCounts.put(operatorIdMapping.get(opId), (int) ((ConstantExpression) cexpr).getValue());
                break;
            case PARTITION_LOCATION:
                opId = ((PartitionLocationExpression) lexpr).getOperatorDescriptorId();
                IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(operatorIdMapping.get(opId));
                List<LocationConstraint> locations = operatorLocations.get(opDesc.getOperatorId());
                if (locations == null) {
                    locations = new ArrayList<>();
                    operatorLocations.put(opDesc.getOperatorId(), locations);
                }
                String location = (String) ((ConstantExpression) cexpr).getValue();
                LocationConstraint lc = new LocationConstraint(location,
                        ((PartitionLocationExpression) lexpr).getPartition());
                locations.add(lc);
                break;
            default:
                break;
            }
        }

        // set absolute location constraints
        for (Entry<OperatorDescriptorId, List<LocationConstraint>> entry : operatorLocations.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            // why do we need to sort?
            Collections.sort(entry.getValue(), (LocationConstraint o1, LocationConstraint o2) -> {
                return o1.partition - o2.partition;
            });
            String[] locations = new String[entry.getValue().size()];
            for (int j = 0; j < locations.length; ++j) {
                locations[j] = entry.getValue().get(j).location;
            }
            PartitionConstraintHelper.addAbsoluteLocationConstraint(jobSpec, opDesc, locations);
        }

        // set count constraints
        for (Entry<OperatorDescriptorId, Integer> entry : operatorCounts.entrySet()) {
            IOperatorDescriptor opDesc = jobSpec.getOperatorMap().get(entry.getKey());
            if (!operatorLocations.keySet().contains(entry.getKey())) {
                PartitionConstraintHelper.addPartitionCountConstraint(jobSpec, opDesc, entry.getValue());
            }
        }
        // roots
        for (OperatorDescriptorId root : subJob.getRoots()) {
            jobSpec.addRoot(jobSpec.getOperatorMap().get(operatorIdMapping.get(root)));
        }
        jobIds.add(((JobEventListenerFactory) subJob.getJobletEventListenerFactory()).getJobId());
    }

    // jobEventListenerFactory
    jobSpec.setJobletEventListenerFactory(new MultiTransactionJobletEventListenerFactory(jobIds, true));
    // useConnectorSchedulingPolicy
    jobSpec.setUseConnectorPolicyForScheduling(jobsList.get(0).isUseConnectorPolicyForScheduling());
    // connectorAssignmentPolicy
    jobSpec.setConnectorPolicyAssignmentPolicy(jobsList.get(0).getConnectorPolicyAssignmentPolicy());
    return jobSpec;
}
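
Most of the Map.Entry work in combineIntakeCollectJobs is id bookkeeping: each sub-job's operator and connector maps are walked entry by entry, fresh ids are created in the combined job, and an old-to-new mapping is kept so connections, constraints and roots can be rewired afterwards. A minimal sketch of that remapping pattern (hypothetical integer ids and string descriptors, not the Hyracks API):

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class IdRemapSketch {
    public static void main(String[] args) {
        // One sub-job's operators, keyed by their old ids (illustrative only).
        Map<Integer, String> subJobOperators = new LinkedHashMap<>();
        subJobOperators.put(0, "collect");
        subJobOperators.put(1, "assign");
        subJobOperators.put(2, "insert");

        Map<Integer, Integer> operatorIdMapping = new HashMap<>();
        Map<Integer, String> combinedJobOperators = new LinkedHashMap<>();
        int nextId = 100; // pretend lower ids are taken by the intake/replicate operators

        for (Map.Entry<Integer, String> entry : subJobOperators.entrySet()) {
            int newId = nextId++;
            operatorIdMapping.put(entry.getKey(), newId);      // remember old -> new
            combinedJobOperators.put(newId, entry.getValue()); // copy descriptor under new id
        }

        // A connection expressed against old ids is resolved through the mapping.
        int[] oldEdge = {0, 1};
        System.out.println("connect " + operatorIdMapping.get(oldEdge[0])
                + " -> " + operatorIdMapping.get(oldEdge[1]));
    }
}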

From source file:org.apache.hadoop.hbase.master.balancer.SimpleLoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server.  If the average is an integer number,
 * all servers will be balanced to the average.  Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that
 *   we can fetch from both ends of the queue. 
 * At the beginning, we check whether an empty region server was just
 *   discovered by the Master. If so, we alternately choose new / old
 *   regions from head / tail of regionsToMove, respectively. This alternation
 *   avoids clustering young regions on the newly discovered region server.
 *   Otherwise, we choose new regions from head of regionsToMove.
 *
 * Another improvement from HBASE-3609 is that we assign regions from
 *   regionsToMove to underloaded servers in round-robin fashion.
 *   Previously one underloaded server would be filled before we moved on to
 *   the next underloaded server, leading to clustering of young regions.
 *   
 * Finally, we randomly shuffle underloaded servers so that they receive
 *   offloaded regions relatively evenly across calls to balanceCluster().
 *         
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
 *     server that already has &lt;= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
 *     already has &gt;= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step.  It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server to
 *     <b>MIN</b> but still ended up with regions that were shed from overloaded
 *     servers and have no assignment yet.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return.  Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single region from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing.  This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can reassign away from a particular server at most as many regions
 *       as it reports as most loaded.
 *       Should we just keep all assignment in memory?  Any objections?
 *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    List<RegionPlan> regionsToReturn = balanceMasterRegions(clusterMap);
    if (regionsToReturn != null) {
        return regionsToReturn;
    }
    filterExcludedServers(clusterMap);
    boolean emptyRegionServerPresent = false;
    long startTime = System.currentTimeMillis();

    Collection<ServerName> backupMasters = getBackupMasters();
    ClusterLoadState cs = new ClusterLoadState(masterServerName, backupMasters, backupMasterWeight, clusterMap);

    if (!this.needsBalance(cs))
        return null;

    int numServers = cs.getNumServers();
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
    int numRegions = cs.getNumRegions();
    float average = cs.getLoadAverage();
    int max = (int) Math.ceil(average);
    int min = (int) average;

    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions).append(", numServers=")
            .append(numServers).append(", numBackupMasters=").append(cs.getNumBackupMasters())
            .append(", backupMasterWeight=").append(backupMasterWeight).append(", max=").append(max)
            .append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create();
    regionsToReturn = new ArrayList<RegionPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int load = sal.getLoad();
        if (load <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = server.getValue();
        int w = 1; // Normal region server has weight 1
        if (backupMasters != null && backupMasters.contains(sal.getServerName())) {
            w = backupMasterWeight; // Backup master has heavier weight
        }
        int numToOffload = Math.min((load - max) / w, regions.size());
        // account for the out-of-band regions which were assigned to this server
        // after some other region server crashed 
        Collections.sort(regions, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload;) {
            HRegionInfo hri = regions.get(i); // fetch from head
            if (fetchFromTail) {
                hri = regions.get(regions.size() - 1 - i);
            }
            i++;
            // Don't rebalance special regions.
            if (shouldBeOnMaster(hri) && masterServerName.equals(sal.getServerName()))
                continue;
            regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
            // fetch in alternate order if there is new region server
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = regionsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededRegions = 0; // number of regions needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    int maxToTake = numRegions - min;
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        if (maxToTake == 0)
            break; // no more to take
        int load = server.getKey().getLoad();
        if (load >= min && load > 0) {
            continue; // look for other servers which haven't reached min
        }
        int w = 1; // Normal region server has weight 1
        if (backupMasters != null && backupMasters.contains(server.getKey().getServerName())) {
            w = backupMasterWeight; // Backup master has heavier weight
        }
        int regionsToPut = (min - load) / w;
        if (regionsToPut == 0) {
            regionsToPut = 1;
        }
        maxToTake -= regionsToPut;
        underloadedServers.put(server.getKey().getServerName(), regionsToPut);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays
            .asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (regionsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;

            addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }

            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            if (bi == null) {
                bi = new BalanceInfo(0, 0);
                serverBalanceInfo.put(si, bi);
            }
            bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededRegions += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionsToMove.isEmpty()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
                + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
                + " less loaded servers");
        return regionsToReturn;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
            totalNumMoved++;
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < regionsToMove.size()) {
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            numTaken++;
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (0 < regionsToMove.size()) {
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad();
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            if (balanceInfo != null) {
                regionCount += balanceInfo.getNumRegionsAdded();
            }
            if (regionCount >= max) {
                break;
            }
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
            if (regionsToMove.isEmpty()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (!regionsToMove.isEmpty() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded="
                + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterMap.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
            + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return regionsToReturn;
}
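
The shuffled round-robin hand-out in the middle of this method (the while loop over underloadedServers with the incr sign flip) is what spreads shed regions evenly across calls to balanceCluster(). A compact, self-contained sketch of just that dealing pattern, with toy server and region names:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.Random;

public class RoundRobinDealSketch {
    public static void main(String[] args) {
        Deque<String> regionsToMove = new ArrayDeque<>(Arrays.asList("r1", "r2", "r3", "r4", "r5"));
        List<String> underloaded = new ArrayList<>(Arrays.asList("rs-a", "rs-b", "rs-c"));
        Collections.shuffle(underloaded, new Random(42)); // fixed seed only to make the demo repeatable

        int incr = 1;
        while (!regionsToMove.isEmpty()) {
            int i = incr > 0 ? 0 : underloaded.size() - 1;
            for (; i >= 0 && i < underloaded.size() && !regionsToMove.isEmpty(); i += incr) {
                System.out.println(underloaded.get(i) + " <- " + regionsToMove.poll());
            }
            incr = -incr; // walk the shuffled list in the other direction next pass
        }
    }
}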