Example usage for java.util EnumMap EnumMap

List of usage examples for java.util EnumMap EnumMap

Introduction

On this page you can find example usage for the java.util EnumMap EnumMap constructor.

Prototype

public EnumMap(Map<K, ? extends V> m) 

Source Link

Document

Creates an enum map initialized from the specified map.

Usage

From source file:org.openecomp.sdc.asdctool.impl.DataMigration.java

/**
 * Creates a {@link PrintWriter} for each file in the given map, keyed by table.
 *
 * @param files
 *            a map of the files according to table
 * @return a map of writers according to table, or {@code null} if any writer
 *         could not be created (writers created so far are closed first to
 *         avoid leaking file handles)
 */
private Map<Table, PrintWriter> createWriters(Map<Table, File> files) {
    Map<Table, PrintWriter> printerWritersMap = new EnumMap<>(Table.class);
    try {
        // Iterate entries directly instead of keySet() followed by get() lookups.
        for (Map.Entry<Table, File> entry : files.entrySet()) {
            Table table = entry.getKey();
            log.info("creating writer for {}", table);
            // Append mode (true) preserves any existing file content.
            FileWriter fw = new FileWriter(entry.getValue(), true);
            PrintWriter out = new PrintWriter(new BufferedWriter(fw));
            printerWritersMap.put(table, out);
            log.info("creating writer for {} was successful", table);
        }
    } catch (IOException e) {
        log.error("create writer to file failed", e);
        // Close the writers already created so the partial failure does not
        // leak open file handles.
        for (PrintWriter writer : printerWritersMap.values()) {
            writer.close();
        }
        return null;
    }
    return printerWritersMap;
}

From source file:org.apache.hadoop.hbase.io.hfile.LruBlockCache.java

/**
 * Tallies the cached blocks by their block type. Test-support method.
 *
 * @return a map from block type to the number of cached blocks of that type
 */
Map<BlockType, Integer> getBlockTypeCountsForTest() {
    Map<BlockType, Integer> tally = new EnumMap<BlockType, Integer>(BlockType.class);
    for (CachedBlock cached : map.values()) {
        BlockType type = ((HFileBlock) cached.getBuffer()).getBlockType();
        Integer previous = tally.get(type);
        tally.put(type, previous == null ? 1 : previous + 1);
    }
    return tally;
}

From source file:org.talend.camel.designer.ui.wizards.actions.JavaCamelJobScriptsExportWSAction.java

/**
 * Builds the export options for a Camel job script export: a KAR archive
 * containing meta-info and context, without job item, source code, or
 * launcher, and not restricted to the default context only.
 *
 * @return the populated export-choice map
 */
protected Map<ExportChoice, Object> getExportChoiceMap() {
    final Map<ExportChoice, Object> choices = new EnumMap<ExportChoice, Object>(ExportChoice.class);

    choices.put(ExportChoice.esbExportType, "kar");
    choices.put(ExportChoice.needMetaInfo, Boolean.TRUE);
    choices.put(ExportChoice.needContext, Boolean.TRUE);
    choices.put(ExportChoice.needJobItem, Boolean.FALSE);
    choices.put(ExportChoice.needSourceCode, Boolean.FALSE);
    choices.put(ExportChoice.needLauncher, Boolean.FALSE);
    choices.put(ExportChoice.onlyDefautContext, Boolean.FALSE);

    return choices;
}

From source file:org.apache.hadoop.hbase.io.hfile.LruBlockCache.java

/**
 * Tallies the cached blocks by their data block encoding. Test-support method.
 *
 * @return a map from encoding to the number of cached blocks using it
 */
public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
    Map<DataBlockEncoding, Integer> tally = new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
    for (CachedBlock cached : map.values()) {
        DataBlockEncoding encoding = ((HFileBlock) cached.getBuffer()).getDataBlockEncoding();
        Integer previous = tally.get(encoding);
        tally.put(encoding, previous == null ? 1 : previous + 1);
    }
    return tally;
}

From source file:org.lol.reddit.reddit.prepared.RedditPreparedPost.java

/**
 * Builds the vertical side toolbar for this post, adding one button per
 * action enabled in the user's toolbar preferences.
 *
 * @param activity the hosting activity, used for resources, preferences, and
 *            click handling
 * @param isComments true when shown from the comments view (offers a
 *            link-switch button); false when shown from the link view
 *            (offers a comments-switch button)
 * @param overlay the side overlay containing this toolbar; hidden after any
 *            button is pressed
 * @return the populated toolbar widget
 */
public VerticalToolbar generateToolbar(final Activity activity, boolean isComments,
        final SideToolbarOverlay overlay) {

    final VerticalToolbar toolbar = new VerticalToolbar(activity);
    final EnumSet<Action> itemsPref = PrefsUtility.pref_menus_post_toolbar_items(activity,
            PreferenceManager.getDefaultSharedPreferences(activity));

    // Fixed display order of candidate buttons; only actions also present in
    // itemsPref are actually added below.
    final Action[] possibleItems = { Action.ACTION_MENU,
            isComments ? Action.LINK_SWITCH : Action.COMMENTS_SWITCH, Action.UPVOTE, Action.DOWNVOTE,
            Action.SAVE, Action.HIDE, Action.REPLY, Action.EXTERNAL, Action.SAVE_IMAGE, Action.SHARE,
            Action.COPY, Action.USER_PROFILE, Action.PROPERTIES };

    // TODO make static
    // Dark icon set: used for buttons in their normal (unhighlighted) state.
    final EnumMap<Action, Integer> iconsDark = new EnumMap<Action, Integer>(Action.class);
    iconsDark.put(Action.ACTION_MENU, R.drawable.ic_action_overflow);
    iconsDark.put(Action.COMMENTS_SWITCH, R.drawable.ic_action_comments_dark);
    iconsDark.put(Action.LINK_SWITCH,
            imageUrl != null ? R.drawable.ic_action_image_dark : R.drawable.ic_action_page_dark);
    iconsDark.put(Action.UPVOTE, R.drawable.action_upvote_dark);
    iconsDark.put(Action.DOWNVOTE, R.drawable.action_downvote_dark);
    iconsDark.put(Action.SAVE, R.drawable.ic_action_star_filled_dark);
    iconsDark.put(Action.HIDE, R.drawable.ic_action_cross_dark);
    iconsDark.put(Action.REPLY, R.drawable.ic_action_reply_dark);
    iconsDark.put(Action.EXTERNAL, R.drawable.ic_action_globe_dark);
    iconsDark.put(Action.SAVE_IMAGE, R.drawable.ic_action_save_dark);
    iconsDark.put(Action.SHARE, R.drawable.ic_action_share_dark);
    iconsDark.put(Action.COPY, R.drawable.ic_action_copy_dark);
    iconsDark.put(Action.USER_PROFILE, R.drawable.ic_action_person_dark);
    iconsDark.put(Action.PROPERTIES, R.drawable.ic_action_info_dark);

    // Light icon set: used on a white background when the action is already
    // applied to the post (upvoted/downvoted/saved/hidden) -- see below.
    final EnumMap<Action, Integer> iconsLight = new EnumMap<Action, Integer>(Action.class);
    iconsLight.put(Action.ACTION_MENU, R.drawable.ic_action_overflow);
    iconsLight.put(Action.COMMENTS_SWITCH, R.drawable.ic_action_comments_light);
    iconsLight.put(Action.LINK_SWITCH,
            imageUrl != null ? R.drawable.ic_action_image_light : R.drawable.ic_action_page_light);
    iconsLight.put(Action.UPVOTE, R.drawable.action_upvote_light);
    iconsLight.put(Action.DOWNVOTE, R.drawable.action_downvote_light);
    iconsLight.put(Action.SAVE, R.drawable.ic_action_star_filled_light);
    iconsLight.put(Action.HIDE, R.drawable.ic_action_cross_light);
    iconsLight.put(Action.REPLY, R.drawable.ic_action_reply_light);
    iconsLight.put(Action.EXTERNAL, R.drawable.ic_action_globe_light);
    iconsLight.put(Action.SAVE_IMAGE, R.drawable.ic_action_save_light);
    iconsLight.put(Action.SHARE, R.drawable.ic_action_share_light);
    iconsLight.put(Action.COPY, R.drawable.ic_action_copy_light);
    iconsLight.put(Action.USER_PROFILE, R.drawable.ic_action_person_light);
    iconsLight.put(Action.PROPERTIES, R.drawable.ic_action_info_light);

    for (final Action action : possibleItems) {

        // A "save image" button is meaningless when the post has no image.
        if (action == Action.SAVE_IMAGE && imageUrl == null)
            continue;

        if (itemsPref.contains(action)) {

            final FlatImageButton ib = new FlatImageButton(activity);

            final int buttonPadding = General.dpToPixels(activity, 10);
            ib.setPadding(buttonPadding, buttonPadding, buttonPadding, buttonPadding);

            // Highlight (white background + light icon) when the action's
            // state is already applied to the post.
            if (action == Action.UPVOTE && isUpvoted() || action == Action.DOWNVOTE && isDownvoted()
                    || action == Action.SAVE && isSaved() || action == Action.HIDE && isHidden()) {

                ib.setBackgroundColor(Color.WHITE);
                ib.setImageResource(iconsLight.get(action));

            } else {
                ib.setImageResource(iconsDark.get(action));
                // TODO highlight on click
            }

            ib.setOnClickListener(new View.OnClickListener() {
                public void onClick(View v) {

                    final Action actionToTake;

                    // Toggling actions invert when the state is already applied
                    // (e.g. pressing upvote on an upvoted post un-votes it).
                    switch (action) {
                    case UPVOTE:
                        actionToTake = isUpvoted() ? Action.UNVOTE : Action.UPVOTE;
                        break;

                    case DOWNVOTE:
                        actionToTake = isDownvoted() ? Action.UNVOTE : Action.DOWNVOTE;
                        break;

                    case SAVE:
                        actionToTake = isSaved() ? Action.UNSAVE : Action.SAVE;
                        break;

                    case HIDE:
                        actionToTake = isHidden() ? Action.UNHIDE : Action.HIDE;
                        break;

                    default:
                        actionToTake = action;
                        break;
                    }

                    onActionMenuItemSelected(RedditPreparedPost.this, activity, actionToTake);
                    overlay.hide();
                }
            });

            toolbar.addItem(ib);
        }
    }

    return toolbar;
}

From source file:org.squashtest.tm.service.internal.requirement.VerifiedRequirementsManagerServiceImpl.java

@SuppressWarnings("unchecked")
// Computes, per execution status, a step count for stepped requirement
// coverage, using fast-passed ITPI statuses (no per-step executions).
// For each coverage whose verifying test case has a verified ITPI status,
// the status is forwarded to all of the coverage's verifying steps.
//
// NOTE(review): result.put() OVERWRITES any count previously stored for the
// same status by an earlier coverage/status -- the sibling method
// findResultsForSteppedCoverageWithExecution accumulates instead. Confirm
// whether overwriting (rather than summing) is intended here.
private Map<ExecutionStatus, Long> findResultsForSteppedCoverageWithoutExecution(
        List<RequirementVersionCoverage> stepedCoverage, List<Long> testCaseIds, List<Long> iterationsIds,
        Map<Long, Long> nbSteppedCoverageByTestCase) {
    MultiMap testCaseExecutionStatus = iterationDao.findVerifiedITPI(testCaseIds, iterationsIds);
    Map<ExecutionStatus, Long> result = new EnumMap<>(ExecutionStatus.class);
    for (RequirementVersionCoverage cov : stepedCoverage) {
        Long tcId = cov.getVerifyingTestCase().getId();
        List<TestCaseExecutionStatus> tcsStatus = (List<TestCaseExecutionStatus>) testCaseExecutionStatus
                .get(tcId);
        // MultiMap.get returns null when the test case has no verified ITPI.
        if (tcsStatus != null) {
            for (TestCaseExecutionStatus tcStatus : tcsStatus) {
                //For each cov we must count one status per steps. So fast pass status is forwarded to steps...
                result.put(tcStatus.getStatus(), (long) cov.getVerifyingSteps().size());
            }
        }
    }
    return result;
}

From source file:org.apache.hadoop.corona.ConfigManager.java

/**
 * Reload the general configuration and update all in-memory values. Should
 * be invoked under synchronization.
 *
 * Parses the cluster-wide JSON configuration file, extracts the section
 * corresponding to corona.xml, validates each recognized key (falling back
 * to its default on an illegal value), and then atomically publishes all
 * new values under the object lock.
 *
 * @throws IOException
 * @throws SAXException
 * @throws ParserConfigurationException
 * @throws JSONException
 */
private void reloadJsonConfig() throws IOException, SAXException, ParserConfigurationException, JSONException {
    Map<ResourceType, Long> newTypeToNodeWait;
    Map<ResourceType, Long> newTypeToRackWait;
    ScheduleComparator newDefaultPoolComparator = DEFAULT_POOL_COMPARATOR;
    double newShareStarvingRatio = DEFAULT_SHARE_STARVING_RATIO;
    long newMinPreemptPeriod = DEFAULT_MIN_PREEMPT_PERIOD;
    int newGrantsPerIteration = DEFAULT_GRANTS_PER_ITERATION;
    long newStarvingTimeForMinimum = DEFAULT_STARVING_TIME_FOR_MINIMUM;
    long newStarvingTimeForShare = DEFAULT_STARVING_TIME_FOR_SHARE;
    long newPreemptedTaskMaxRunningTime = DEFAULT_PREEMPT_TASK_MAX_RUNNING_TIME;
    int newPreemptionRounds = DEFAULT_PREEMPTION_ROUNDS;
    boolean newScheduleFromNodeToSession = DEFAULT_SCHEDULE_FROM_NODE_TO_SESSION;

    newTypeToNodeWait = new EnumMap<ResourceType, Long>(ResourceType.class);
    newTypeToRackWait = new EnumMap<ResourceType, Long>(ResourceType.class);
    Map<PoolInfo, PoolInfo> newPoolInfoToRedirect = new HashMap<PoolInfo, PoolInfo>();
    // Default every resource type to a zero locality wait.
    for (ResourceType type : TYPES) {
        newTypeToNodeWait.put(type, 0L);
        newTypeToRackWait.put(type, 0L);
    }

    // All the configuration files for a cluster are placed in one large
    // json object. This large json object has keys that map to smaller
    // json objects which hold the same resources as xml configuration
    // files. Here, we try to parse the json object that corresponds to
    // corona.xml
    File jsonConfigFile = new File(configFileName);
    JSONObject json;
    // try-with-resources so the stream is closed even if parsing throws
    // (previously the FileInputStream was never closed).
    try (InputStream in = new BufferedInputStream(new FileInputStream(jsonConfigFile))) {
        json = conf.instantiateJsonObject(in);
    }
    json = json.getJSONObject(conf.xmlToThrift(CoronaConf.DEFAULT_CONFIG_FILE));
    Iterator<String> keys = json.keys();
    while (keys.hasNext()) {
        String key = keys.next();
        if (!json.isNull(key)) {
            // Keys are mutually exclusive, so an else-if chain avoids
            // re-testing every key name on each iteration.
            if (key.equals("localityWaits")) {
                JSONObject jsonTypes = json.getJSONObject(key);
                loadLocalityWaits(jsonTypes, newTypeToNodeWait, newTypeToRackWait);
            } else if (key.equals("defaultSchedulingMode")) {
                newDefaultPoolComparator = ScheduleComparator.valueOf(json.getString(key));
            } else if (key.equals("shareStarvingRatio")) {
                newShareStarvingRatio = json.getDouble(key);
                if (newShareStarvingRatio < 0 || newShareStarvingRatio > 1.0) {
                    LOG.error("Illegal shareStarvingRatio:" + newShareStarvingRatio);
                    newShareStarvingRatio = DEFAULT_SHARE_STARVING_RATIO;
                }
            } else if (key.equals("grantsPerIteration")) {
                newGrantsPerIteration = json.getInt(key);
                // Bug fix: originally tested newMinPreemptPeriod (copy-paste),
                // so an illegal grantsPerIteration was never rejected.
                if (newGrantsPerIteration < 0) {
                    LOG.error("Illegal grantsPerIteration: " + newGrantsPerIteration);
                    newGrantsPerIteration = DEFAULT_GRANTS_PER_ITERATION;
                }
            } else if (key.equals("minPreemptPeriod")) {
                newMinPreemptPeriod = json.getLong(key);
                if (newMinPreemptPeriod < 0) {
                    LOG.error("Illegal minPreemptPeriod: " + newMinPreemptPeriod);
                    newMinPreemptPeriod = DEFAULT_MIN_PREEMPT_PERIOD;
                }
            } else if (key.equals("starvingTimeForShare")) {
                newStarvingTimeForShare = json.getLong(key);
                if (newStarvingTimeForShare < 0) {
                    LOG.error("Illegal starvingTimeForShare:" + newStarvingTimeForShare);
                    newStarvingTimeForShare = DEFAULT_STARVING_TIME_FOR_SHARE;
                }
            } else if (key.equals("starvingTimeForMinimum")) {
                newStarvingTimeForMinimum = json.getLong(key);
                if (newStarvingTimeForMinimum < 0) {
                    LOG.error("Illegal starvingTimeForMinimum:" + newStarvingTimeForMinimum);
                    newStarvingTimeForMinimum = DEFAULT_STARVING_TIME_FOR_MINIMUM;
                }
            } else if (key.equals("preemptedTaskMaxRunningTime")) {
                newPreemptedTaskMaxRunningTime = json.getLong(key);
                if (newPreemptedTaskMaxRunningTime < 0) {
                    LOG.error("Illegal preemptedTaskMaxRunningTime:" + newPreemptedTaskMaxRunningTime);
                    newPreemptedTaskMaxRunningTime = DEFAULT_PREEMPT_TASK_MAX_RUNNING_TIME;
                }
            } else if (key.equals("preemptionRounds")) {
                newPreemptionRounds = json.getInt(key);
                if (newPreemptionRounds < 0) {
                    // Bug fix: log message previously said
                    // "preemptedTaskMaxRunningTime" for this key.
                    LOG.error("Illegal preemptionRounds:" + newPreemptionRounds);
                    newPreemptionRounds = DEFAULT_PREEMPTION_ROUNDS;
                }
            } else if (key.equals("scheduleFromNodeToSession")) {
                newScheduleFromNodeToSession = json.getBoolean(key);
            } else if (key.equals(REDIRECT_TAG_NAME)) {
                JSONArray jsonPoolInfoToRedirect = json.getJSONArray(key);
                loadPoolInfoToRedirect(jsonPoolInfoToRedirect, newPoolInfoToRedirect);
            }
        }
    }
    // Publish all new values atomically so readers never see a mixed config.
    synchronized (this) {
        this.typeToNodeWait = newTypeToNodeWait;
        this.typeToRackWait = newTypeToRackWait;
        this.defaultPoolComparator = newDefaultPoolComparator;
        this.shareStarvingRatio = newShareStarvingRatio;
        this.minPreemptPeriod = newMinPreemptPeriod;
        this.grantsPerIteration = newGrantsPerIteration;
        this.starvingTimeForMinimum = newStarvingTimeForMinimum;
        this.starvingTimeForShare = newStarvingTimeForShare;
        this.preemptedTaskMaxRunningTime = newPreemptedTaskMaxRunningTime;
        this.preemptionRounds = newPreemptionRounds;
        this.scheduleFromNodeToSession = newScheduleFromNodeToSession;
        this.poolInfoToRedirect = newPoolInfoToRedirect;
    }
}

From source file:org.squashtest.tm.service.internal.requirement.VerifiedRequirementsManagerServiceImpl.java

@SuppressWarnings("unchecked")
// Computes, per execution status, the number of execution steps for stepped
// requirement coverage, using the actual executions of the verifying steps.
// A step's status comes from its execution step, unless the ITPI was
// fast-passed after the execution, in which case the ITPI status wins.
private Map<ExecutionStatus, Long> findResultsForSteppedCoverageWithExecution(
        List<RequirementVersionCoverage> stepedCoverage, List<Long> mainVersionTCWithItpiIds,
        Map<Long, Long> nbSimpleCoverageByTestCase) {
    List<Long> testStepsIds = new ArrayList<>();
    Map<ExecutionStatus, Long> result = new EnumMap<>(ExecutionStatus.class);
    //First we compute all testStep id in a list, to allow multiple occurrence of the same step.
    //Witch is not a good practice but is allowed by the app so we must take this possibility in account for calculations.
    for (RequirementVersionCoverage cov : stepedCoverage) {
        Long tcId = cov.getVerifyingTestCase().getId();
        if (mainVersionTCWithItpiIds.contains(tcId)) {
            for (ActionTestStep step : cov.getVerifyingSteps()) {
                testStepsIds.add(step.getId());
            }
        }
    }
    //now retrieve a list of exec steps
    MultiMap executionsStatus = executionStepDao.findStepExecutionsStatus(mainVersionTCWithItpiIds,
            testStepsIds);
    for (Long testStepsId : testStepsIds) {
        List<ExecutionStep> executionSteps = (List<ExecutionStep>) executionsStatus.get(testStepsId);
        // Guard against steps with no recorded execution: MultiMap.get
        // returns null for absent keys (the sibling method
        // findResultsForSteppedCoverageWithoutExecution already guards this),
        // which previously caused an NPE in the for-each below.
        if (executionSteps == null) {
            continue;
        }
        for (ExecutionStep executionStep : executionSteps) {
            //Here come horrible code to detect if ITPI was fast passed AFTER execution.
            //We have no attribute in model to help us, and no time to develop a proper solution.
            //So we'll use execution date on itpi and exec. If the delta between two date is superior to 2 seconds,
            //we consider it's a fast pass
            Execution execution = executionStep.getExecution();
            IterationTestPlanItem itpi = execution.getTestPlan();
            Date itpiDateLastExecutedOn = itpi.getLastExecutedOn();
            Date execDateLastExecutedOn = execution.getLastExecutedOn();
            ExecutionStatus status = ExecutionStatus.READY;
            //if execution dates are null, the execution was only READY, so we don't compare dates to avoid npe
            if (itpiDateLastExecutedOn != null && execDateLastExecutedOn != null) {
                DateTime itpiLastExecutedOn = new DateTime(itpi.getLastExecutedOn().getTime());
                DateTime execLastExecutedOn = new DateTime(execution.getLastExecutedOn().getTime());
                Interval interval = new Interval(execLastExecutedOn, itpiLastExecutedOn);
                boolean fastPass = interval.toDuration().isLongerThan(new Duration(2000L));
                //If we have a fast path use it for step status
                status = fastPass ? itpi.getExecutionStatus() : executionStep.getExecutionStatus();
            }
            // Accumulate the per-status count.
            Long memo = result.get(status);
            if (memo == null) {
                result.put(status, 1L);
            } else {
                result.put(status, memo + 1);
            }
        }
    }
    return result;
}

From source file:org.apache.hadoop.corona.NodeManager.java

/**
 * Processes a heartbeat from a cluster node: registers the node if it is
 * new, records the heartbeat, and reconciles the set of applications
 * (resource types) the node reports against what was previously known.
 *
 * @param clusterNodeInfo the node that is heartbeating
 * @return true if this is a new node or its application set changed, false otherwise
 * @throws DisallowedNode if the node's host is not allowed and the node is
 *         not already known
 */
public boolean heartbeat(ClusterNodeInfo clusterNodeInfo) throws DisallowedNode {
    ClusterNode node = nameToNode.get(clusterNodeInfo.name);
    // Disallowed hosts: a known node still gets its heartbeat recorded,
    // an unknown one is rejected outright.
    if (!canAllowNode(clusterNodeInfo.getAddress().getHost())) {
        if (node != null) {
            node.heartbeat(clusterNodeInfo);
        } else {
            throw new DisallowedNode(clusterNodeInfo.getAddress().getHost());
        }
        return false;
    }
    boolean newNode = false;
    Map<ResourceType, String> currentResources = clusterNodeInfo.getResourceInfos();
    if (currentResources == null) {
        currentResources = new EnumMap<ResourceType, String>(ResourceType.class);
    }

    // First heartbeat from this node: register it.
    if (node == null) {
        LOG.info("Adding node with heartbeat: " + clusterNodeInfo.toString());
        node = new ClusterNode(clusterNodeInfo, topologyCache.getNode(clusterNodeInfo.address.host),
                cpuToResourcePartitioning);
        addNode(node, currentResources);
        newNode = true;
    }

    node.heartbeat(clusterNodeInfo);

    boolean appsChanged = false;
    // NOTE(review): assumes nameToApps always has an entry for this node
    // (presumably populated by addNode above) -- the entrySet() call below
    // would NPE otherwise; confirm against addNode's implementation.
    Map<ResourceType, String> prevResources = nameToApps.get(clusterNodeInfo.name);
    // Apps that disappeared or whose info string changed count as deleted.
    Set<ResourceType> deletedApps = null;
    for (Map.Entry<ResourceType, String> entry : prevResources.entrySet()) {
        String newAppInfo = currentResources.get(entry.getKey());
        String oldAppInfo = entry.getValue();
        if (newAppInfo == null || !newAppInfo.equals(oldAppInfo)) {
            if (deletedApps == null) {
                deletedApps = EnumSet.noneOf(ResourceType.class);
            }
            deletedApps.add(entry.getKey());
            appsChanged = true;
        }
    }
    // Apps that are new or whose info string changed count as added (a
    // changed app therefore appears in both sets: removed then re-added).
    Map<ResourceType, String> addedApps = null;
    for (Map.Entry<ResourceType, String> entry : currentResources.entrySet()) {
        String newAppInfo = entry.getValue();
        String oldAppInfo = prevResources.get(entry.getKey());
        if (oldAppInfo == null || !oldAppInfo.equals(newAppInfo)) {
            if (addedApps == null) {
                addedApps = new EnumMap<ResourceType, String>(ResourceType.class);
            }
            addedApps.put(entry.getKey(), entry.getValue());
            appsChanged = true;
        }
    }
    if (deletedApps != null) {
        for (ResourceType deleted : deletedApps) {
            clusterManager.nodeAppRemoved(clusterNodeInfo.name, deleted);
        }
    }
    if (addedApps != null) {
        for (Map.Entry<ResourceType, String> added : addedApps.entrySet()) {
            addAppToNode(node, added.getKey(), added.getValue());
        }
    }

    updateRunnability(node);
    return newNode || appsChanged;
}

From source file:com.zimbra.cs.db.DbMailItem.java

/**
 * Loads the ids of mail items whose index entry is deferred (index_id = 0)
 * or stale (index_id = 1), grouped by item type. Scans the MAIL_ITEM table
 * and, when the dumpster is enabled for the mailbox, the MAIL_ITEM_DUMPSTER
 * table as well.
 *
 * @param conn the database connection to query with
 * @param mbox the mailbox whose items are examined
 * @return a multimap from item type to the set of matching item ids
 * @throws ServiceException if either query fails
 */
public static SetMultimap<MailItem.Type, Integer> getIndexDeferredIds(DbConnection conn, Mailbox mbox)
        throws ServiceException {
    SetMultimap<MailItem.Type, Integer> result = Multimaps.newSetMultimap(
            new EnumMap<MailItem.Type, Collection<Integer>>(MailItem.Type.class), new Supplier<Set<Integer>>() {
                @Override
                public Set<Integer> get() {
                    return new HashSet<Integer>();
                }
            });

    collectIndexDeferredIds(conn, mbox, false, result); // from MAIL_ITEM table
    if (mbox.dumpsterEnabled()) {
        collectIndexDeferredIds(conn, mbox, true, result); // also from MAIL_ITEM_DUMPSTER table
    }
    return result;
}

/**
 * Runs the deferred/stale-index query against MAIL_ITEM (or, when
 * {@code dumpster} is true, MAIL_ITEM_DUMPSTER) and adds each (type, id)
 * row to {@code result}.
 */
private static void collectIndexDeferredIds(DbConnection conn, Mailbox mbox, boolean dumpster,
        SetMultimap<MailItem.Type, Integer> result) throws ServiceException {
    PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        stmt = conn.prepareStatement("SELECT type, id FROM " + getMailItemTableName(mbox, dumpster) + " WHERE "
                + IN_THIS_MAILBOX_AND + "index_id <= 1"); // 0: deferred, 1: stale
        setMailboxId(stmt, mbox, 1);
        rs = stmt.executeQuery();
        while (rs.next()) {
            result.put(MailItem.Type.of(rs.getByte(1)), rs.getInt(2));
        }
    } catch (SQLException e) {
        throw ServiceException.FAILURE(dumpster ? "Failed to query index deferred IDs from dumpster"
                : "Failed to query index deferred IDs", e);
    } finally {
        conn.closeQuietly(rs);
        conn.closeQuietly(stmt);
    }
}