Example usage for java.util.TreeMap size()

List of usage examples for java.util.TreeMap size()

Introduction

This page collects example usages of the java.util.TreeMap size() method.

Prototype

int size()

Document

Returns the number of key-value mappings in this map.
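
Before the project examples below, here is a minimal, self-contained sketch (not drawn from any of the listed projects) showing how size() behaves as mappings are added, replaced, and removed:

import java.util.TreeMap;

public class TreeMapSizeDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> scores = new TreeMap<String, Integer>();
        System.out.println(scores.size()); // 0 - a new map is empty

        scores.put("alice", 10);
        scores.put("bob", 20);
        scores.put("alice", 30);           // same key again - the value is replaced, not added
        System.out.println(scores.size()); // 2 - duplicate keys do not grow the map

        scores.remove("bob");
        System.out.println(scores.size()); // 1
    }
}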

Usage

From source file:org.mule.devkit.doclet.ClassInfo.java

public AttributeInfo[] selfAttributes() {
    if (mSelfAttributes == null) {
        TreeMap<FieldInfo, AttributeInfo> attrs = new TreeMap<FieldInfo, AttributeInfo>();

        // the ones in the class comment won't have any methods
        for (AttrTagInfo tag : comment().attrTags()) {
            FieldInfo field = tag.reference();
            if (field != null) {
                AttributeInfo attr = attrs.get(field);
                if (attr == null) {
                    attr = new AttributeInfo(this, field);
                    attrs.put(field, attr);
                }
                tag.setAttribute(attr);
            }
        }

        // in the methods
        for (MethodInfo m : selfMethods()) {
            for (AttrTagInfo tag : m.comment().attrTags()) {
                FieldInfo field = tag.reference();
                if (field != null) {
                    AttributeInfo attr = attrs.get(field);
                    if (attr == null) {
                        attr = new AttributeInfo(this, field);
                        attrs.put(field, attr);
                    }
                    tag.setAttribute(attr);
                    attr.methods.add(m);
                }
            }
        }

        // constructors too
        for (MethodInfo m : constructors()) {
            for (AttrTagInfo tag : m.comment().attrTags()) {
                FieldInfo field = tag.reference();
                if (field != null) {
                    AttributeInfo attr = attrs.get(field);
                    if (attr == null) {
                        attr = new AttributeInfo(this, field);
                        attrs.put(field, attr);
                    }
                    tag.setAttribute(attr);
                    attr.methods.add(m);
                }
            }
        }

        mSelfAttributes = attrs.values().toArray(new AttributeInfo[attrs.size()]);
        Arrays.sort(mSelfAttributes, AttributeInfo.comparator);
    }
    return mSelfAttributes;
}
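
The last two lines of selfAttributes() show a common idiom: size() pre-sizes the destination array for values().toArray(...), and values() hands the elements back in key order. A minimal sketch of the same idiom with made-up data:

import java.util.TreeMap;

public class ValuesToArraySketch {
    public static void main(String[] args) {
        TreeMap<String, String> map = new TreeMap<String, String>();
        map.put("b", "two");
        map.put("a", "one");
        // size() gives the exact length for the destination array; values() iterates in key order.
        String[] inKeyOrder = map.values().toArray(new String[map.size()]);
        System.out.println(inKeyOrder[0] + ", " + inKeyOrder[1]); // prints: one, two
    }
}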

From source file:org.jahia.admin.sites.ManageSites.java

/**
 * Display page to let user choose a set of templates.
 *
 * @param request  Servlet request.
 * @param response Servlet response.
 * @param session  HttpSession object.
 */
private void displayTemplateSetChoice(HttpServletRequest request, HttpServletResponse response,
        HttpSession session) throws IOException, ServletException {
    try {
        logger.debug("Display template set choice started ");

        // retrieve previous form values...
        String jahiaDisplayMessage = (String) request.getAttribute(CLASS_NAME + "jahiaDisplayMessage");
        // set default values...
        if (jahiaDisplayMessage == null) {
            jahiaDisplayMessage = Jahia.COPYRIGHT;
        }

        String selectedTmplSet = (String) request.getAttribute("selectedTmplSet");

        TreeMap<String, JCRNodeWrapper> orderedTemplateSets = getTemplatesSets();

        // try to select the default set if not selected
        if (selectedTmplSet == null) {
            selectedTmplSet = SettingsBean.getInstance().getPropertiesFile()
                    .getProperty("default_templates_set", orderedTemplateSets.firstKey());
        }

        JCRNodeWrapper selectedPackage = selectedTmplSet != null
                && orderedTemplateSets.containsKey(selectedTmplSet) ? orderedTemplateSets.get(selectedTmplSet)
                        : orderedTemplateSets.get(orderedTemplateSets.firstKey());

        request.setAttribute("selectedTmplSet", selectedTmplSet);
        request.setAttribute("tmplSets", orderedTemplateSets.values());
        request.setAttribute("modules",
                getModulesOfType(JahiaTemplateManagerService.MODULE_TYPE_MODULE).values());
        request.setAttribute("jahiApps",
                getModulesOfType(JahiaTemplateManagerService.MODULE_TYPE_JAHIAPP).values());
        request.setAttribute("selectedModules", jParams.getParameterValues("selectedModules"));
        request.setAttribute("selectedPackage", selectedPackage);
        Locale currentLocale = (Locale) session.getAttribute(ProcessingContext.SESSION_LOCALE);
        if (currentLocale == null) {
            currentLocale = request.getLocale();
        }
        Locale selectedLocale = (Locale) session.getAttribute(CLASS_NAME + "selectedLocale");
        if (selectedLocale == null) {
            selectedLocale = LanguageCodeConverters
                    .languageCodeToLocale(Jahia.getSettings().getDefaultLanguageCode());
        }
        session.setAttribute(CLASS_NAME + "selectedLocale", selectedLocale);
        request.setAttribute("selectedLocale", selectedLocale);
        request.setAttribute("currentLocale", currentLocale);

        logger.debug("Nb template set found " + orderedTemplateSets.size());

        // redirect...
        JahiaAdministration.doRedirect(request, response, session, JSP_PATH + "site_choose_template_set.jsp");

        // set default values...
        session.setAttribute(CLASS_NAME + "jahiaDisplayMessage", Jahia.COPYRIGHT);
    } catch (RepositoryException e) {
        throw new ServletException(e);
    }
}

From source file:com.impetus.ankush2.framework.monitor.AbstractMonitor.java

/**
 * Builds the memory usage heat map data.
 */
public void memoryheatmap() {
    // cluster nodes
    Set<Node> nodes = dbCluster.getNodes(); // dbCluster.getSortedNodesByIp();
    // List of node states to include when building the heat map data.
    List<String> includeStates = new ArrayList<String>();
    includeStates.add(com.impetus.ankush2.constant.Constant.Node.State.DEPLOYED.toString());
    // adding removing state for node
    includeStates.add(com.impetus.ankush2.constant.Constant.Node.State.REMOVING.toString());
    // heat map data object.
    TreeMap heatMapData = new TreeMap();
    // iterating over the nodes
    for (Node node : nodes) {

        // Skip nodes whose state is not in the include list.
        if (!includeStates.contains(node.getState())) {
            continue;
        }

        // getting node monitoring data.
        NodeMonitoring nodeMonitoring = new MonitoringManager().getMonitoringData(node.getId());

        String usageValue = null;
        // if node monitoring data, its monitoring info and its memory info are not null
        if (nodeMonitoring != null && nodeMonitoring.getMonitoringInfo() != null
                && nodeMonitoring.getMonitoringInfo().getMemoryInfos() != null) {
            // get usage value.
            Double usageValueDouble = nodeMonitoring.getMonitoringInfo().getMemoryInfos().get(0)
                    .getUsedPercentage();
            // current usage value.
            if (usageValueDouble != null) {
                usageValue = formator.format(usageValueDouble).toString();
            }
        }

        // Getting the event status for the memory usage.
        DBEventManager eventManager = new DBEventManager();
        Event event = eventManager.getEvent(null, node.getPublicIp(), null,
                com.impetus.ankush2.constant.Constant.Component.Name.AGENT, Constant.Alerts.Metric.MEMORY,
                null);

        // Getting the severity value.
        String status = Event.Severity.NORMAL.toString();
        if (event != null) {
            status = event.getSeverity().toString();
        }

        // If the agent is down, mark the status as unavailable.
        if (DBServiceManager.getManager().isAgentDown(node.getPublicIp())) {
            usageValue = "0";
            status = Constant.Alerts.Severity.UNAVAILABLE;
        }

        // Getting rack info for node.
        String rackId = getRackId(node);
        // update the rack heat map data and put it in main heat map data.
        heatMapData.put(rackId, updateRackHeatMapData(rackId, node, usageValue, status, heatMapData));
    }
    // setting rack info in map.
    result.put(com.impetus.ankush2.constant.Constant.Keys.RACKINFO, heatMapData.values());
    // setting total rack.
    result.put(com.impetus.ankush2.constant.Constant.Keys.TOTALRACKS, heatMapData.size());
}

From source file:com.impetus.ankush2.framework.monitor.AbstractMonitor.java

/**
 * Builds the CPU usage heat map data.
 */
public void cpuheatmap() {
    // cluster nodes
    Set<Node> nodes = dbCluster.getNodes();// getSortedNodesByIp();
    // List of node states to include when building the heat map data.
    List<String> includeStates = new ArrayList<String>();
    // adding deployed state for node
    includeStates.add(com.impetus.ankush2.constant.Constant.Node.State.DEPLOYED.toString());
    // adding removing state for node
    includeStates.add(com.impetus.ankush2.constant.Constant.Node.State.REMOVING.toString());
    // heat map data object.
    TreeMap heatMapData = new TreeMap();
    // iterating over the nodes.
    for (Node node : nodes) {

        // Skip nodes whose state is not in the include list.
        if (!includeStates.contains(node.getState())) {
            continue;
        }
        // node monitoring object.
        NodeMonitoring nodeMonitoring = new MonitoringManager().getMonitoringData(node.getId());

        // usage value.
        String usageValue = null;
        // if node monitoring data, its monitoring info and its uptime info are not null
        if (nodeMonitoring != null && nodeMonitoring.getMonitoringInfo() != null
                && nodeMonitoring.getMonitoringInfo().getUptimeInfos() != null) {
            // get usage value.
            Double usageValueDouble = nodeMonitoring.getMonitoringInfo().getUptimeInfos().get(0).getCpuUsage();
            // current usage value.
            if (usageValueDouble != null) {
                usageValue = formator.format(usageValueDouble).toString();
            }
        }

        // Getting the status value for the CPU Usage
        DBEventManager eventManager = new DBEventManager();
        // Getting the event for the node.
        Event event = eventManager.getEvent(null, node.getPublicIp(), null,
                com.impetus.ankush2.constant.Constant.Component.Name.AGENT, Constant.Alerts.Metric.CPU, null);

        // Getting the severity value.
        String status = Event.Severity.NORMAL.toString();
        if (event != null) {
            status = event.getSeverity().toString();
        }

        // If the agent is down, mark the status as unavailable.
        if (DBServiceManager.getManager().isAgentDown(node.getPublicIp())) {
            usageValue = "0";
            status = Constant.Alerts.Severity.UNAVAILABLE;
        }
        // Getting rack info for node.
        String rackId = getRackId(node);
        // update the rack heat map data and put it in main heat map data.
        heatMapData.put(rackId, updateRackHeatMapData(rackId, node, usageValue, status, heatMapData));
    }
    // setting rack info in map.
    result.put(com.impetus.ankush2.constant.Constant.Keys.RACKINFO, heatMapData.values());
    // setting total rack.
    result.put(com.impetus.ankush2.constant.Constant.Keys.TOTALRACKS, heatMapData.size());
}
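
Both heat map methods key their TreeMap by rack id, so heatMapData.size() ends up counting distinct racks rather than nodes. A stripped-down sketch of that aggregation pattern, using invented node and rack names:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class RackCountSketch {
    public static void main(String[] args) {
        TreeMap<String, List<String>> byRack = new TreeMap<String, List<String>>();
        String[][] nodes = { { "node1", "rack-a" }, { "node2", "rack-a" }, { "node3", "rack-b" } };
        for (String[] node : nodes) {
            List<String> members = byRack.get(node[1]);
            if (members == null) {
                members = new ArrayList<String>();
                byRack.put(node[1], members);
            }
            members.add(node[0]);
        }
        // One entry per distinct rack, no matter how many nodes were seen.
        System.out.println(byRack.size()); // prints 2
    }
}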

From source file:org.apache.hadoop.mapred.HFSPScheduler.java

private void assignSizeBasedTasks(TaskType type, HelperForType helper,
        TreeMap<JobDurationInfo, JobInProgress> sizeBasedJobs,
        TreeMap<JobDurationInfo, TaskStatuses> taskStatusesSizeBased) throws IOException {

    final boolean isMap = type == TaskType.MAP;
    int totClaimedSlots = 0;

    // StringBuilder builder = new StringBuilder("SBJobs(");
    // builder.append(type).append("): [");
    // boolean first = true;
    // for (Entry<JobDurationInfo,JobInProgress> jip : sizeBasedJobs.entrySet())
    // {
    // if (first)
    // first = false;
    // else
    // builder.append(",");
    // builder.append(jip.getValue().getJobID())
    // .append(" -> ")
    // .append(jip.getKey().getPhaseDuration())
    // .append("/")
    // .append(jip.getKey().getPhaseTotalDuration())
    // .append(" p: ")
    // .append(this.getNumPendingNewTasks(jip.getValue(), type))
    // .append(" r: ")
    // .append(this.getNumRunningTasks(jip.getValue(), type))
    // .append(" f: ")
    // .append(this.getNumFinishedTasks(jip.getValue(), type));
    // }
    // builder.append("]");
    // LOG.debug(builder.toString());

    for (Entry<JobDurationInfo, JobInProgress> entry : sizeBasedJobs.entrySet()) {

        JobInProgress jip = entry.getValue();
        JobDurationInfo jdi = entry.getKey();
        TaskStatuses taskStatuses = taskStatusesSizeBased.get(jdi);

        if (!this.isJobReadyForTypeScheduling(jip, type)) {
            if (LOG.isDebugEnabled() && jip.getStatus().getRunState() != JobStatus.SUCCEEDED) {
                LOG.debug(
                        "SIZEBASED(" + jip.getJobID() + ":" + type + "):" + "job is not ready for scheduling ("
                                + "status: " + JobStatus.getJobRunState(jip.getStatus().getRunState())
                                + ", mapProgress: " + jip.getStatus().mapProgress() + ", reduceProgress: "
                                + jip.getStatus().reduceProgress() + ", scheduleReduces: "
                                + jip.scheduleReduces() + ")");
            }
            continue;
        }

        // NEW
        int pendingNewTasks = this.getNumPendingNewTasks(jip, type);
        int pendingResumableTasks = (taskStatuses == null) ? 0 : taskStatuses.suspendedTaskStatuses.size();

        int totAvailableSizeBasedSlots = helper.totAvailableSizeBasedSlots();

        // missing slots for resumable
        int missingResumableSlots = 0;
        if (pendingResumableTasks > 0 && pendingResumableTasks > totAvailableSizeBasedSlots) {
            if (totAvailableSizeBasedSlots <= 0)
                missingResumableSlots = pendingResumableTasks;
            else
                missingResumableSlots = pendingResumableTasks - totAvailableSizeBasedSlots;
            totAvailableSizeBasedSlots = (pendingResumableTasks > totAvailableSizeBasedSlots) ? 0
                    : totAvailableSizeBasedSlots - pendingResumableTasks;
        }

        int missingNewSlots = 0;
        if (pendingNewTasks > 0 && pendingNewTasks > totAvailableSizeBasedSlots) {
            if (totAvailableSizeBasedSlots <= 0)
                missingNewSlots = pendingNewTasks;
            else
                missingNewSlots = pendingNewTasks - totAvailableSizeBasedSlots;
            totAvailableSizeBasedSlots = (pendingNewTasks > totAvailableSizeBasedSlots) ? 0
                    : totAvailableSizeBasedSlots - pendingNewTasks;
        }

        TreeMap<TaskAttemptID, TaskStatus> suspended = null;
        if (taskStatuses != null)
            suspended = taskStatuses.suspendedTaskStatuses;

        if (pendingNewTasks > 0 || pendingResumableTasks > 0 || (suspended != null && !suspended.isEmpty())) {
            LOG.debug(jip.getJobID() + ":" + type + " (d: " + jdi.getPhaseDuration() + "/"
                    + jdi.getPhaseTotalDuration() + "):" + " pendingNewTasks: " + pendingNewTasks
                    + " pendingResumableTasks: " + pendingResumableTasks
                    // + " notResumableTasksOnThisTT: " + notResumableTasks
                    + " totAvailableSizeBasedSlots: "
                    + (helper.totAvailableSizeBasedSlots() <= 0 ? 0 : helper.totAvailableSizeBasedSlots())
                    + " currAvailableSlots: " + helper.currAvailableSlots + " => missingNewSlots: "
                    + missingNewSlots + " missingResumableSlots: " + missingResumableSlots);
        }

        if (this.preemptionStrategy.isPreemptionActive()
                && (missingNewSlots > 0 || missingResumableSlots > 0)) {
            ClaimedSlots claimedSlots = this.claimSlots(helper, Phase.SIZE_BASED, jip, missingNewSlots,
                    missingResumableSlots, totClaimedSlots, sizeBasedJobs, taskStatusesSizeBased);

            totClaimedSlots += claimedSlots.getNumPreemptedForNewTasks()
                    + claimedSlots.getNumPreemptedForResumableTasks();

            LOG.debug(jip.getJobID() + " taskStatusesOnTT: " + taskStatusesSizeBased.get(jdi)
                    + " pendingNewTasks: " + pendingNewTasks + " pendingResumableTasks: "
                    + pendingResumableTasks + " missingNewSlots: " + missingNewSlots
                    + " missingResumableSlots: " + missingResumableSlots);
        }

        while (pendingNewTasks > 0 || pendingResumableTasks > 0
                || (suspended != null && !suspended.isEmpty())) {

            if (helper.currAvailableSlots <= 0) {
                LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " no slots available on "
                        + taskHelper.ttStatus.getTrackerName());
                return;
            }

            LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " totAvailableSizeBasedSlots(): "
                    + helper.totAvailableSizeBasedSlots() + " pendingNewTasks: " + pendingNewTasks
                    + " pendingResumableTasks: " + pendingResumableTasks + " suspended("
                    + (suspended == null ? 0 : suspended.size()) + "): " + suspended);

            if (this.preemptionStrategy.isPreemptionActive() && (suspended != null && !suspended.isEmpty())) {
                TaskStatus toResume = suspended.remove(suspended.firstKey());
                // LOG.debug("RESUME: " + toResume.getTaskID() + " " +
                // toResume.getRunState());
                TaskAttemptID tAID = toResume.getTaskID();
                JobInProgress rJIP = this.taskTrackerManager.getJob(tAID.getTaskID().getJobID());
                TaskInProgress tip = rJIP.getTaskInProgress(tAID.getTaskID());
                if (this.preemptionStrategy.resume(tip, toResume)) {
                    taskHelper.resume(tAID, Phase.SIZE_BASED);
                    pendingResumableTasks -= 1;
                } else {
                    LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):" + " cannot resume " + tAID
                            + " on " + taskHelper.ttStatus.getTrackerName());
                }
            } else {

                Task task = this.obtainNewTask(jip, taskHelper.ttStatus, isMap, taskHelper.currentTime);

                if (task == null) {
                    LOG.debug("SIZEBASED(" + jip.getJobID() + ":" + type + "):"
                            + " cannot obtain slot for new task on " + taskHelper.ttStatus.getTrackerName()
                            + " (#pendingNew: " + pendingNewTasks + ", #pendingResumable: "
                            + pendingResumableTasks + ", #free_" + type + "_slots: " + helper.currAvailableSlots
                            + ")");
                    break;
                }

                taskHelper.slotObtained(task, Phase.SIZE_BASED);
                pendingNewTasks -= 1;
            }
        }
    }
}

From source file:com.tesora.dve.sql.parser.TranslatorUtils.java

public void assignPositions() {
    if (pc.getCapability() == Capability.PARSING_ONLY)
        return;
    if (!parameters.isEmpty()) {
        TreeMap<SourceLocation, Parameter> map = new TreeMap<SourceLocation, Parameter>();
        for (Parameter p : parameters)
            map.put(p.getSourceLocation(), p);
        if (map.size() != parameters.size())
            throw new SchemaException(Pass.SECOND, "Lost parameters while doing position assignment");
        int i = 0;
        for (Parameter p : map.values()) {
            p.setPosition(i);
            pc.getValueManager().registerParameter(pc, p);
            i++;
        }
    }
    if (literals.size() > KnownVariables.CACHED_PLAN_LITERALS_MAX
            .getValue(pc.getConnection().getVariableSource()).intValue()) {
        forceUncacheable(ValueManager.CacheStatus.NOCACHE_TOO_MANY_LITERALS);
    } else {
        TreeMap<SourceLocation, DelegatingLiteralExpression> map = new TreeMap<SourceLocation, DelegatingLiteralExpression>();
        for (Pair<DelegatingLiteralExpression, Object> p : literals) {
            map.put(p.getFirst().getSourceLocation(), p.getFirst());
        }
        if (map.size() != literals.size())
            throw new SchemaException(Pass.SECOND, "Lost literals while doing position assignment");
        int i = 0;
        for (DelegatingLiteralExpression dle : map.values()) {
            pc.getValueManager().addLiteralValue(pc, i, literals.get(dle.getPosition()).getSecond(), dle);
            dle.setPosition(i, true);
            i++;
        }
    }
}
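
assignPositions() relies on a basic property behind size(): a map keeps at most one value per key, so a size() smaller than the input collection signals that two elements collapsed onto the same key (here, the same source location). A minimal sketch of that duplicate check, with invented positions:

import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class DuplicateKeyCheck {
    public static void main(String[] args) {
        List<Integer> positions = Arrays.asList(10, 25, 10); // 10 appears twice
        TreeMap<Integer, Integer> byPosition = new TreeMap<Integer, Integer>();
        for (Integer p : positions) {
            byPosition.put(p, p);
        }
        // The duplicate key collapsed into one entry, so the sizes differ.
        if (byPosition.size() != positions.size()) {
            System.out.println("duplicate positions detected");
        }
    }
}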

From source file:AnalysisModule.DataAnalysis.java

protected void bitmapAnalyse(List<Scenario> lstScenario) throws Exception {
    BitMap bitMap;
    String bmpDir;
    int traffic;
    int numberOfFiles;
    FileInputStream fin = null;
    ObjectInputStream ois = null;

    for (Scenario scenario : lstScenario) {
        for (Topology topology : scenario.lstTopology) {
            for (Instance instance : topology.getLstInstance()) {
                instance.trafficMatrix = new double[topology.getNumberOfSwitches()][topology
                        .getNumberOfSwitches()];
            }
        }
    }

    for (Scenario scenario : lstScenario) {
        for (Topology topology : scenario.lstTopology) {
            System.out.println("Analisando Topologia: " + topology.getIdTopology());
            for (Instance instance : topology.getLstInstance()) {
                System.out.println("           Instancia: " + instance.getId());
                for (int i = 0; i < topology.getNumberOfSwitches(); i++) {
                    for (int j = 0; j < topology.getNumberOfSwitches(); j++) {
                        switch (instance.type) {
                        case BITMAP: {
                            if (j <= i) {
                                break;
                            }
                            TreeMap<Long, BitMap> sourceBitMapTree = new TreeMap<>();
                            TreeMap<Long, BitMap> destinationBitMapTree = new TreeMap<>();

                            bmpDir = getSrcDir(instance, i);
                            numberOfFiles = new File(bmpDir).listFiles().length - 1;
                            int firstBitmap = -2;

                            for (int numberOfBmp = 0; numberOfBmp < numberOfFiles; numberOfBmp++) {

                                fin = new FileInputStream(bmpDir + "BitMap" + numberOfBmp + ".bmp");
                                ois = new ObjectInputStream(fin);
                                bitMap = (BitMap) ois.readObject();

                                if ((bitMap.getStartEpoch() - 21600000000L > scenario.startTime)
                                        && (bitMap.getStartEpoch() - 21600000000L < scenario.endTime)) {
                                    if (firstBitmap == -2) {
                                        firstBitmap = numberOfBmp - 1;
                                    }
                                    sourceBitMapTree.put(bitMap.getStartEpoch() - 21600000000L, bitMap);

                                }

                                ois.close();
                                fin.close();
                            }

                            // Add the bitmap immediately preceding the measurement interval
                            if (firstBitmap >= 0) {
                                fin = new FileInputStream(bmpDir + "BitMap" + firstBitmap + ".bmp");
                                ois = new ObjectInputStream(fin);
                                bitMap = (BitMap) ois.readObject();

                                sourceBitMapTree.put(bitMap.getStartEpoch() - 21600000000L, bitMap);
                                ois.close();
                                fin.close();

                            }

                            bmpDir = getDestDir(instance, j);
                            numberOfFiles = new File(bmpDir).listFiles().length - 1;
                            firstBitmap = -2;

                            for (int numberOfBmp = 0; numberOfBmp < numberOfFiles; numberOfBmp++) {

                                fin = new FileInputStream(bmpDir + "BitMap" + numberOfBmp + ".bmp");
                                ois = new ObjectInputStream(fin);
                                bitMap = (BitMap) ois.readObject();

                                if ((bitMap.getStartEpoch() - 21600000000L > scenario.startTime)
                                        && (bitMap.getStartEpoch() - 21600000000L < scenario.endTime)) {
                                    if (firstBitmap == -2) {
                                        firstBitmap = numberOfBmp - 1;
                                    }
                                    destinationBitMapTree.put(bitMap.getStartEpoch() - 21600000000L, bitMap);
                                }
                                ois.close();
                                fin.close();
                            }

                            // Add the bitmap immediately preceding the measurement interval
                            if (firstBitmap >= 0) {
                                fin = new FileInputStream(bmpDir + "BitMap" + firstBitmap + ".bmp");
                                ois = new ObjectInputStream(fin);
                                bitMap = (BitMap) ois.readObject();

                                destinationBitMapTree.put(bitMap.getStartEpoch() - 21600000000L, bitMap);

                                ois.close();
                                fin.close();

                            }

                            //Estimation
                            int k1 = sourceBitMapTree.size();
                            int k2 = destinationBitMapTree.size();

                            Collection sourceEntrySet = sourceBitMapTree.entrySet();
                            Iterator sourceEntries = sourceEntrySet.iterator();

                            for (int q = 0; q < k1; q++) {
                                Map.Entry entrySrc = (Map.Entry) sourceEntries.next();
                                BitMap bmpSrc = (BitMap) entrySrc.getValue();

                                Collection destinationEntrySet = destinationBitMapTree.entrySet();
                                Iterator destinationEntries = destinationEntrySet.iterator();

                                for (int r = 0; r < k2; r++) {
                                    Map.Entry entryDst = (Map.Entry) destinationEntries.next();
                                    BitMap bmpDst = (BitMap) entryDst.getValue();

                                    boolean overlap = bmpSrc.getStartEpoch() <= bmpDst.getEndEpoch()
                                            && bmpSrc.getEndEpoch() >= bmpDst.getStartEpoch();

                                    if (overlap) {
                                        double sourceDTr = instance.getBitMapSize()
                                                * Math.log(((double) instance.getBitMapSize())
                                                        / (instance.getBitMapSize() - bmpSrc.occupancy()));

                                        double destinationDTr = instance.getBitMapSize()
                                                * Math.log(((double) instance.getBitMapSize())
                                                        / (instance.getBitMapSize() - bmpDst.occupancy()));

                                        BitSet orSrcDst = (BitSet) bmpSrc.getBitSet().clone();
                                        //BitSet andSrcDst = (BitSet) bmpSrc.getBitSet().clone();

                                        orSrcDst.or(bmpDst.getBitSet());
                                        //andSrcDst.and(bmpDst.getBitSet());
                                        double orDTr = instance.getBitMapSize()
                                                * Math.log(((double) instance.getBitMapSize())
                                                        / (instance.getBitMapSize() - orSrcDst.cardinality()));
                                        //double andDTr = instance.getBitMapSize() * Math.log(((double) instance.getBitMapSize()) / (instance.getBitMapSize() - andSrcDst.cardinality()));

                                        double estimation = 0D;
                                        if (Double.isFinite(orDTr)) {
                                            estimation = sourceDTr + destinationDTr - orDTr;
                                            //estimation = (bmpSrc.getNumberOfPackets()/sourceDTr) * estimation;
                                            //estimation = andDTr;
                                        }

                                        instance.trafficMatrix[i][j] += estimation;
                                    }
                                }
                            }
                            break;
                        }

                        case COUNTER_ARRAY: {
                            traffic = 0;
                            traffic += instance.networkSwitch.get(i).arrayCounter[i][j];
                            //traffic += instance.networkSwitch.get(i).arrayCounter[j][i];
                            instance.doCalculateMatrixElem(i, j, topology, traffic);
                            break;
                        }

                        case OPT_COUNTER_ARRAY: {
                            traffic = 0;
                            for (Integer node : topology.getPathNodes(i, j)) {
                                if (instance.networkSwitch.get(node).isObserver) {
                                    traffic += instance.networkSwitch.get(node).arrayCounter[i][j];
                                    //traffic += instance.networkSwitch.get(node).arrayCounter[j][i];
                                    break;
                                }
                            }
                            instance.doCalculateMatrixElem(i, j, topology, traffic);
                            break;
                        }
                        }
                    }
                }
            }
        }
    }
}
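
The estimation loop above captures size() into k1 and k2 and uses them as explicit bounds while walking raw Iterators. A small sketch of the same traversal over a toy map, followed by the equivalent enhanced-for form:

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class BoundedIterationSketch {
    public static void main(String[] args) {
        TreeMap<Long, String> samples = new TreeMap<Long, String>();
        samples.put(100L, "first");
        samples.put(200L, "second");

        // Style used in the example above: size() captured as an explicit loop bound.
        int k = samples.size();
        Iterator<Map.Entry<Long, String>> it = samples.entrySet().iterator();
        for (int q = 0; q < k; q++) {
            Map.Entry<Long, String> entry = it.next();
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // Equivalent and usually clearer: iterate the entry set directly.
        for (Map.Entry<Long, String> entry : samples.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}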

From source file:org.apache.geode.internal.cache.GemFireCacheImpl.java

public void shutDownAll() {
    if (LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER) {
        try {
            CacheObserverHolder.getInstance().beforeShutdownAll();
        } finally {
            LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
        }
    }
    if (!this.isShutDownAll.compareAndSet(false, true)) {
        // another thread is already performing the shutdown
        try {
            this.shutDownAllFinished.await();
        } catch (InterruptedException e) {
            logger.debug("Shutdown all interrupted while waiting for another thread to do the shutDownAll");
            Thread.currentThread().interrupt();
        }
        return;
    }
    synchronized (GemFireCacheImpl.class) {
        try {
            boolean testIGE = Boolean.getBoolean("TestInternalGemFireError");

            if (testIGE) {
                InternalGemFireError assErr = new InternalGemFireError(
                        LocalizedStrings.GemFireCache_UNEXPECTED_EXCEPTION.toLocalizedString());
                throw assErr;
            }

            // Bug 44031 requires that a multithreaded shutDownAll be grouped
            // by root region. However, the shutDownAllDuringRecovery.conf test revealed that
            // we have to close colocated child regions first.
            // So check all the PRs: if any has a colocated-with attribute, sort the
            // PRs by colocation relationship and close them sequentially; otherwise still
            // group them by root region.
            TreeMap<String, Map<String, PartitionedRegion>> prTrees = getPRTrees();
            if (prTrees.size() > 1 && shutdownAllPoolSize != 1) {
                ExecutorService es = getShutdownAllExecutorService(prTrees.size());
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    es.execute(new Runnable() {
                        public void run() {
                            ConnectionTable.threadWantsSharedResources();
                            shutdownSubTreeGracefully(prSubMap);
                        }
                    });
                } // for each root
                es.shutdown();
                try {
                    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
                } catch (InterruptedException e) {
                    logger.debug("Shutdown all interrupted while waiting for PRs to be shutdown gracefully.");
                }

            } else {
                for (final Map<String, PartitionedRegion> prSubMap : prTrees.values()) {
                    shutdownSubTreeGracefully(prSubMap);
                }
            }

            close("Shut down all members", null, false, true);
        } finally {
            this.shutDownAllFinished.countDown();
        }
    }
}
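
shutDownAll() consults prTrees.size() twice: once to decide whether a parallel shutdown is worthwhile at all, and once to size the executor pool. A reduced sketch of that decision with an invented work map (the region names and tasks are hypothetical):

import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SizeBasedPoolSketch {
    public static void main(String[] args) {
        TreeMap<String, Runnable> work = new TreeMap<String, Runnable>();
        work.put("regionA", new Runnable() {
            public void run() {
                System.out.println("shutting down subtree A");
            }
        });
        work.put("regionB", new Runnable() {
            public void run() {
                System.out.println("shutting down subtree B");
            }
        });

        if (work.size() > 1) {
            // More than one subtree: shut them down in parallel, one thread per subtree.
            ExecutorService es = Executors.newFixedThreadPool(work.size());
            for (Runnable task : work.values()) {
                es.execute(task);
            }
            es.shutdown();
        } else {
            // A single subtree (or none): just run it inline.
            for (Runnable task : work.values()) {
                task.run();
            }
        }
    }
}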

From source file:org.apache.hadoop.hbase.regionserver.StripeStoreFileManager.java

/**
 * Loads initial store files that were picked up from some physical location pertaining to
 * this store (presumably). Unlike adding files after compaction, assumes empty initial
 * sets, and is forgiving with regard to stripe constraints - at worst, many/all files will
 * go to level 0.
 * @param storeFiles Store files to add.
 */
private void loadUnclassifiedStoreFiles(List<StoreFile> storeFiles) {
    LOG.debug("Attempting to load " + storeFiles.size() + " store files.");
    TreeMap<byte[], ArrayList<StoreFile>> candidateStripes = new TreeMap<byte[], ArrayList<StoreFile>>(
            MAP_COMPARATOR);
    ArrayList<StoreFile> level0Files = new ArrayList<StoreFile>();
    // Separate the files into tentative stripes; then validate. Currently, we rely on metadata.
    // If needed, we could dynamically determine the stripes in future.
    for (StoreFile sf : storeFiles) {
        byte[] startRow = startOf(sf), endRow = endOf(sf);
        // Validate the range and put the files into place.
        if (isInvalid(startRow) || isInvalid(endRow)) {
            insertFileIntoStripe(level0Files, sf); // No metadata - goes to L0.
            ensureLevel0Metadata(sf);
        } else if (!isOpen(startRow) && !isOpen(endRow) && nonOpenRowCompare(startRow, endRow) >= 0) {
            LOG.error("Unexpected metadata - start row [" + Bytes.toString(startRow) + "], end row ["
                    + Bytes.toString(endRow) + "] in file [" + sf.getPath() + "], pushing to L0");
            insertFileIntoStripe(level0Files, sf); // Bad metadata - goes to L0 also.
            ensureLevel0Metadata(sf);
        } else {
            ArrayList<StoreFile> stripe = candidateStripes.get(endRow);
            if (stripe == null) {
                stripe = new ArrayList<StoreFile>();
                candidateStripes.put(endRow, stripe);
            }
            insertFileIntoStripe(stripe, sf);
        }
    }
    // Possible improvement - for variable-count stripes, if all the files are in L0, we can
    // instead create single, open-ended stripe with all files.

    boolean hasOverlaps = false;
    byte[] expectedStartRow = null; // first stripe can start wherever
    Iterator<Map.Entry<byte[], ArrayList<StoreFile>>> entryIter = candidateStripes.entrySet().iterator();
    while (entryIter.hasNext()) {
        Map.Entry<byte[], ArrayList<StoreFile>> entry = entryIter.next();
        ArrayList<StoreFile> files = entry.getValue();
        // Validate the file start rows, and remove the bad ones to level 0.
        for (int i = 0; i < files.size(); ++i) {
            StoreFile sf = files.get(i);
            byte[] startRow = startOf(sf);
            if (expectedStartRow == null) {
                expectedStartRow = startRow; // ensure that first stripe is still consistent
            } else if (!rowEquals(expectedStartRow, startRow)) {
                hasOverlaps = true;
                LOG.warn("Store file doesn't fit into the tentative stripes - expected to start at ["
                        + Bytes.toString(expectedStartRow) + "], but starts at [" + Bytes.toString(startRow)
                        + "], to L0 it goes");
                StoreFile badSf = files.remove(i);
                insertFileIntoStripe(level0Files, badSf);
                ensureLevel0Metadata(badSf);
                --i;
            }
        }
        // Check if any files from the candidate stripe are valid. If so, add a stripe.
        byte[] endRow = entry.getKey();
        if (!files.isEmpty()) {
            expectedStartRow = endRow; // Next stripe must start exactly at that key.
        } else {
            entryIter.remove();
        }
    }

    // In the end, there must be open ends on two sides. If not, and there were no errors i.e.
    // files are consistent, they might be coming from a split. We will treat the boundaries
    // as open keys anyway, and log the message.
    // If there were errors, we'll play it safe and dump everything into L0.
    if (!candidateStripes.isEmpty()) {
        StoreFile firstFile = candidateStripes.firstEntry().getValue().get(0);
        boolean isOpen = isOpen(startOf(firstFile)) && isOpen(candidateStripes.lastKey());
        if (!isOpen) {
            LOG.warn("The range of the loaded files does not cover full key space: from ["
                    + Bytes.toString(startOf(firstFile)) + "], to ["
                    + Bytes.toString(candidateStripes.lastKey()) + "]");
            if (!hasOverlaps) {
                ensureEdgeStripeMetadata(candidateStripes.firstEntry().getValue(), true);
                ensureEdgeStripeMetadata(candidateStripes.lastEntry().getValue(), false);
            } else {
                LOG.warn("Inconsistent files, everything goes to L0.");
                for (ArrayList<StoreFile> files : candidateStripes.values()) {
                    for (StoreFile sf : files) {
                        insertFileIntoStripe(level0Files, sf);
                        ensureLevel0Metadata(sf);
                    }
                }
                candidateStripes.clear();
            }
        }
    }

    // Copy the results into the fields.
    State state = new State();
    state.level0Files = ImmutableList.copyOf(level0Files);
    state.stripeFiles = new ArrayList<ImmutableList<StoreFile>>(candidateStripes.size());
    state.stripeEndRows = new byte[Math.max(0, candidateStripes.size() - 1)][];
    ArrayList<StoreFile> newAllFiles = new ArrayList<StoreFile>(level0Files);
    int i = candidateStripes.size() - 1;
    for (Map.Entry<byte[], ArrayList<StoreFile>> entry : candidateStripes.entrySet()) {
        state.stripeFiles.add(ImmutableList.copyOf(entry.getValue()));
        newAllFiles.addAll(entry.getValue());
        if (i > 0) {
            state.stripeEndRows[state.stripeFiles.size() - 1] = entry.getKey();
        }
        --i;
    }
    state.allFilesCached = ImmutableList.copyOf(newAllFiles);
    this.state = state;
    debugDumpState("Files loaded");
}

From source file:org.apache.nutch.segment.SegmentMerger.java

/**
 * NOTE: in selecting the latest version we rely exclusively on the segment
 * name (not all segment data contain time information). Therefore it is extremely
 * important that segments be named in an increasing lexicographic order as
 * their creation time increases.
 */
public void reduce(Text key, Iterator<MetaWrapper> values, OutputCollector<Text, MetaWrapper> output,
        Reporter reporter) throws IOException {
    CrawlDatum lastG = null;
    CrawlDatum lastF = null;
    CrawlDatum lastSig = null;
    Content lastC = null;
    ParseData lastPD = null;
    ParseText lastPT = null;
    String lastGname = null;
    String lastFname = null;
    String lastSigname = null;
    String lastCname = null;
    String lastPDname = null;
    String lastPTname = null;
    TreeMap<String, ArrayList<CrawlDatum>> linked = new TreeMap<String, ArrayList<CrawlDatum>>();
    while (values.hasNext()) {
        MetaWrapper wrapper = values.next();
        Object o = wrapper.get();
        String spString = wrapper.getMeta(SEGMENT_PART_KEY);
        if (spString == null) {
            throw new IOException("Null segment part, key=" + key);
        }
        SegmentPart sp = SegmentPart.parse(spString);
        if (o instanceof CrawlDatum) {
            CrawlDatum val = (CrawlDatum) o;
            // check which output dir it belongs to
            if (sp.partName.equals(CrawlDatum.GENERATE_DIR_NAME)) {
                if (lastG == null) {
                    lastG = val;
                    lastGname = sp.segmentName;
                } else {
                    // take newer
                    if (lastGname.compareTo(sp.segmentName) < 0) {
                        lastG = val;
                        lastGname = sp.segmentName;
                    }
                }
            } else if (sp.partName.equals(CrawlDatum.FETCH_DIR_NAME)) {
                if (lastF == null) {
                    lastF = val;
                    lastFname = sp.segmentName;
                } else {
                    // take newer
                    if (lastFname.compareTo(sp.segmentName) < 0) {
                        lastF = val;
                        lastFname = sp.segmentName;
                    }
                }
            } else if (sp.partName.equals(CrawlDatum.PARSE_DIR_NAME)) {
                if (val.getStatus() == CrawlDatum.STATUS_SIGNATURE) {
                    if (lastSig == null) {
                        lastSig = val;
                        lastSigname = sp.segmentName;
                    } else {
                        // take newer
                        if (lastSigname.compareTo(sp.segmentName) < 0) {
                            lastSig = val;
                            lastSigname = sp.segmentName;
                        }
                    }
                    continue;
                }
                // collect all LINKED values from the latest segment
                ArrayList<CrawlDatum> segLinked = linked.get(sp.segmentName);
                if (segLinked == null) {
                    segLinked = new ArrayList<CrawlDatum>();
                    linked.put(sp.segmentName, segLinked);
                }
                segLinked.add(val);
            } else {
                throw new IOException("Cannot determine segment part: " + sp.partName);
            }
        } else if (o instanceof Content) {
            if (lastC == null) {
                lastC = (Content) o;
                lastCname = sp.segmentName;
            } else {
                if (lastCname.compareTo(sp.segmentName) < 0) {
                    lastC = (Content) o;
                    lastCname = sp.segmentName;
                }
            }
        } else if (o instanceof ParseData) {
            if (lastPD == null) {
                lastPD = (ParseData) o;
                lastPDname = sp.segmentName;
            } else {
                if (lastPDname.compareTo(sp.segmentName) < 0) {
                    lastPD = (ParseData) o;
                    lastPDname = sp.segmentName;
                }
            }
        } else if (o instanceof ParseText) {
            if (lastPT == null) {
                lastPT = (ParseText) o;
                lastPTname = sp.segmentName;
            } else {
                if (lastPTname.compareTo(sp.segmentName) < 0) {
                    lastPT = (ParseText) o;
                    lastPTname = sp.segmentName;
                }
            }
        }
    }
    // perform filtering based on full merge record
    if (mergeFilters != null && !mergeFilters.filter(key, lastG, lastF, lastSig, lastC, lastPD, lastPT,
            linked.isEmpty() ? null : linked.lastEntry().getValue())) {
        return;
    }

    curCount++;
    String sliceName = null;
    MetaWrapper wrapper = new MetaWrapper();
    if (sliceSize > 0) {
        sliceName = String.valueOf(curCount / sliceSize);
        wrapper.setMeta(SEGMENT_SLICE_KEY, sliceName);
    }
    SegmentPart sp = new SegmentPart();
    // now output the latest values
    if (lastG != null) {
        wrapper.set(lastG);
        sp.partName = CrawlDatum.GENERATE_DIR_NAME;
        sp.segmentName = lastGname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastF != null) {
        wrapper.set(lastF);
        sp.partName = CrawlDatum.FETCH_DIR_NAME;
        sp.segmentName = lastFname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastSig != null) {
        wrapper.set(lastSig);
        sp.partName = CrawlDatum.PARSE_DIR_NAME;
        sp.segmentName = lastSigname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastC != null) {
        wrapper.set(lastC);
        sp.partName = Content.DIR_NAME;
        sp.segmentName = lastCname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastPD != null) {
        wrapper.set(lastPD);
        sp.partName = ParseData.DIR_NAME;
        sp.segmentName = lastPDname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (lastPT != null) {
        wrapper.set(lastPT);
        sp.partName = ParseText.DIR_NAME;
        sp.segmentName = lastPTname;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        output.collect(key, wrapper);
    }
    if (linked.size() > 0) {
        String name = linked.lastKey();
        sp.partName = CrawlDatum.PARSE_DIR_NAME;
        sp.segmentName = name;
        wrapper.setMeta(SEGMENT_PART_KEY, sp.toString());
        ArrayList<CrawlDatum> segLinked = linked.get(name);
        for (int i = 0; i < segLinked.size(); i++) {
            CrawlDatum link = segLinked.get(i);
            wrapper.set(link);
            output.collect(key, wrapper);
        }
    }
}
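
The linked map above is keyed by segment name, so once size() > 0 confirms it is non-empty, lastKey() returns the lexicographically greatest, that is, newest, segment, which is why the Javadoc above insists on names increasing with creation time. A small sketch of that lookup with made-up segment names:

import java.util.TreeMap;

public class LatestSegmentSketch {
    public static void main(String[] args) {
        TreeMap<String, String> linked = new TreeMap<String, String>();
        linked.put("20240101000000", "older data");
        linked.put("20240315000000", "newer data");
        if (linked.size() > 0) {
            // String keys sort lexicographically, so the last key is the newest segment name.
            System.out.println(linked.lastKey()); // prints 20240315000000
        }
    }
}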