Example usage for java.util Map clear

List of usage examples for java.util Map clear

Introduction

On this page you can find example usage for java.util Map.clear().

Prototype

void clear();

Document

Removes all of the mappings from this map (optional operation).
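
Because clear() is an optional operation, immutable map implementations throw UnsupportedOperationException instead of emptying the map. A minimal sketch of both behaviors:

import java.util.HashMap;
import java.util.Map;

public class MapClearDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("a", 1);
        counts.put("b", 2);
        counts.clear();                       // removes every mapping
        System.out.println(counts.isEmpty()); // true

        Map<String, Integer> fixed = Map.of("a", 1); // immutable (Java 9+)
        try {
            fixed.clear(); // optional operation: unsupported here
        } catch (UnsupportedOperationException e) {
            System.out.println("clear() not supported on immutable maps");
        }
    }
}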

Usage

From source file:org.obiba.onyx.jade.instrument.holologic.APEXScanDataExtractor.java

/**
 * Called by extractData(). Computes T- and Z-score and adds to data collection.
 *
 * @param data the extracted scan data; computed T- and Z-scores are added to this map
 */
protected void computeTZScore(Map<String, Data> data)
        throws DataAccessException, IllegalArgumentException, ParseException {

    if (null == data || data.isEmpty())
        return;

    Map<String, Double> bmdData = new HashMap<String, Double>();
    String prefix = getResultPrefix() + "_";
    String type = getRefType();
    String source = getRefSource();

    // AP lumbar spine:
    // - identify the included vertebral levels
    // - sum the area and sum the bmc of the included vertebral levels
    // - compute the revised total bmd from summed bmc / summed area
    // - provide the proper bone range code for total bmd
    //
    if (type.equals("S")) {
        boolean[] included_array = { false, false, false, false };
        double[] area_array = { 0.0, 0.0, 0.0, 0.0 };
        double[] bmc_array = { 0.0, 0.0, 0.0, 0.0 };
        double tot_bmd = 0.0;
        for (Map.Entry<String, Data> entry : data.entrySet()) {
            String key = entry.getKey();
            int index = -1;
            if (key.startsWith("L1")) {
                index = 0;
            } else if (key.startsWith("L2")) {
                index = 1;
            } else if (key.startsWith("L3")) {
                index = 2;
            } else if (key.startsWith("L4")) {
                index = 3;
            }

            if (-1 != index) {
                if (key.endsWith("_INCLUDED")) {
                    included_array[index] = entry.getValue().getValue();
                } else if (key.endsWith("_AREA")) {
                    area_array[index] = entry.getValue().getValue();
                } else if (key.endsWith("_BMC")) {
                    bmc_array[index] = entry.getValue().getValue();
                }
            }

            if (key.endsWith("_BMD")) {
                log.info("key pre: " + key + ", new key: " + key.replace(prefix, ""));
                key = key.replace(prefix, "");
                if (key.equals("TOT_BMD")) {
                    tot_bmd = entry.getValue().getValue();
                } else {
                    if (ranges.containsKey(key)) {
                        bmdData.put(key, (Double) entry.getValue().getValue());
                        log.info("ranges contains key: " + key);
                    }
                }
            }
        }
        double tot_area = 0.0;
        double tot_bmc = 0.0;
        for (int i = 0; i < 4; i++) {
            if (included_array[i]) {
                tot_area += area_array[i];
                tot_bmc += bmc_array[i];
            }
        }
        if (0. != tot_area) {
            double last_tot_bmd = tot_bmd;
            tot_bmd = tot_bmc / tot_area;
            log.info("updating ap lumbar spine total bmd from " + ((Double) last_tot_bmd).toString() + " to "
                    + ((Double) tot_bmd).toString());
        }
        String tot_key = "TOT_BMD";
        if (included_array[0] && !(included_array[1] || included_array[2] || included_array[3])) {
            //_bonerange="1..."
            tot_key = "TOT_L1_BMD";
        } else if (included_array[1] && !(included_array[0] || included_array[2] || included_array[3])) {
            // bonerange=".2.."
            tot_key = "TOT_L2_BMD";
        } else if (included_array[2] && !(included_array[0] || included_array[1] || included_array[3])) {
            // bonerange="..3."
            tot_key = "TOT_L3_BMD";
        } else if (included_array[3] && !(included_array[0] || included_array[1] || included_array[2])) {
            // bonerange="...4"
            tot_key = "TOT_L4_BMD";
        } else if (included_array[0] && included_array[1] && !(included_array[2] || included_array[3])) {
            // bonerange="12.."
            tot_key = "TOT_L1L2_BMD";
        } else if (included_array[0] && included_array[2] && !(included_array[1] || included_array[3])) {
            // bonerange="1.3."
            tot_key = "TOT_L1L3_BMD";
        } else if (included_array[0] && included_array[3] && !(included_array[1] || included_array[2])) {
            // bonerange="1..4"
            tot_key = "TOT_L1L4_BMD";
        } else if (included_array[1] && included_array[2] && !(included_array[0] || included_array[3])) {
            // bonerange=".23."
            tot_key = "TOT_L2L3_BMD";
        } else if (included_array[1] && included_array[3] && !(included_array[0] || included_array[2])) {
            // bonerange=".2.4"
            tot_key = "TOT_L2L4_BMD";
        } else if (included_array[2] && included_array[3] && !(included_array[0] || included_array[1])) {
            // bonerange="..34"
            tot_key = "TOT_L3L4_BMD";
        } else if (included_array[0] && included_array[1] && included_array[2] && !included_array[3]) {
            // bonerange="123."
            tot_key = "TOT_L1L2L3_BMD";
        } else if (included_array[0] && included_array[1] && included_array[3] && !included_array[2]) {
            // bonerange="12.4"
            tot_key = "TOT_L1L2L4_BMD";
        } else if (included_array[0] && included_array[2] && included_array[3] && !included_array[1]) {
            // bonerange="1.34"
            tot_key = "TOT_L1L3L4_BMD";
        } else if (included_array[1] && included_array[2] && included_array[3] && !included_array[0]) {
            // bonerange=".234"
            tot_key = "TOT_L2L3L4_BMD";
        } else {
            // bonerange="1234"
            tot_key = "TOT_BMD";
        }

        if (ranges.containsKey(tot_key)) {
            bmdData.put(tot_key, (Double) tot_bmd);
            log.info("ranges contains key: " + tot_key);
        }
    } else {
        for (Map.Entry<String, Data> entry : data.entrySet()) {
            String key = entry.getKey();
            if (key.endsWith("_BMD")) {
                log.info("key pre: " + key + ", new key: " + key.replace(prefix, ""));
                key = key.replace(prefix, "");
                if (ranges.containsKey(key)) {
                    bmdData.put(key, (Double) entry.getValue().getValue());
                    log.info("ranges contains key: " + key);
                }
            }
        }
    }

    log.info(prefix + " data contains: " + Integer.toString(data.size())
            + " possible entries to get bmd values from");
    log.info(prefix + " bmddata contains: " + Integer.toString(bmdData.size()) + " entries to get tz");

    DecimalFormat format = new DecimalFormat("#.0");
    ageBracket bracket = new ageBracket();

    // Determine the participant's age (at the time of the scan).
    //
    Double age = null;
    try {
        age = computeYearsDifference(getScanDate(), getParticipantDOB());
    } catch (ParseException e) {
        throw e;
    }

    log.info("computed age from scandate and dob: " + age.toString());

    for (Map.Entry<String, Double> entry : bmdData.entrySet()) {
        String bmdBoneRangeKey = entry.getKey();
        Double bmdValue = entry.getValue();

        log.info("working on range key:" + bmdBoneRangeKey + " with value: " + bmdValue.toString());

        // T- and Z-scores are interpolated from X, Y reference curve data.
        // A curve depends on the type of scan, gender, ethnicity, and
        // the coded anatomic region that bmd was measured in.
        // Determine the unique curve ID along with the age at which
        // peak bmd occurs. Implementation of T-score assumes ethnicity is always Caucasian
        // and gender is always female in accordance with WHO and
        // Osteoporosis Canada guidelines.
        //
        String method = " AND METHOD IS NULL";
        if (type.equals("S") && (bmdBoneRangeKey.contains("L1_") || bmdBoneRangeKey.contains("L4_"))) {
            method = " AND METHOD = 'APEX'";
        }

        String sql = "SELECT UNIQUE_ID, AGE_YOUNG FROM ReferenceCurve";
        sql += " WHERE REFTYPE = '" + type + "'";
        sql += " AND IF_CURRENT = 1 AND SEX = 'F' AND ETHNIC IS NULL";
        sql += method;
        sql += " AND SOURCE LIKE '%" + source + "%'";
        sql += " AND Y_LABEL = 'IDS_REF_LBL_BMD'";
        sql += " AND BONERANGE ";
        sql += (ranges.get(bmdBoneRangeKey).equals("NULL") ? ("IS NULL")
                : ("= '" + ranges.get(bmdBoneRangeKey) + "'"));

        log.info("first query (T score): " + sql);
        Map<String, Object> mapResult;
        try {
            mapResult = refCurveDb.queryForMap(sql);
        } catch (DataAccessException e) {
            throw e;
        }
        String curveId = mapResult.get("UNIQUE_ID").toString();
        Double ageYoung = new Double(mapResult.get("AGE_YOUNG").toString());

        // Determine the bmd, skewness factor and standard deviation
        // at the peak bmd age value.
        //
        sql = "SELECT Y_VALUE, L_VALUE, STD FROM Points WHERE UNIQUE_ID = " + curveId;
        sql += " AND X_VALUE = " + ageYoung;

        log.info("second query (T score): " + sql);

        mapResult.clear();
        try {
            mapResult = refCurveDb.queryForMap(sql);
        } catch (DataAccessException e) {
            throw e;
        }

        List<Double> bmdValues = new ArrayList<Double>();
        bmdValues.add(new Double(mapResult.get("Y_VALUE").toString()));
        bmdValues.add(new Double(mapResult.get("L_VALUE").toString()));
        bmdValues.add(new Double(mapResult.get("STD").toString()));

        Double X_value = bmdValue;
        Double M_value = bmdValues.get(0);
        Double L_value = bmdValues.get(1);
        Double sigma = bmdValues.get(2);

        Double T_score = M_value * (Math.pow(X_value / M_value, L_value) - 1.) / (L_value * sigma);
        T_score = Double.valueOf(format.format(T_score));
        if (0. == Math.abs(T_score))
            T_score = 0.;

        String varName = getResultPrefix() + "_";
        if (type.equals("S") && bmdBoneRangeKey.startsWith("TOT_")) {
            varName += "TOT_T";
        } else {
            varName += bmdBoneRangeKey.replace("_BMD", "_T");
        }
        if (data.keySet().contains(varName)) {
            throw new IllegalArgumentException("Instrument variable name already defined: " + varName);
        }
        data.put(varName, DataBuilder.buildDecimal(T_score));
        log.info(varName + " = " + T_score.toString());

        Double Z_score = null;
        varName = getResultPrefix() + "_";
        if (type.equals("S") && bmdBoneRangeKey.startsWith("TOT_")) {
            varName += "TOT_Z";
        } else {
            varName += bmdBoneRangeKey.replace("_BMD", "_Z");
        }
        if (data.keySet().contains(varName)) {
            throw new IllegalArgumentException("Instrument variable name already defined: " + varName);
        }

        // APEX reference curve db has no ultra distal ulna data for males
        //
        String gender = getParticipantGender().toUpperCase();
        if (0 == gender.length() || gender.startsWith("F"))
            gender = " AND SEX = 'F'";
        else if (gender.startsWith("M")) {
            if (bmdBoneRangeKey.equals("U_UD_BMD")) {
                data.put(varName, DataBuilder.buildDecimal((Double) null));
                continue;
            }
            gender = " AND SEX = 'M'";
        }

        // APEX reference curve db has no forearm data for black or hispanic ethnicity
        //
        String ethnicity = getParticipantEthnicity();
        if (null == ethnicity)
            ethnicity = "";
        ethnicity = ethnicity.toUpperCase(); // assign back: toUpperCase() returns a new String
        if (0 == ethnicity.length() || ethnicity.equals("W") || ethnicity.equals("O") || ethnicity.equals("P")
                || ethnicity.equals("I")
                || (type.equals("R") && (ethnicity.equals("H") || ethnicity.equals("B")))) {
            ethnicity = " AND ETHNIC IS NULL";
        } else {
            ethnicity = " AND ETHNIC = '" + ethnicity + "'";
        }

        sql = "SELECT UNIQUE_ID, AGE_YOUNG FROM ReferenceCurve";
        sql += " WHERE REFTYPE = '" + getRefType() + "'";
        sql += " AND IF_CURRENT = 1";
        sql += gender;
        sql += ethnicity;
        sql += method;
        sql += " AND SOURCE LIKE '%" + getRefSource() + "%'";
        sql += " AND Y_LABEL = 'IDS_REF_LBL_BMD'";
        sql += " AND BONERANGE ";
        sql += (ranges.get(bmdBoneRangeKey).equals("NULL") ? ("IS NULL")
                : ("= '" + ranges.get(bmdBoneRangeKey) + "'"));

        log.info("first query (Z score): " + sql);

        try {
            mapResult = refCurveDb.queryForMap(sql);
        } catch (DataAccessException e) {
            throw e;
        }
        curveId = mapResult.get("UNIQUE_ID").toString();

        // Determine the age values (X axis variable) of the curve
        //
        sql = "SELECT X_VALUE FROM Points WHERE UNIQUE_ID = " + curveId;

        log.info("second query (Z score): " + sql);

        List<Map<String, Object>> listResult;
        try {
            listResult = refCurveDb.queryForList(sql);
        } catch (DataAccessException e) {
            throw e;
        }
        List<Double> ageTable = new ArrayList<Double>();
        for (Map<String, Object> row : listResult) {
            ageTable.add(new Double(row.get("X_VALUE").toString()));
        }

        bracket.compute(age, ageTable);
        if (0. != bracket.ageSpan) {

            // Determine the bmd, skewness factor and standard deviation
            // at the bracketing and peak bmd age values.
            //
            sql = "SELECT Y_VALUE, L_VALUE, STD FROM Points WHERE UNIQUE_ID = " + curveId;
            sql += " AND X_VALUE = ";

            Double[] x_value_array = { bracket.ageMin, bracket.ageMax };
            bmdValues.clear();
            for (int i = 0; i < x_value_array.length; i++) {
                log.info("third query (Z score) iter " + ((Integer) i).toString() + " : " + sql
                        + x_value_array[i].toString());

                mapResult.clear();
                try {
                    mapResult = refCurveDb.queryForMap(sql + x_value_array[i].toString());
                } catch (DataAccessException e) {
                    throw e;
                }

                bmdValues.add(new Double(mapResult.get("Y_VALUE").toString()));
                bmdValues.add(new Double(mapResult.get("L_VALUE").toString()));
                bmdValues.add(new Double(mapResult.get("STD").toString()));
            }

            Double u = (age - bracket.ageMin) / bracket.ageSpan;
            List<Double> interpValues = new ArrayList<Double>();
            for (int i = 0; i < bmdValues.size() / 2; i++)
                interpValues.add((1. - u) * bmdValues.get(i) + u * bmdValues.get(i + 3));

            M_value = interpValues.get(0);
            L_value = interpValues.get(1);
            sigma = interpValues.get(2);

            Z_score = M_value * (Math.pow(X_value / M_value, L_value) - 1.) / (L_value * sigma);
            Z_score = Double.valueOf(format.format(Z_score));
            if (0. == Math.abs(Z_score))
                Z_score = 0.;
        }

        data.put(varName, DataBuilder.buildDecimal(Z_score));

        if (null != Z_score) {
            log.info(varName + " = " + Z_score.toString());
        } else {
            log.info(varName + " = null");
        }

        log.info("finished current key: " + bmdBoneRangeKey);
    }
}
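
A note on the clear() usage above: mapResult.clear() is invoked immediately before mapResult is reassigned from refCurveDb.queryForMap(...), which returns a fresh map, so the old contents would be dropped anyway. A sketch of the simpler equivalent (firstSql and secondSql are placeholder queries; refCurveDb is the same query helper used above):

Map<String, Object> mapResult = refCurveDb.queryForMap(firstSql);
// ... read values from mapResult ...
mapResult = refCurveDb.queryForMap(secondSql); // the old map becomes garbage; no clear() needed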

From source file:com.esd.ps.InspectorController.java

/**
 * Returns a worker's marked-task records; when the total marking time reaches
 * ten minutes (600 s), a random subset of roughly that length is returned instead.
 *
 * @param workerId
 * @param session
 * @return map with the first/last upload dates and the record list
 */
@RequestMapping(value = "/inspectorList", method = RequestMethod.POST)
@ResponseBody
public Map<String, Object> inspectorListPost(int workerId, HttpSession session) {
    Map<String, Object> map = new HashMap<>();
    int userId = Integer.parseInt(session.getAttribute(Constants.USER_ID).toString());
    int inspectorId = 0;
    try {
        inspectorId = inspectorService.getInspectorIdByUserId(userId);
    } catch (BindingException n) {
        inspectorId = -1;
    }
    List<workerRecord> list = workerRecordService.getTaskByWorkerId(inspectorId, workerId, 3, 1);
    List<WorkerRecordTrans> list2 = new ArrayList<>();
    SimpleDateFormat sdf = new SimpleDateFormat(Constants.DATETIME_FORMAT);
    double taskMarkTime = 0.00;
    for (Iterator<workerRecord> iterator = list.iterator(); iterator.hasNext();) {
        workerRecord workerRecord = (workerRecord) iterator.next();
        WorkerRecordTrans workerRecordTrans = new WorkerRecordTrans();
        if (workerRecord.getTaskMarkTime() == 0) {
            continue;
        }
        workerRecordTrans.setTaskName(workerRecord.getTaskName());
        workerRecordTrans.setTaskId(workerRecord.getTaskId());
        if (workerRecord.getTaskUploadTime() == null) {
            workerRecordTrans.setTaskUploadTime(sdf.format(new Date()));
        } else {
            workerRecordTrans.setTaskUploadTime(sdf.format(workerRecord.getTaskUploadTime()));
        }
        workerRecordTrans.setTaskMarkTime(workerRecord.getTaskMarkTime());
        taskMarkTime = taskMarkTime + workerRecord.getTaskMarkTime();
        list2.add(workerRecordTrans);
    }
    map.clear();
    if (list2 == null || list2.size() == 0) {
        map.put("firstDate", "");
        map.put("lastDate", "");
        map.put("last", "");
    } else {
        map.put("firstDate", list2.get(0).getTaskUploadTime());
        map.put("lastDate", list2.get(list2.size() - 1).getTaskUploadTime());
        //System.out.println(taskMarkTime);
        if (taskMarkTime >= 600) {
            List<WorkerRecordTrans> list1 = new ArrayList<>();
            // randomly sample records into list1 until roughly ten minutes (600 s) of mark time is collected
            Set<Integer> set = new HashSet<Integer>();
            double markTime = 0.00;
            boolean panduan = true;
            int m = 0;
            while (true) {
                m++;
                // safety valve: give up after 100000 attempts to avoid an endless loop
                if (m > 100000) {
                    break;
                }
                int z = (int) (Math.random() * (list2.size() + 1));
                //System.out.println(z);
                panduan = set.add(z); // false if this random index was drawn before
                if (!panduan || z >= list2.size()) {
                    continue;
                } else {
                    if (list2.get(z).getTaskMarkTime() == 0) {
                        continue;
                    }
                    list1.add(list2.get(z));

                    markTime = markTime + Double.parseDouble(list2.get(z).getTaskMarkTime().toString());
                }
                //System.out.println(markTime);
                if (markTime > 599) {
                    break;
                }

                // if (set.size() >= 10) {
                // break;
                // }
            }
            map.put("list", list1);
        } else {
            map.put("list", list2);
        }
    }
    return map;
}

From source file:de.julielab.jcore.ae.jnet.uima.ConsistencyPreservation.java

/**
 * Consistency preservation based on (exact) string matching: if a string was
 * annotated once as an entity, all other occurrences of that string get the
 * same label. For mode: _string_. TODO: a more intelligent (voting) mechanism
 * is needed to avoid false positives. TODO: needs to be checked for performance.
 *
 * @param aJCas
 * @param entityMentionClassnames
 * @param confidenceThresholdForConsistencyPreservation
 * @throws AnalysisEngineProcessException
 */
public void stringMatch(final JCas aJCas, final TreeSet<String> entityMentionClassnames,
        double confidenceThresholdForConsistencyPreservation) throws AnalysisEngineProcessException {

    // check whether this mode is enabled
    if ((activeModes == null) || (activeModes.size() == 0)
            || !activeModes.contains(ConsistencyPreservation.MODE_STRING))
        return;

    final String text = aJCas.getDocumentText();

    final TypeSystem ts = aJCas.getTypeSystem();
    // This map stores the EntityMentions that share the same specificType.
    // We want to use the TreeSet to check - for a given specificType - if
    // there is already an annotation overlapping a specific text offset.
    // See the comparator below.
    final Map<String, TreeSet<EntityMention>> overlapIndex = new HashMap<>();
    // This Comparator checks whether two Entities overlap in any way. If
    // so, they are deemed "equal". The idea is to use this Comparator with
    // a TreeSet in which we store all existing entities. Then, we can
    // efficiently check for a specific span if there already exists any
    // overlapping entity.
    Comparator<EntityMention> overlapComparator = new Comparator<EntityMention>() {

        @Override
        public int compare(EntityMention o1, EntityMention o2) {
            int b1 = o1.getBegin();
            int e1 = o1.getEnd();
            int b2 = o2.getBegin();
            int e2 = o2.getEnd();

            if ((b1 <= b2) && (e1 >= e2)) {
                return 0;
            } else if ((b1 >= b2) && (e1 <= e2)) {
                return 0;
            }
            //
            else if ((b1 < e2) && (e1 > e2)) {
                return 0;
            } else if ((b1 < b2) && (e1 > b2)) {
                return 0;
            }
            return b1 - b2;
        }
    };

    for (final String entityMentionClassname : entityMentionClassnames) {
        // we use the index entity-class-wise; we don't want one class to
        // interfere with another
        overlapIndex.clear();
        try {
            // loop over all entity types to be considered
            EntityMention mentionForOffsetComparison = (EntityMention) JCoReAnnotationTools
                    .getAnnotationByClassName(aJCas, entityMentionClassname);

            LOGGER.debug("doStringBased() - checking consistency for type: " + entityMentionClassname);
            final Multimap<String, EntityMention> entityMap = HashMultimap.create();

            // final EntityMention myEntity = (EntityMention)
            // JCoReAnnotationTools
            // .getAnnotationByClassName(aJCas, entityMentionClassname);
            final Type entityType = ts.getType(entityMentionClassname);
            if (null == entityType)
                throw new IllegalArgumentException(
                        "Entity type \"" + entityMentionClassname + "\" was not found in the type system.");

            // loop over all entity annotations in document and put them in
            // hashmap
            LOGGER.debug("doStringBased() - building entity map");
            final Iterator<Annotation> entityIter = aJCas.getAnnotationIndex(entityType).iterator();
            while (entityIter.hasNext()) {
                final EntityMention entity = (EntityMention) entityIter.next();
                entityMap.put(entity.getCoveredText(), entity);
                // additionally, add the entities into the overlap index so
                // we can later quickly lookup whether there is already an
                // entity with the same specific type at a certain location
                String specificType = "<null>";
                if (!StringUtils.isBlank(entity.getSpecificType()))
                    specificType = entity.getSpecificType();
                TreeSet<EntityMention> set = overlapIndex.get(specificType);
                if (null == set) {
                    set = new TreeSet<>(overlapComparator);
                    overlapIndex.put(specificType, set);
                }
                set.add(entity);

            }

            // now search for strings not detected as this kind of entity
            LOGGER.debug("doStringBased() - searching for missed entities...");
            for (final String entityString : entityMap.keySet()) {
                final EntityMention entity = entityMap.get(entityString).iterator().next();
                String specificType = "<null>";
                if (!StringUtils.isBlank(entity.getSpecificType()))
                    specificType = entity.getSpecificType();
                TreeSet<EntityMention> overlapSet = overlapIndex.get(specificType);

                LOGGER.debug("doStringBased() - checking entity string: " + entityString);

                int pos = 0;
                int length = 0;
                List<EntityMention> stringMatchedEntities = new ArrayList<>();
                while ((pos = text.indexOf(entityString, (pos + length))) > -1) {
                    // for each position where we have found this entity
                    // string
                    LOGGER.debug("doStringBased() - found string at pos: " + pos);

                    // check whether there is already an annotation of this
                    // type
                    // this older approach had the issue that only one
                    // overlapping annotation of entityMentionClassname was
                    // returned; but this type could be the wrong one in
                    // that the returned had a different specific type but
                    // another existed with the same specificType as the
                    // sought entity
                    // EntityMention refEntity = (EntityMention)
                    // JCoReAnnotationTools
                    // .getOverlappingAnnotation(aJCas,
                    // entityMentionClassname, pos, pos
                    // + entityString.length());

                    mentionForOffsetComparison.setBegin(pos);
                    mentionForOffsetComparison.setEnd(pos + entityString.length()); // 'length' is still 0 on the first pass, so compute the end from the match itself
                    boolean overlappingExists = overlapSet.contains(mentionForOffsetComparison);

                    // if (refEntity == null
                    // || (refEntity.getSpecificType() == null ^
                    // entity.getSpecificType() == null)
                    // || (refEntity.getSpecificType() != null
                    // && entity.getSpecificType() != null && !refEntity
                    // .getSpecificType().equals(entity.getSpecificType())))
                    // {
                    if (!overlappingExists) {
                        // if there is no annotation of same type on this
                        // text span yet...
                        LOGGER.debug("doStringBased() - adding annotation to unlabeled entity mention");
                        EntityMention refEntity = (EntityMention) JCoReAnnotationTools
                                .getAnnotationByClassName(aJCas, entityMentionClassname);
                        // We will not directly just annotate the found
                        // string but extend it to offsets of
                        // overlapped tokens.
                        List<Token> overlappingTokens = JCoReAnnotationTools.getNearestOverlappingAnnotations(
                                aJCas,
                                new Annotation(entity.getCAS().getJCas(), pos, pos + entityString.length()),
                                Token.class);
                        int begin = overlappingTokens.size() > 0 ? overlappingTokens.get(0).getBegin() : pos;
                        int end = overlappingTokens.size() > 0
                                ? overlappingTokens.get(overlappingTokens.size() - 1).getEnd()
                                : pos + entityString.length();
                        // If we would have to adjust the offsets too much,
                        // we have most likely just hit some
                        // substring of a larger token by coincidence.
                        refEntity.setBegin(begin);
                        refEntity.setEnd(end);
                        refEntity.setSpecificType(entity.getSpecificType());
                        refEntity.setResourceEntryList(entity.getResourceEntryList());
                        refEntity.setConfidence(entity.getConfidence());
                        refEntity.setTextualRepresentation(entity.getTextualRepresentation());
                        refEntity.setComponentId(COMPONENT_ID + " String (" + entity.getCoveredText() + ", "
                                + begin + "-" + end + ")");
                        stringMatchedEntities.add(refEntity);

                    } else
                        LOGGER.debug("doStringBased() - there is already an entity!");

                    length = entityString.length();
                }

                // A.R. 30.06.15: this option can now be turned on, just by
                // setting the config parameter
                // confidenceThresholdForConsistencyPreservation to a value
                // greater than 0
                // earlier it has been switched by commenting or
                // un-commenting the following code

                // If confidenceThresholdForConsistencyPreservation is given
                // (value != -1)
                // only add the new entities if there is enough evidence by
                // originally found entities with the same string that
                // this is indeed an entity we would like to find.
                if (confidenceThresholdForConsistencyPreservation > 0) {
                    if (!stringMatchedEntities.isEmpty()) {

                        double meanConfidence = 0;
                        for (EntityMention recognizedEntity : entityMap.get(entityString)) {
                            if (null != recognizedEntity.getConfidence()) {
                                meanConfidence += Double.parseDouble(recognizedEntity.getConfidence());
                            }
                        }
                        meanConfidence /= entityMap.get(entityString).size();

                        int allMatches = stringMatchedEntities.size() + entityMap.get(entityString).size();
                        if (entityMap.get(entityString).size() >= allMatches / 3d) {
                            if (meanConfidence > confidenceThresholdForConsistencyPreservation) {
                                for (EntityMention refEntity : stringMatchedEntities) {
                                    // we have to add the new entities to
                                    // the overlap-index to avoid duplicates
                                    // by other entities that are a
                                    // substring of the current entity
                                    overlapSet.add(refEntity);
                                    refEntity.addToIndexes();
                                }
                            }
                        }
                    }
                }
                // if confidence score doesn't need to be checked, just add
                // all occurrences
                else {
                    for (EntityMention refEntity : stringMatchedEntities) {
                        // we have to add the new entities to the
                        // overlap-index to avoid duplicates by other
                        // entities that are a substring of the current
                        // entity
                        overlapSet.add(refEntity);
                        refEntity.addToIndexes();
                    }
                }
            }

        } catch (final Exception e) {
            LOGGER.error("doStringBased() - exception occurred: " + e.getMessage());
            throw new AnalysisEngineProcessException(e);
        }

    }
}

From source file:com.compomics.colims.distributed.playground.AnnotatedSpectraParser.java

/**
 * Parses the APL files for the given aplKeys and puts the peaks in the spectrumPeaks map.
 */
private void parseAplFile() throws IOException {
    for (Path aplFilePath : aplFilePaths.keySet()) {
        if (!Files.exists(aplFilePath)) {
            throw new FileNotFoundException(
                    "The apl spectrum file " + aplFilePath.toString() + " could not be found.");
        }
        try (BufferedReader bufferedReader = Files.newBufferedReader(aplFilePath)) {
            String line;
            Map<String, String> headers = new HashMap<>();

            while ((line = bufferedReader.readLine()) != null) {
                //look for a spectrum entry
                if (line.startsWith(APL_SPECTUM_START)) {
                    //go to the next line
                    line = bufferedReader.readLine();
                    //parse spectrum header part
                    while (!Character.isDigit(line.charAt(0))) {
                        String[] split = line.split(APL_HEADER_DELIMITER);
                        headers.put(split[0], split[1]);
                        line = bufferedReader.readLine();
                    }
                    //" Precursor: 0 _multi_" is removed before looking up the key in the spectra map
                    String header = org.apache.commons.lang3.StringUtils
                            .substringBefore(headers.get(APL_HEADER), " Precursor");
                    //check if the spectrum was identified and therefore can be found in the spectra map
                    if (aplKeys.contains(header)) {
                        List<Peak> peakList = new ArrayList<>();
                        while (!line.startsWith(APL_SPECTUM_END)) {
                            String[] splitLine = line.split(MaxQuantConstants.PARAM_TAB_DELIMITER.value());
                            Peak peak = new Peak(Double.parseDouble(splitLine[0]),
                                    Double.parseDouble(splitLine[1]));

                            peakList.add(peak);
                            line = bufferedReader.readLine();
                        }
                        spectrumPeaks.put(header, peakList);
                    }
                    //clear headers map
                    headers.clear();
                }
            }
        }
    }
}

From source file:edu.stanford.muse.index.Summarizer.java

/** compute tf-idf based scores for a single multi-doc */
public synchronized void scoreNamesFromMultiDoc(edu.stanford.muse.index.MultiDoc mdoc)
        throws IOException, GeneralSecurityException, ClassNotFoundException {
    // create a list of all the names in this mdoc first
    List<String> clusterNames = new ArrayList<String>();
    for (edu.stanford.muse.index.Document d : mdoc.docs) {
        String id = d.getUniqueId();
        clusterNames.addAll(indexer.getNamesForDocId(id, Indexer.QueryType.ORIGINAL));
    }

    // now create a posting map
    Map<String, Posting> termMap = new LinkedHashMap<String, Posting>();
    for (String term : clusterNames) {
        // could be a multiWordTerm
        String canonicalTerm = DictUtils.canonicalizeMultiWordTerm(term, false); // we do canonicalization as usual because we need to eliminate multiple spaces... but no stemming for names

        Posting p = termMap.get(canonicalTerm);
        if (p == null) {
            p = new Posting();
            p.term = InternTable.intern(canonicalTerm);
            p.originalTerm = InternTable.intern(term);
            p.tf = 0;
            if (log.isTraceEnabled())
                log.trace("New Token: " + p);
            termMap.put(p.term, p);
        }
        p.tf++;
    }

    // termMap is what we want to index for the cards
    for (Posting p : termMap.values())
        if (p.tf > TERM_FREQ_PER_SUPERDOC_THROTTLE) {
            log.info("Throttling freq to " + TERM_FREQ_PER_SUPERDOC_THROTTLE + " for posting: " + p);
            p.tf = TERM_FREQ_PER_SUPERDOC_THROTTLE;
        }

    // set up doc index and normalize tf
    List<Posting> a = new ArrayList<Posting>(termMap.values());
    superdocToPostingsIndex.put(mdoc, a);

    // for each term in this doc, update termIndex
    for (Posting p : termMap.values()) {
        Integer I = termToSuperDocCountIndex.get(p.term);
        if (I == null)
            termToSuperDocCountIndex.put(p.term, 1);
        else
            termToSuperDocCountIndex.put(p.term, I + 1);
    }
    termMap.clear();
}

From source file:com.intel.ssg.dcst.panthera.parse.SkinDriver.java

/**
 * Cleans up remaining tasks in case of failure
 */
public void taskCleanup(Map<TaskResult, TaskRunner> running) {
    for (Map.Entry<TaskResult, TaskRunner> entry : running.entrySet()) {
        if (entry.getKey().isRunning()) {
            Task<?> task = entry.getValue().getTask();
            try {
                task.shutdown();
            } catch (Exception e) {
                console.printError("Exception on shutting down task " + task.getId() + ": " + e);
            }
        }
    }
    running.clear();
}

From source file:hydrograph.ui.graph.debugconverter.DebugConverter.java

public Debug getParam() throws Exception {
    Map<String, SubjobDetails> componentNameAndLink = new HashMap<>();
    Debug debug = new Debug();
    ViewData viewData = null;
    String componenetId = "";
    String socket_Id = "";

    IWorkbenchPage page = PlatformUI.getWorkbench().getActiveWorkbenchWindow().getActivePage();
    ELTGraphicalEditor editor = (ELTGraphicalEditor) page.getActiveEditor();

    if (editor != null && editor instanceof ELTGraphicalEditor) {
        GraphicalViewer graphicalViewer = (GraphicalViewer) ((GraphicalEditor) editor)
                .getAdapter(GraphicalViewer.class);
        for (Iterator<EditPart> iterator = graphicalViewer.getEditPartRegistry().values().iterator(); iterator
                .hasNext();) {
            EditPart editPart = iterator.next();
            if (editPart instanceof ComponentEditPart) {
                Component component = ((ComponentEditPart) editPart).getCastedModel();
                if (component instanceof SubjobComponent) {
                    Link link = component.getInputLinks().get(0);
                    String previousComponent = link.getSource().getComponentId();
                    traverseSubjob(component, debug, component.getComponentId(), previousComponent);

                }

                Map<String, Long> map = component.getWatcherTerminals();
                if (!map.isEmpty()) {
                    for (Entry<String, Long> entrySet : map.entrySet()) {
                        List<Link> links = ((ComponentEditPart) editPart).getCastedModel()
                                .getSourceConnections();
                        if (StringUtils.equalsIgnoreCase(component.getComponentName(),
                                Constants.SUBJOB_COMPONENT)) {
                            for (Link link : links) {
                                componentNameAndLink.clear();
                                boolean isWatch = link.getSource().getPort(link.getSourceTerminal())
                                        .isWatched();
                                if (isWatch) {
                                    ViewDataUtils.getInstance().subjobParams(componentNameAndLink, component,
                                            new StringBuilder(), link.getSourceTerminal());
                                    for (Entry<String, SubjobDetails> entry : componentNameAndLink.entrySet()) {
                                        String comp_soc = entry.getKey();
                                        String[] split = StringUtils.split(comp_soc, "/.");
                                        componenetId = split[0];
                                        for (int i = 1; i < split.length - 1; i++) {
                                            componenetId = componenetId + "." + split[i];
                                        }
                                        socket_Id = split[split.length - 1];
                                    }
                                    viewData = new ViewData();
                                    viewData.setFromComponentId(componenetId);
                                    viewData.setOutSocketId(socket_Id);
                                    String portType = socket_Id.substring(0, 3);
                                    viewData.setOutSocketType(checkPortType(portType));
                                    debug.getViewData().add(viewData);
                                }
                            }
                            break;
                        } else {
                            viewData = new ViewData();
                            viewData.setFromComponentId(component.getComponentId());
                            viewData.setOutSocketId(entrySet.getKey());
                            String portType = entrySet.getKey().substring(0, 3);
                            viewData.setOutSocketType(checkPortType(portType));
                            debug.getViewData().add(viewData);
                        }
                    }
                }
            }
        }
    }

    return debug;
}

From source file:com.esd.ps.ManagerController.java

/**
 * Computes workers' pay for the given date range, paged.
 *
 * @param userNameCondition
 * @param page
 * @param beginDate
 * @param endDate
 * @param dateType
 * @param salaryLine
 * @param payOffType
 * @return map with the salary list, total record count, and page count
 */
@RequestMapping(value = "/workerSalary", method = RequestMethod.POST)
@ResponseBody
public Map<String, Object> workerSalaryPost(String userNameCondition, int page, String beginDate,
        String endDate, int dateType, int salaryLine, int payOffType) {
    logger.debug("page:{},userNameCondition:{},year:{},month:{},dateType:{}", page, userNameCondition,
            beginDate, endDate, dateType);
    int pre = (int) System.currentTimeMillis();
    Map<String, Object> map = new HashMap<String, Object>();
    // SimpleDateFormat sdf = new
    // SimpleDateFormat(Constants.DATETIME_FORMAT);
    List<Map<String, Object>> list = new ArrayList<Map<String, Object>>();
    int totlePage = Constants.ZERO;
    List<Map<String, Object>> salaryList = salaryService.getSalary(dateType, page, Constants.ROW, beginDate,
            endDate, userNameCondition, salaryLine, payOffType);
    if (salaryList == null) {
        map.put(Constants.LIST, "");
        return map;
    }
    // int pre1 = (int) System.currentTimeMillis();
    // logger.debug("userList:{}",(pre1 - pre));
    manager manager = managerService.selectByPrimaryKey(1);
    DecimalFormat df = new DecimalFormat("#");
    Double d = 0.00;
    for (Iterator<Map<String, Object>> iterator = salaryList.iterator(); iterator.hasNext();) {
        Map<String, Object> map2 = (Map<String, Object>) iterator.next();
        if (map2.get("markTime") == null) {
            map2.put("taskMarkTimeMonth", 0.00);
            map2.put("salary", 0);
        } else {
            d = Double.parseDouble(map2.get("markTime").toString());
            if (d < 0) {
                map2.put("taskMarkTimeMonth", -d);
            } else {
                map2.put("taskMarkTimeMonth", d);
            }
            map2.put("salary", df.format(d * manager.getSalary() / 3600));
        }
        list.add(map2);
    }
    map.clear();
    // int pre11 = (int) System.currentTimeMillis();
    int totle = salaryService.getSalary100Count(dateType, beginDate, endDate, userNameCondition, salaryLine,
            payOffType);
    // int pre12 = (int) System.currentTimeMillis();
    // logger.debug("totle:{}",(pre12 - pre11));
    totlePage = (int) Math.ceil((double) totle / (double) Constants.ROW);
    map.put(Constants.LIST, list);
    map.put(Constants.TOTLE, totle);
    map.put(Constants.TOTLE_PAGE, totlePage);
    return map;
}

From source file:com.surevine.alfresco.repo.action.SafeMoveCopyServiceImpl.java

/**
 * Constructs the properties to copy that apply to the type and default aspects 
 */
private Map<QName, Serializable> buildCopyProperties(CopyDetails copyDetails, Set<QName> classQNames,
        Map<QName, CopyBehaviourCallback> callbacks) {
    Map<QName, Serializable> sourceNodeProperties = copyDetails.getSourceNodeProperties();
    Map<QName, Serializable> copyProperties = new HashMap<QName, Serializable>(sourceNodeProperties.size(),
            1.0F);
    Map<QName, Serializable> scratchProperties = new HashMap<QName, Serializable>(11);
    // Each defined callback gets a chance to say which properties get copied
    // Only model-defined properties are considered
    for (QName classQName : classQNames) {
        CopyBehaviourCallback callback = callbacks.get(classQName);
        if (callback == null) {
            throw new IllegalStateException("Source node class has no callback: " + classQName);
        }
        // Ignore if not present or if not scheduled for a copy
        if (!callback.getMustCopy(classQName, copyDetails)) {
            continue;
        }
        // Get the dictionary definition
        ClassDefinition classDef = dictionaryService.getClass(classQName);
        if (classDef == null) {
            continue;
        }
        // Get the defined properties
        Map<QName, PropertyDefinition> propertyDefs = classDef.getProperties();
        // Extract these from the source nodes properties and store in a safe (modifiable) map
        scratchProperties.clear();
        for (QName propertyQName : propertyDefs.keySet()) {
            Serializable value = sourceNodeProperties.get(propertyQName);
            if (value == null) {
                continue;
            }
            scratchProperties.put(propertyQName, value);
        }
        // What does the behaviour do with properties?
        Map<QName, Serializable> propsToCopy = callback.getCopyProperties(classQName, copyDetails,
                scratchProperties);

        // Add to the final properties
        copyProperties.putAll(propsToCopy);
    }
    // Done
    return copyProperties;
}
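
buildCopyProperties() shows a common reason these examples call clear(): reusing one scratch map across loop iterations instead of allocating a new HashMap on every pass. A minimal sketch of the pattern (consume is a hypothetical consumer):

static void processBatches(List<List<String>> batches) {
    Map<String, String> scratch = new HashMap<>();
    for (List<String> batch : batches) {
        scratch.clear(); // drop the previous iteration's state instead of allocating a new map
        for (String item : batch) {
            scratch.put(item, item.toUpperCase());
        }
        consume(scratch); // hypothetical consumer; it must not keep a reference to scratch
    }
}

The trade-off is that anyone holding on to the map sees it emptied on the next iteration, which is why buildCopyProperties copies the callback's result into copyProperties before looping again.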

From source file:org.ala.dao.RankingDaoImpl.java

@Override
public void reloadRanks() throws Exception {
    SolrServer solrServer = solrUtils.getSolrServer();
    long start = System.currentTimeMillis();
    Map<String, String> compareFieldValue = new HashMap<String, String>();
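    // note: this single map instance is shared by every BaseRanking created below;
    // it is cleared and repopulated on each pass, so setRanking must read it immediately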
    int i = 0;
    int j = 0;
    logger.debug("reload Ranks...");
    Scanner scanner = storeHelper.getScanner(RK_COLUMN_FAMILY, RK_COLUMN_FAMILY, "");
    byte[] guidAsBytes = null;
    String previousGuid = "";
    while ((guidAsBytes = scanner.getNextGuid()) != null) {
        String rowKey = new String(guidAsBytes);
        String guid = rowKey.substring(0, rowKey.indexOf("|"));

        String type = rowKey.substring(rowKey.indexOf("|") + 1, rowKey.length());
        RankingType rankingType = RankingType.getRankingTypeByColumnName(type);
        ColumnType columnType = rankingType != null ? rankingType.getColumnType() : ColumnType.IMAGE_COL;
        i++;

        if (i % 1000 == 0) {
            logger.info("Indexed records: " + i + ", current guid: " + guid);
        }
        try {
            //get taxon concept details
            //List<String> list = storeHelper.getSuperColumnsByGuid(guid, RK_COLUMN_FAMILY);
            Map<String, String> columnValues = scanner.getCurrentValues();
            for (String column : columnValues.keySet()) {
                List<BaseRanking> rankings = getRankingsFromString(columnValues.get(column));
                for (BaseRanking br : rankings) {
                    if (rankingType == null) {
                        //the old format for a rank
                        compareFieldValue.clear();
                        compareFieldValue.put("identifier", br.getUri());
                        br.setCompareFieldValue(compareFieldValue);

                    } else if (rankingType == RankingType.RK_NAME_VALUE) {
                        compareFieldValue.clear();
                        compareFieldValue.put("nameString", column);
                        compareFieldValue.put("identifier", br.getUri());
                        compareFieldValue.put("defaultValue", "100000");

                        br.setCompareFieldValue(compareFieldValue);
                        columnType = ColumnType.VERNACULAR_COL;
                    }
                    // no reindexing per common name; the whole row is reindexed below
                    taxonConceptDao.setRanking(guid, columnType, br, false);
                    //System.out.println("Attempting to update the ranking " + guid + " " + columnType + " " + br);
                    j++;
                    if (j % 1000 == 0) {
                        logger.info("Indexed records: " + j + ", current guid: " + guid);
                    }
                }

            }

            try {
                //reindex whole row record
                if (!guid.equals(previousGuid)) {
                    List<SolrInputDocument> docList = taxonConceptDao.indexTaxonConcept(guid, null);
                    if (solrServer == null) {
                        solrServer = solrUtils.getSolrServer();
                    }
                    if (solrServer != null && docList != null && docList.size() > 0) {
                        solrServer.add(docList);
                    }
                }
            } catch (Exception e) {
                logger.error("***** add solr record failed. guid: " + guid + " ," + e);
            }
            previousGuid = guid;
        } catch (Exception ex) {
            logger.error("***** guid: " + guid + " ," + ex);
            ex.printStackTrace();
        }
    }
    if (solrServer == null) {
        solrServer = solrUtils.getSolrServer();
    }
    if (solrServer != null) {
        solrServer.commit();
    }

    long finish = System.currentTimeMillis();
    logger.info("Index created in: " + ((finish - start) / 1000) + " seconds with  species: " + i
            + ", column items: " + j);
    logger.debug("reload Ranks finished...");
}