Example usage for java.util LinkedHashMap remove

Introduction

On this page you can find example usage for java.util.LinkedHashMap.remove.

Prototype

V remove(Object key);

Document

Removes the mapping for a key from this map if it is present (optional operation).
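
The return value is the value previously associated with the key, or null if there was no mapping. Because a LinkedHashMap iterates in insertion order, a removed key also disappears from that order. A minimal, self-contained sketch (class and variable names are illustrative):

import java.util.LinkedHashMap;

public class RemoveDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> map = new LinkedHashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        Integer removed = map.remove("b"); // previous value: 2
        Integer missing = map.remove("x"); // no mapping: null

        System.out.println(removed);       // 2
        System.out.println(missing);       // null
        System.out.println(map.keySet());  // [a, c] -- insertion order preserved
    }
}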

Usage

From source file:com.evolveum.midpoint.wf.impl.processors.primary.policy.ProcessSpecifications.java

static ProcessSpecifications createFromRules(List<EvaluatedPolicyRule> rules, PrismContext prismContext)
        throws ObjectNotFoundException {
    // Step 1: plain list of approval actions -> map: process-spec -> list of related actions/rules ("collected")
    LinkedHashMap<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>> collectedSpecifications = new LinkedHashMap<>();
    for (EvaluatedPolicyRule rule : rules) {
        for (ApprovalPolicyActionType approvalAction : rule.getEnabledActions(ApprovalPolicyActionType.class)) {
            WfProcessSpecificationType spec = approvalAction.getProcessSpecification();
            collectedSpecifications.computeIfAbsent(spec, s -> new ArrayList<>())
                    .add(new ImmutablePair<>(approvalAction, rule));
        }
    }
    // Step 2: resolve references
    for (WfProcessSpecificationType spec : new HashSet<>(collectedSpecifications.keySet())) { // cloned to avoid concurrent modification exception
        if (spec != null && spec.getRef() != null) {
            List<Map.Entry<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>>> matching = collectedSpecifications
                    .entrySet().stream()
                    .filter(e -> e.getKey() != null && spec.getRef().equals(e.getKey().getName()))
                    .collect(Collectors.toList());
            if (matching.isEmpty()) {
                throw new IllegalStateException("Process specification named '" + spec.getRef()
                        + "' referenced from an approval action couldn't be found");
            } else if (matching.size() > 1) {
                throw new IllegalStateException("More than one process specification named '" + spec.getRef()
                        + "' referenced from an approval action: " + matching);
            } else {
                // move all actions/rules to the referenced process specification
                List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>> referencedSpecActions = matching
                        .get(0).getValue();
                referencedSpecActions.addAll(collectedSpecifications.get(spec));
                collectedSpecifications.remove(spec);
            }
        }
    }

    Map<String, Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>> actionsMap = null;

    // Step 3: include other actions
    for (Map.Entry<WfProcessSpecificationType, List<Pair<ApprovalPolicyActionType, EvaluatedPolicyRule>>> processSpecificationEntry : collectedSpecifications
            .entrySet()) {
        WfProcessSpecificationType spec = processSpecificationEntry.getKey();
        if (spec == null || spec.getIncludeAction().isEmpty() && spec.getIncludeActionIfPresent().isEmpty()) {
            continue;
        }
        if (actionsMap == null) {
            actionsMap = createActionsMap(collectedSpecifications.values());
        }
        for (String actionToInclude : spec.getIncludeAction()) {
            processActionToInclude(actionToInclude, actionsMap, processSpecificationEntry, true);
        }
        for (String actionToInclude : spec.getIncludeActionIfPresent()) {
            processActionToInclude(actionToInclude, actionsMap, processSpecificationEntry, false);
        }
    }

    // Step 4: sorts process specifications and wraps into ProcessSpecification objects
    ProcessSpecifications rv = new ProcessSpecifications(prismContext);
    collectedSpecifications.entrySet().stream().sorted((ps1, ps2) -> {
        WfProcessSpecificationType key1 = ps1.getKey();
        WfProcessSpecificationType key2 = ps2.getKey();
        if (key1 == null) {
            return key2 == null ? 0 : 1; // non-empty (key2) records first
        } else if (key2 == null) {
            return -1; // non-empty (key1) record first
        }
        int order1 = defaultIfNull(key1.getOrder(), Integer.MAX_VALUE);
        int order2 = defaultIfNull(key2.getOrder(), Integer.MAX_VALUE);
        return Integer.compare(order1, order2);
    }).forEach(e -> rv.specifications.add(rv.new ProcessSpecification(e)));
    return rv;
}
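
Note how Step 2 above iterates over a copy of the key set (new HashSet<>(collectedSpecifications.keySet())) so that collectedSpecifications.remove(spec) cannot trigger a ConcurrentModificationException. A minimal standalone sketch of the same copy-then-remove pattern (names are illustrative):

import java.util.HashSet;
import java.util.LinkedHashMap;

public class SafeRemoveDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> map = new LinkedHashMap<>();
        map.put("keep", "x");
        map.put("drop-1", "y");
        map.put("drop-2", "z");

        // Iterate a snapshot of the keys; mutating the live map is then safe.
        for (String key : new HashSet<>(map.keySet())) {
            if (key.startsWith("drop")) {
                map.remove(key);
            }
        }
        System.out.println(map); // {keep=x}
    }
}

An explicit Iterator's remove(), or entrySet().removeIf(...), would avoid the copy and achieve the same effect.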

From source file:com.google.gwt.emultest.java.util.LinkedHashMapTest.java

/**
 * Test method for 'java.util.LinkedHashMap.size()'.
 */
public void testSize() {
    LinkedHashMap<String, String> hashMap = new LinkedHashMap<String, String>();
    checkEmptyLinkedHashMapAssumptions(hashMap);

    // Test size behavior on put
    assertEquals(hashMap.size(), SIZE_ZERO);
    hashMap.put(KEY_1, VALUE_1);
    assertEquals(hashMap.size(), SIZE_ONE);
    hashMap.put(KEY_2, VALUE_2);
    assertEquals(hashMap.size(), SIZE_TWO);
    hashMap.put(KEY_3, VALUE_3);
    assertEquals(hashMap.size(), SIZE_THREE);

    // Test size behavior on remove
    hashMap.remove(KEY_1);
    assertEquals(hashMap.size(), SIZE_TWO);
    hashMap.remove(KEY_2);
    assertEquals(hashMap.size(), SIZE_ONE);
    hashMap.remove(KEY_3);
    assertEquals(hashMap.size(), SIZE_ZERO);

    // Test size behavior on putAll
    hashMap.put(KEY_1, VALUE_1);
    hashMap.put(KEY_2, VALUE_2);
    hashMap.put(KEY_3, VALUE_3);
    LinkedHashMap<String, String> srcMap = cloneLinkedHashMap(hashMap);
    hashMap.putAll(srcMap);
    assertEquals(hashMap.size(), SIZE_THREE);

    // Test size behavior on clear
    hashMap.clear();
    assertEquals(hashMap.size(), SIZE_ZERO);
}

From source file:gate.util.reporting.DocTimeReporter.java

/**
 * Prints the document level statistics report in HTML format.
 *
 * @param reportSource
 *          An Object of type LinkedHashMap<String, Object> containing the
 *          document names (with time in milliseconds).
 * @param outputFile
 *          An object of type File representing the output report file to
 *          which the HTML report is to be written.
 */
private void printToHTML(LinkedHashMap<String, Object> reportSource, File outputFile) {
    String htmlReport = "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\"" + NL
            + "\"http://www.w3.org/TR/html4/loose.dtd\">" + NL
            + "<html><head><title>Benchmarking Report</title>" + NL + "<meta http-equiv=\"Content-Type\""
            + " content=\"text/html; charset=utf-8\">" + NL + "<style type=\"text/css\">" + NL
            + "div { font-size:12px; margin-top: 4; }" + NL + "</style>" + NL + "</head>" + NL
            + "<body style=\"font-family:Verdana; color:navy;\">" + NL;
    String hTrace = "<div style=\"right: 0pt; border-top:1px solid #C9D7F1;" + " font-size:1px;\" ></div>" + NL;
    String reportTitle = hTrace;
    String docs = "";
    if (maxDocumentInReport != ALL_DOCS) {
        if (allDocs.size() < maxDocumentInReport) {
            docs = Integer.toString(allDocs.size());
        } else {
            docs = Integer.toString(maxDocumentInReport);
        }
    } else {
        docs = "All";
    }
    if (PRMatchingRegex.equals(MATCH_ALL_PR_REGEX)) {
        reportTitle = reportTitle + "<div style=\"font-size:15px;font-family:Verdana; color:navy;\">Top " + docs
                + " expensive documents matching All PRs in <b>" + pipelineName + "</b></div>" + NL;
    } else {
        if (matchingPRs.size() > 0) {
            reportTitle = reportTitle + "<div style=\"font-size:15px;font-family:Verdana; color:navy;\">Top "
                    + docs + " expensive documents matching following PRs in <b>" + pipelineName + "</b> <ul>"
                    + NL;
            for (String pr : matchingPRs) {
                reportTitle = reportTitle + "<li>" + pr + "</li>";
            }
            reportTitle = reportTitle + "</ul></div>";
        } else {
            reportTitle += "<div style=\"font-size:15px;font-family:Verdana; color:navy;\">"
                    + "No PRs matched to search string \"" + getPRMatchingRegex() + " \" in " + pipelineName
                    + "</div>";
        }
    }
    reportTitle = reportTitle + hTrace;

    if (allDocs.size() > 0) {
        String htmlReportTitle = reportTitle + "<table><tr bgcolor=\"#eeeeff\">"
                + "<td><b>Document Name</b></td>" + "<td><b>Time in seconds</b></td>"
                + "<td><b>% Time taken</b></td>" + "</tr><tr>" + NL;
        String documentNameHTMLString = "<td rowspan = '112' width = '550'>";
        String timeTakenHTMLString = "<td width = '100'>";
        String timeInPercentHTMLString = "<td width = '100'>";
        LinkedHashMap<String, Object> rcHash = reportSource;
        rcHash.remove("total");
        Iterator<String> i = rcHash.keySet().iterator();
        int count = 0;
        while (i.hasNext()) {
            String key = i.next();
            if (!key.equals("total")) {
                int value = Integer.parseInt((String) rcHash.get(key));
                if (maxDocumentInReport == ALL_DOCS) {
                    documentNameHTMLString += "<div>" + key + "</div>";
                    timeTakenHTMLString += "<div>" + value / 1000.0 + "</div>";
                    timeInPercentHTMLString += "<div>" + Math.round(((value / globalTotal) * 100) * 10) / 10.0
                            + "</div>" + NL;
                } else if (count < maxDocumentInReport) {
                    documentNameHTMLString += "<div>" + key + "</div>";
                    timeTakenHTMLString += "<div>" + value / 1000.0 + "</div>";
                    timeInPercentHTMLString += "<div>" + Math.round(((value / globalTotal) * 100) * 10) / 10.0
                            + "</div>" + NL;
                }
            }
            count++;
        }
        documentNameHTMLString += "<div bgcolor=\"#eeeeff\" style = \"font-size:15px;margin-left:400px;\">"
                + "<b>Total</b></div></td>" + NL;
        timeTakenHTMLString += "<div bgcolor=\"#eeeeff\" style = \"font-size:15px;\"><b>" + globalTotal / 1000.0
                + "</b></div></td>" + NL;
        timeInPercentHTMLString += "<div bgcolor=\"#eeeeff\" style = \"font-size:15px;\">"
                + "<b>100</b></div></td>" + NL;

        if (!outputFile.exists()) {
            htmlReport += htmlReportTitle + documentNameHTMLString + timeTakenHTMLString
                    + timeInPercentHTMLString + "</tr></table>";
        } else {
            htmlReport = "<br/><br/>" + htmlReportTitle + documentNameHTMLString + timeTakenHTMLString
                    + timeInPercentHTMLString + "</tr></table></body></html>";
        }
    } else {
        htmlReport += reportTitle + "</body></html>";
    }

    BufferedWriter out = null;
    try {
        out = new BufferedWriter(new FileWriter(outputFile));
        out.write(htmlReport);

    } catch (IOException e) {
        e.printStackTrace();

    } finally {
        try {
            if (out != null) {
                out.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

From source file:org.apache.tez.mapreduce.input.TestMultiMRInput.java

@Test(timeout = 5000)
public void testSingleSplit() throws Exception {

    Path workDir = new Path(TEST_ROOT_DIR, "testSingleSplit");
    JobConf jobConf = new JobConf(defaultConf);
    jobConf.setInputFormat(org.apache.hadoop.mapred.SequenceFileInputFormat.class);
    FileInputFormat.setInputPaths(jobConf, workDir);

    MRInputUserPayloadProto.Builder builder = MRInputUserPayloadProto.newBuilder();
    builder.setGroupingEnabled(false);
    builder.setConfigurationBytes(TezUtils.createByteStringFromConf(jobConf));
    byte[] payload = builder.build().toByteArray();

    InputContext inputContext = createTezInputContext(payload);

    MultiMRInput input = new MultiMRInput(inputContext, 1);
    input.initialize();
    List<Event> eventList = new ArrayList<Event>();

    String file1 = "file1";
    LinkedHashMap<LongWritable, Text> data1 = createInputData(localFs, workDir, jobConf, file1, 0, 10);
    SequenceFileInputFormat<LongWritable, Text> format = new SequenceFileInputFormat<LongWritable, Text>();
    InputSplit[] splits = format.getSplits(jobConf, 1);
    assertEquals(1, splits.length);

    MRSplitProto splitProto = MRInputHelpers.createSplitProto(splits[0]);
    InputDataInformationEvent event = InputDataInformationEvent.createWithSerializedPayload(0,
            splitProto.toByteString().asReadOnlyByteBuffer());

    eventList.clear();
    eventList.add(event);
    input.handleEvents(eventList);

    int readerCount = 0;
    for (KeyValueReader reader : input.getKeyValueReaders()) {
        readerCount++;
        while (reader.next()) {
            if (data1.size() == 0) {
                fail("Found more records than expected");
            }
            Object key = reader.getCurrentKey();
            Object val = reader.getCurrentValue();
            assertEquals(val, data1.remove(key));
        }
    }
    assertEquals(1, readerCount);
}
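
The assertion above uses remove() for two things at once: it returns the expected value for the key (verifying the record) and consumes the entry, so a duplicate record fails and leftovers remain detectable. A reduced sketch of this consume-and-verify pattern, in plain Java rather than the test harness:

import java.util.LinkedHashMap;

public class ConsumeAndVerifyDemo {
    public static void main(String[] args) {
        LinkedHashMap<Long, String> expected = new LinkedHashMap<>();
        expected.put(0L, "line-0");
        expected.put(1L, "line-1");

        // Simulated records read back from some source.
        Object[][] actual = { { 0L, "line-0" }, { 1L, "line-1" } };

        for (Object[] record : actual) {
            // remove() both fetches the expected value and consumes the entry,
            // so a duplicate record would fail on its second occurrence.
            String want = expected.remove(record[0]);
            if (want == null || !want.equals(record[1])) {
                throw new AssertionError("unexpected record: " + record[0]);
            }
        }
        if (!expected.isEmpty()) {
            throw new AssertionError("missing records: " + expected.keySet());
        }
    }
}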

From source file:gov.nih.nci.cabig.caaers.domain.report.Report.java

public void removeFromMetaData(String k) {
    LinkedHashMap<String, String> map = getMetaDataAsMap();
    StringBuilder sb = new StringBuilder();
    map.remove(k);
    for (Map.Entry<String, String> e : map.entrySet()) {
        if (sb.length() > 0)
            sb.append("|~");
        sb.append(e.getKey()).append(":~").append(e.getValue());
    }

    setMetaData(sb.toString());

}

From source file:com.espertech.esper.epl.spec.PatternStreamSpecRaw.java

private static MatchEventSpec analyzeMatchEvent(EvalFactoryNode relativeNode) {
    LinkedHashMap<String, Pair<EventType, String>> taggedEventTypes = new LinkedHashMap<String, Pair<EventType, String>>();
    LinkedHashMap<String, Pair<EventType, String>> arrayEventTypes = new LinkedHashMap<String, Pair<EventType, String>>();

    // Determine all the filter nodes used in the pattern
    EvalNodeAnalysisResult evalNodeAnalysisResult = EvalNodeUtil.recursiveAnalyzeChildNodes(relativeNode);

    // collect all filters underneath
    for (EvalFilterFactoryNode filterNode : evalNodeAnalysisResult.getFilterNodes()) {
        String optionalTag = filterNode.getEventAsName();
        if (optionalTag != null) {
            taggedEventTypes.put(optionalTag,
                    new Pair<EventType, String>(filterNode.getFilterSpec().getFilterForEventType(),
                            filterNode.getFilterSpec().getFilterForEventTypeName()));
        }
    }

    // collect those filters under a repeat since they are arrays
    Set<String> arrayTags = new HashSet<String>();
    for (EvalMatchUntilFactoryNode matchUntilNode : evalNodeAnalysisResult.getRepeatNodes()) {
        EvalNodeAnalysisResult matchUntilAnalysisResult = EvalNodeUtil
                .recursiveAnalyzeChildNodes(matchUntilNode.getChildNodes().get(0));
        for (EvalFilterFactoryNode filterNode : matchUntilAnalysisResult.getFilterNodes()) {
            String optionalTag = filterNode.getEventAsName();
            if (optionalTag != null) {
                arrayTags.add(optionalTag);
            }
        }
    }

    // for each array tag change collection
    for (String arrayTag : arrayTags) {
        if (taggedEventTypes.get(arrayTag) != null) {
            arrayEventTypes.put(arrayTag, taggedEventTypes.get(arrayTag));
            taggedEventTypes.remove(arrayTag);
        }
    }

    return new MatchEventSpec(taggedEventTypes, arrayEventTypes);
}
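
The last loop above relocates an entry from taggedEventTypes to arrayEventTypes: put it into the target map, then remove it from the source. A minimal sketch of the move pattern (names are illustrative); the commented variant uses remove()'s return value to save one lookup:

import java.util.LinkedHashMap;

public class MoveEntryDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> source = new LinkedHashMap<>();
        LinkedHashMap<String, String> target = new LinkedHashMap<>();
        source.put("a", "1");
        source.put("b", "2");

        String key = "a";
        if (source.get(key) != null) {
            target.put(key, source.get(key));
            source.remove(key);
        }
        // Equivalent, with a single lookup:
        // String moved = source.remove(key);
        // if (moved != null) target.put(key, moved);

        System.out.println(source); // {b=2}
        System.out.println(target); // {a=1}
    }
}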

From source file:ubic.gemma.core.datastructure.matrix.ExpressionDataMatrixColumnSort.java

/**
 * Divide the biomaterials up into chunks based on the experimental factor given, keeping everybody in order. If the
 * factor is continuous, there is just one chunk.
 *
 * @return ordered map of fv->bm where fv is of ef, or null if it couldn't be done properly.
 */
private static LinkedHashMap<FactorValue, List<BioMaterial>> chunkOnFactor(ExperimentalFactor ef,
        List<BioMaterial> bms) {

    if (bms == null) {
        return null;
    }

    LinkedHashMap<FactorValue, List<BioMaterial>> chunks = new LinkedHashMap<>();

    /*
     * Get the factor values in the order we have things right now
     */
    for (BioMaterial bm : bms) {
        for (FactorValue fv : bm.getFactorValues()) {
            if (!ef.getFactorValues().contains(fv)) {
                continue;
            }
            if (chunks.keySet().contains(fv)) {
                continue;
            }
            chunks.put(fv, new ArrayList<BioMaterial>());
        }
    }

    /*
     * What if a bm doesn't have a value for the factor value? We need a dummy value.
     */
    FactorValue dummy = FactorValue.Factory.newInstance(ef);
    dummy.setValue("");
    dummy.setId(-1L);
    chunks.put(dummy, new ArrayList<BioMaterial>());

    for (BioMaterial bm : bms) {
        boolean found = false;
        for (FactorValue fv : bm.getFactorValues()) {
            if (ef.getFactorValues().contains(fv)) {
                found = true;
                assert chunks.containsKey(fv);
                chunks.get(fv).add(bm);
            }
        }

        if (!found) {
            if (ExpressionDataMatrixColumnSort.log.isDebugEnabled())
                ExpressionDataMatrixColumnSort.log
                        .debug(bm + " has no value for factor=" + ef + "; using dummy value");
            chunks.get(dummy).add(bm);
        }

    }

    if (chunks.get(dummy).size() == 0) {
        if (ExpressionDataMatrixColumnSort.log.isDebugEnabled())
            ExpressionDataMatrixColumnSort.log.debug("removing dummy");
        chunks.remove(dummy);
    }

    ExpressionDataMatrixColumnSort.log
            .debug(chunks.size() + " chunks for " + ef + ", from current chunk of size " + bms.size());

    /*
     * Sanity check
     */
    int total = 0;
    for (FactorValue fv : chunks.keySet()) {
        List<BioMaterial> chunk = chunks.get(fv);
        total += chunk.size();
    }

    assert total == bms.size() : "expected " + bms.size() + ", got " + total;

    return chunks;
}

From source file:org.egov.collection.integration.pgi.AxisAdaptor.java

/**
 * This method parses the given response string into an AXIS payment response object.
 *
 * @param response a <code>String</code> representation of the response.
 * @return an instance of <code>PaymentResponse</code> containing the response information.
 */
@Override
public PaymentResponse parsePaymentResponse(final String response) {
    LOGGER.info("Response message from Axis Payment gateway: " + response);
    final String[] keyValueStr = response.replace("{", "").replace("}", "").split(",");
    final LinkedHashMap<String, String> fields = new LinkedHashMap<>(0);

    for (final String pair : keyValueStr) {
        final String[] entry = pair.split("=");
        if (entry.length == 2)
            fields.put(entry[0].trim(), entry[1].trim());
    }
    /*
     * If a merchant secret has been set, sort and loop through all the data in the Virtual Payment Client response.
     * While we have the data, we can append all the fields that contain values (except the secure hash) so that we
     * can create a hash and validate it against the secure hash in the Virtual Payment Client response. NOTE: If the
     * vpc_TxnResponseCode is not a single character then there was a Virtual Payment Client error and we cannot
     * accurately validate the incoming data from the secure hash.
     */

    // remove the secure hash from the response fields, as we do not want to
    // include this field in the hash calculation
    final String vpcTxnSecureHash = null2unknown(fields.remove(CollectionConstants.AXIS_SECURE_HASH));
    // defines if error message should be output
    final String axisSecureSecret = collectionApplicationProperties.axisSecureSecret();
    if (axisSecureSecret != null && (fields.get(CollectionConstants.AXIS_TXN_RESPONSE_CODE) != null
            || NO_VALUE_RETURNED.equals(fields.get(CollectionConstants.AXIS_TXN_RESPONSE_CODE)))) {

        // create the secure hash and append it to the hash map if it was created;
        // remember, if SECURE_SECRET = "" it will not be created
        final String secureHash = hashAllFields(fields);

        // Validate the Secure Hash (remember MD5 hashes are not case
        // sensitive)
        if (!vpcTxnSecureHash.equalsIgnoreCase(secureHash)) {
            // Secure Hash validation failed, add a data field to be
            // displayed later.
            // throw new ApplicationRuntimeException("Axis Bank Payment Secure Hash validation failed");
        }
    }
    return preparePaymentResponse(fields);
}
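
Here fields.remove(CollectionConstants.AXIS_SECURE_HASH) serves double duty: it returns the received hash for the later comparison and excludes that field from the hash recomputation. A reduced sketch of the extract-and-exclude pattern (the field name and stub hash function are illustrative):

import java.util.LinkedHashMap;

public class ExtractAndExcludeDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, String> fields = new LinkedHashMap<>();
        fields.put("vpc_Amount", "100");
        fields.put("vpc_SecureHash", "ABC123");

        // Pull the hash out: remove() hands back its value and drops the key,
        // so the remaining fields can be hashed without it.
        String receivedHash = fields.remove("vpc_SecureHash");
        String computedHash = hashAllFields(fields);

        System.out.println(receivedHash.equalsIgnoreCase(computedHash)); // true
    }

    // Stand-in for the real hashing routine.
    private static String hashAllFields(LinkedHashMap<String, String> fields) {
        return "ABC123";
    }
}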

From source file:pt.lsts.neptus.util.logdownload.LogsDownloaderWorkerActions.java

private void orderAndFilterOutTheActiveLog(LinkedHashMap<FTPFile, String> retList) {
    if (retList.size() > 0) {
        String[] ordList = retList.values().toArray(new String[retList.size()]);
        Arrays.sort(ordList);
        String activeLogName = ordList[ordList.length - 1];
        for (FTPFile fFile : retList.keySet().toArray(new FTPFile[retList.size()])) {
            if (retList.get(fFile).equals(activeLogName)) {
                retList.remove(fFile);
                break;
            }
        }
    }
}

From source file:nl.systemsgenetics.eqtlannotation.EncodeMultipleTfbsOverlap.java

private static LinkedHashMap<String, HashMap<String, ArrayList<EncodeNarrowPeak>>> readMultipleTfbsInformation(
        String inputFolderTfbsData) throws IOException {
    LinkedHashMap<String, HashMap<String, ArrayList<EncodeNarrowPeak>>> data = new LinkedHashMap<>();
    File file = new File(inputFolderTfbsData);
    File[] files = file.listFiles();
    ArrayList<String> vecFiles = new ArrayList<>();
    for (File f : files) {
        //            System.out.println(f.getAbsolutePath());
        vecFiles.add(f.getAbsolutePath());
    }

    for (String fileToRead : vecFiles) {
        TextFile reader = new TextFile(fileToRead, TextFile.R);

        String[] storingInformation = fileToRead.split("_");
        //            String cellLine = storingInformation[1].replace("TFBS\\","");
        String transcriptionFactor = storingInformation[2].replace(".narrowPeak", "");
        if (storingInformation.length > 4) {
            for (int i = 3; i < (storingInformation.length - 1); ++i) {
                transcriptionFactor = transcriptionFactor + "_"
                        + storingInformation[i].replace(".narrowPeak", "");
            }
        }

        String row;
        while ((row = reader.readLine()) != null) {

            String[] parts = StringUtils.split(row, '\t');
            if (!data.containsKey(transcriptionFactor)) {
                data.put(transcriptionFactor, new HashMap<String, ArrayList<EncodeNarrowPeak>>());
            }
            if (!data.get(transcriptionFactor).containsKey(parts[0])) {
                data.get(transcriptionFactor).put(parts[0], new ArrayList<EncodeNarrowPeak>());
            }
            data.get(transcriptionFactor).get(parts[0]).add(new EncodeNarrowPeak(parts, fileToRead));
        }

        reader.close();

    }
    ArrayList<String> cleanList = new ArrayList<>();
    for (Entry<String, HashMap<String, ArrayList<EncodeNarrowPeak>>> tfInformation : data.entrySet()) {
        System.out.println("Transcription factor: " + tfInformation.getKey());
        int counter = 0;
        for (Entry<String, ArrayList<EncodeNarrowPeak>> tfEntry : tfInformation.getValue().entrySet()) {
            Collections.sort(tfEntry.getValue());
            counter += tfEntry.getValue().size();
        }
        System.out.println("\tcontacts: " + counter);

        //remove all with less than 750 contacts
        //            if(counter<750){
        //                cleanList.add(tfInformation.getKey());
        //            }
    }

    for (String k : cleanList) {
        data.remove(k);
    }

    return data;
}
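
The commented-out filter above illustrates the deferred-removal idiom: keys to drop are collected into cleanList while iterating, and removed from the LinkedHashMap only after the loop finishes, which avoids a ConcurrentModificationException. A minimal sketch (names and the threshold are illustrative):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;

public class DeferredRemoveDemo {
    public static void main(String[] args) {
        LinkedHashMap<String, Integer> counts = new LinkedHashMap<>();
        counts.put("TF1", 900);
        counts.put("TF2", 500);

        // Collect keys during iteration; mutating the map here would throw.
        ArrayList<String> toRemove = new ArrayList<>();
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            if (e.getValue() < 750) {
                toRemove.add(e.getKey());
            }
        }
        // Safe to remove once the iteration is over.
        for (String k : toRemove) {
            counts.remove(k);
        }
        System.out.println(counts); // {TF1=900}
    }
}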