List of usage examples for java.util.HashMap containsKey
public boolean containsKey(Object key)
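A minimal standalone sketch of the basic contract (the class and variable names here are illustrative, not taken from any of the source files below): containsKey reports whether a mapping for the key exists at all, which matters when null values may be stored, because a null result from get is ambiguous in that case.

import java.util.HashMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        HashMap<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);
        ages.put("bob", null); // a key explicitly mapped to null

        System.out.println(ages.containsKey("alice")); // true
        System.out.println(ages.containsKey("carol")); // false

        // get() alone cannot distinguish "absent" from "mapped to null"
        System.out.println(ages.get("bob"));           // null
        System.out.println(ages.containsKey("bob"));   // true
    }
}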
From source file:dk.statsbiblioteket.doms.licensemodule.validation.LicenseValidator.java
public static ArrayList<ConfiguredDomLicenseGroupType> buildGroups(ArrayList<String> groups) {
    ArrayList<ConfiguredDomLicenseGroupType> filteredGroups = new ArrayList<ConfiguredDomLicenseGroupType>();
    ArrayList<ConfiguredDomLicenseGroupType> configuredGroups = LicenseCache.getConfiguredDomLicenseGroupTypes();

    HashMap<String, ConfiguredDomLicenseGroupType> configuredGroupsNamesMap = new HashMap<String, ConfiguredDomLicenseGroupType>();
    for (ConfiguredDomLicenseGroupType current : configuredGroups) {
        configuredGroupsNamesMap.put(current.getKey(), current);
    }

    for (String currentGroup : groups) {
        if (configuredGroupsNamesMap.containsKey(currentGroup)) {
            filteredGroups.add(configuredGroupsNamesMap.get(currentGroup));
        } else {
            log.error("Group not found in Group configuration:" + currentGroup);
            throw new IllegalArgumentException("Unknown group:" + currentGroup);
        }
    }
    return filteredGroups;
}
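The example above calls containsKey and then get on the same key, which hashes the key twice. Assuming the configuration map never stores null values (which holds for the map built above), a single get with a null check is an equivalent variant; a minimal sketch of just the lookup, reusing the example's own names:

ConfiguredDomLicenseGroupType match = configuredGroupsNamesMap.get(currentGroup);
if (match != null) {
    filteredGroups.add(match); // found: keep the configured group
} else {
    log.error("Group not found in Group configuration:" + currentGroup);
    throw new IllegalArgumentException("Unknown group:" + currentGroup);
}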
From source file:net.sf.jabref.exporter.FileActions.java
/**
 * Write all strings in alphabetical order, modified to produce a safe (for
 * BibTeX) order of the strings if they reference each other.
 *
 * @param fw       The Writer to send the output to.
 * @param database The database whose strings we should write.
 * @throws IOException If anything goes wrong in writing.
 */
private static void writeStrings(Writer fw, BibDatabase database) throws IOException {
    FileActions.previousStringType = BibtexString.Type.AUTHOR;
    List<BibtexString> strings = new ArrayList<>();
    for (String s : database.getStringKeySet()) {
        strings.add(database.getString(s));
    }
    Collections.sort(strings, new BibtexStringComparator(true));

    // First, make a Map of all entries:
    HashMap<String, BibtexString> remaining = new HashMap<>();
    int maxKeyLength = 0;
    for (BibtexString string : strings) {
        remaining.put(string.getName(), string);
        maxKeyLength = Math.max(maxKeyLength, string.getName().length());
    }

    for (BibtexString.Type t : BibtexString.Type.values()) {
        for (BibtexString bs : strings) {
            if (remaining.containsKey(bs.getName()) && (bs.getType() == t)) {
                FileActions.writeString(fw, bs, remaining, maxKeyLength);
            }
        }
    }
}
From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.RemoteDPParForMR.java
/**
 * Result file contains hierarchy of workerID-resultvar(incl filename). We deduplicate
 * on the workerID. Without JVM reuse each task refers to a unique workerID, so we
 * will not find any duplicates. With JVM reuse, however, each slot refers to a workerID,
 * and there are duplicate filenames due to partial aggregation and overwrite of fname
 * (the RemoteParWorkerMapper ensures uniqueness of those files independent of the
 * runtime implementation).
 *
 * @param job
 * @param fname
 * @return
 * @throws DMLRuntimeException
 */
@SuppressWarnings("deprecation")
public static LocalVariableMap[] readResultFile(JobConf job, String fname)
        throws DMLRuntimeException, IOException {
    HashMap<Long, LocalVariableMap> tmp = new HashMap<Long, LocalVariableMap>();

    FileSystem fs = FileSystem.get(job);
    Path path = new Path(fname);
    LongWritable key = new LongWritable(); //workerID
    Text value = new Text();               //serialized var header (incl filename)

    int countAll = 0;
    for (Path lpath : MatrixReader.getSequenceFilePaths(fs, path)) {
        SequenceFile.Reader reader = new SequenceFile.Reader(FileSystem.get(job), lpath, job);
        try {
            while (reader.next(key, value)) {
                //System.out.println("key="+key.get()+", value="+value.toString());
                if (!tmp.containsKey(key.get()))
                    tmp.put(key.get(), new LocalVariableMap());
                Object[] dat = ProgramConverter.parseDataObject(value.toString());
                tmp.get(key.get()).put((String) dat[0], (Data) dat[1]);
                countAll++;
            }
        } finally {
            if (reader != null)
                reader.close();
        }
    }

    LOG.debug("Num remote worker results (before deduplication): " + countAll);
    LOG.debug("Num remote worker results: " + tmp.size());

    //create return array
    return tmp.values().toArray(new LocalVariableMap[0]);
}
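The containsKey guard above only ensures that a LocalVariableMap exists for a workerID before values are added to it. On Java 8+ the same grouping step can be expressed with computeIfAbsent; a minimal sketch of just the loop body, reusing the example's variables and shown as an equivalent formulation rather than the project's actual code:

// computeIfAbsent creates and registers a new LocalVariableMap the first time
// a workerID is seen, then returns the (new or existing) map to add to
Object[] dat = ProgramConverter.parseDataObject(value.toString());
tmp.computeIfAbsent(key.get(), k -> new LocalVariableMap()).put((String) dat[0], (Data) dat[1]);
countAll++;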
From source file:frequencyanalysis.FrequencyAnalysis.java
public static Map getQuadgramDataFromWarAndPeace(String warAndPeaceString) {
    HashMap QuadGramCount = new HashMap<String, Integer>();
    for (int i = 0; i < warAndPeaceString.length(); i++) {
        if (i + 3 < warAndPeaceString.length()) {
            String quadGram = String.valueOf(warAndPeaceString.charAt(i))
                    + String.valueOf(warAndPeaceString.charAt(i + 1))
                    + String.valueOf(warAndPeaceString.charAt(i + 2))
                    + String.valueOf(warAndPeaceString.charAt(i + 3));
            if (!QuadGramCount.containsKey(quadGram)) {
                QuadGramCount.put(quadGram, 1);
            } else {
                int tempCount = (int) QuadGramCount.get(quadGram);
                tempCount++;
                QuadGramCount.put(quadGram, tempCount);
            }
        }
    }
    return QuadGramCount;
}
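The if/else branch above is the classic count-or-increment use of containsKey. On Java 8+ the same counting can be done without the explicit check by using merge; a minimal self-contained sketch (the class and method names are illustrative, not from the source file above):

import java.util.HashMap;

public class QuadgramCounter {
    public static HashMap<String, Integer> countQuadgrams(String text) {
        HashMap<String, Integer> quadGramCount = new HashMap<>();
        for (int i = 0; i + 3 < text.length(); i++) {
            String quadGram = text.substring(i, i + 4);
            // merge() stores 1 for an absent key and otherwise sums with the
            // existing count, replacing the containsKey/get/put sequence
            quadGramCount.merge(quadGram, 1, Integer::sum);
        }
        return quadGramCount;
    }
}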
From source file:com.vmware.qe.framework.datadriven.utils.XMLUtil.java
/**
 * Finds all the child nodes whose tag names are in the tags list. For example, to get <one>
 * and <two> from the following XML node:
 * <parent> <one> something </one> <two> something </two> <three> something </three> </parent>
 * you pass the <parent> node along with the list {"one","two"}.
 *
 * @param parentNode : parentNode to look in for tags
 * @param tags : list of tags to be found among parentNode children
 * @return map of <NodeName, Node> that is found based on the tags given in the tags list
 */
public static HashMap<String, Node> getChildrenByTagNames(Node parentNode, ArrayList<String> tags) {
    HashMap<String, Node> resultList = new HashMap<String, Node>();
    String tagFromList = null;
    Node foundNode = null;
    // both a parent node and at least one tag are required before looking anything up
    if (tags.size() > 0 && parentNode != null) {
        for (int tagNo = 0; tagNo < tags.size(); tagNo++) {
            tagFromList = tags.get(tagNo).toString();
            foundNode = XMLUtil.getInnerNodeByTagName(parentNode, tagFromList);
            if (!resultList.containsKey(tagFromList)) {
                resultList.put(tagFromList, foundNode);
            }
        }
    }
    return resultList.size() == 0 ? null : resultList;
}
From source file:ezbake.deployer.utilities.ArtifactHelpers.java
/**
 * Append the given entries to the given archive input, writing the combined archive to the
 * given output stream. Existing entries are copied through to the output unless an entry of
 * the same name is among the files being added.
 *
 * @param inputStream - archive input to append to
 * @param output - what to copy the modified archive to
 * @param filesToAdd - what entries to append.
 */
private static void appendFilesInTarArchive(ArchiveInputStream inputStream, OutputStream output,
        Iterable<ArtifactDataEntry> filesToAdd) throws DeploymentException {
    ArchiveStreamFactory asf = new ArchiveStreamFactory();
    try {
        HashMap<String, ArtifactDataEntry> newFiles = new HashMap<>();
        for (ArtifactDataEntry entry : filesToAdd) {
            newFiles.put(entry.getEntry().getName(), entry);
        }

        GZIPOutputStream gzs = new GZIPOutputStream(output);
        TarArchiveOutputStream aos = (TarArchiveOutputStream) asf
                .createArchiveOutputStream(ArchiveStreamFactory.TAR, gzs);
        aos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);

        // copy the existing entries
        ArchiveEntry nextEntry;
        while ((nextEntry = inputStream.getNextEntry()) != null) {
            // If we're passing in the same file, don't copy it into the new archive
            if (!newFiles.containsKey(nextEntry.getName())) {
                aos.putArchiveEntry(nextEntry);
                IOUtils.copy(inputStream, aos);
                aos.closeArchiveEntry();
            }
        }

        for (ArtifactDataEntry entry : filesToAdd) {
            aos.putArchiveEntry(entry.getEntry());
            IOUtils.write(entry.getData(), aos);
            aos.closeArchiveEntry();
        }
        aos.finish();
        gzs.finish();
    } catch (ArchiveException | IOException e) {
        log.error(e.getMessage(), e);
        throw new DeploymentException(e.getMessage());
    }
}
From source file:de.tudarmstadt.ukp.dkpro.tc.mallet.util.MalletUtils.java
public static HashMap<String, Integer> getFeatureOffsetIndex(FeatureStore instanceList) {
    HashMap<String, Integer> featureOffsetIndex = new HashMap<String, Integer>();
    for (int i = 0; i < instanceList.getNumberOfInstances(); i++) {
        Instance instance = instanceList.getInstance(i);
        for (Feature feature : instance.getFeatures()) {
            String featureName = feature.getName();
            if (!featureOffsetIndex.containsKey(featureName)) {
                featureOffsetIndex.put(featureName, featureOffsetIndex.size());
            }
        }
    }
    return featureOffsetIndex;
}
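The containsKey check above assigns the next free offset (the current map size) to each feature name the first time it is seen. On Java 8+ putIfAbsent expresses the same step in one call; a minimal sketch of just the inner loop, reusing the example's names and shown only as an equivalent formulation:

for (Feature feature : instance.getFeatures()) {
    // putIfAbsent stores the value only when the key is absent, so each new
    // feature name receives the current size of the map as its offset
    featureOffsetIndex.putIfAbsent(feature.getName(), featureOffsetIndex.size());
}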
From source file:de.tudarmstadt.ukp.dkpro.tc.mallet.util.MalletUtils.java
public static void writeFeatureNamesToFile(FeatureStore instanceList, File outputFile) throws IOException {
    BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(new GZIPOutputStream(new FileOutputStream(outputFile)), "UTF-8"));
    HashMap<String, Integer> featureOffsetIndex = new HashMap<String, Integer>();
    for (int i = 0; i < instanceList.getNumberOfInstances(); i++) {
        Instance instance = instanceList.getInstance(i);
        for (Feature feature : instance.getFeatures()) {
            String featureName = feature.getName();
            if (!featureOffsetIndex.containsKey(featureName)) {
                featureOffsetIndex.put(featureName, featureOffsetIndex.size());
                bw.write(featureName + " ");
            }
        }
    }
    bw.write(MalletTestTask.OUTCOME_CLASS_LABEL_NAME);
    bw.close();
}
From source file:com.vmware.identity.idm.server.LocalOsIdentityProviderTest.java
private static void validateGroupsSubset(Set<GroupInfo> expectedSubset, Set<Group> actualSet,
        String domainName, String domainAlias) {
    HashMap<String, Group> superSet = new HashMap<String, Group>();
    if (actualSet != null) {
        for (Group g : actualSet) {
            Assert.assertNotNull(g);
            superSet.put(g.getName(), g);
        }
    }

    if (expectedSubset != null) {
        for (GroupInfo gd : expectedSubset) {
            Assert.assertTrue(superSet.containsKey(gd.getName()));
            Group g = superSet.get(gd.getName());

            assertEqualsString(gd.getName(), g.getName());
            if (providerHasAlias(domainAlias)) {
                Assert.assertNotNull(g.getAlias());
                assertEqualsString(gd.getName(), g.getAlias().getName());
                assertEqualsString(domainAlias, g.getAlias().getDomain());
            }
            assertEqualsString(domainName, g.getDomain());
            Assert.assertNotNull(g.getDetail());
            // assertEqualsString( gd.getDescription(), g.getDetail().getDescription() );
        }
    }
}
From source file:marytts.server.http.MivoqSynthesisRequestHandler.java
private static void parseEffectsIntoHashMap(HashMap<String, ParamParser> registry,
        HashMap<String, Object> effects_values, JSONObject effects) {
    for (String name : JSONObject.getNames(effects)) {
        // System.out.println("----------");
        // System.out.println(name);
        ParamParser parser = registry.get(name);
        if (parser != null) {
            Object param = parser.parse(effects.get(name));
            // System.out.println(param);
            if (effects_values.containsKey(name)) {
                Object o = effects_values.get(name);
                param = parser.merge(o, param);
            }
            // System.out.println(param);
            effects_values.put(name, param);
        }
    }
}