Usage examples for java.util.LinkedHashMap.keySet()
public Set<K> keySet()
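Before the collected examples, a minimal self-contained sketch of the method's behavior (class and key names here are illustrative, not from any of the sources below): on a LinkedHashMap the returned set iterates in insertion order, and it is a live view backed by the map, so removing a key through the set also removes the corresponding entry from the map.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;

public class KeySetDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new LinkedHashMap<>();
        map.put("alpha", 1);
        map.put("beta", 2);
        map.put("gamma", 3);

        // For a LinkedHashMap, keySet() yields keys in insertion order.
        Set<String> keys = map.keySet();
        for (String key : keys) {
            System.out.println(key + " -> " + map.get(key)); // alpha, beta, gamma
        }

        // The set is a view backed by the map: removing a key here
        // also removes the mapping from the map itself.
        keys.remove("beta");
        System.out.println(map.containsKey("beta")); // false
    }
}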
From source file:org.i3xx.step.uno.impl.service.builtin.ContextAdministrationService.java
/**
 * Reads the key value pairs from a JSON String, clears the existing map
 * and puts the pairs into the map.
 *
 * @param json
 * @throws Exception
 */
public void fromJSON(String json) throws Exception {
    Map<String, Object> values = context.getValues();
    values.clear();

    GsonBuilder gsonBuilder = new GsonBuilder();
    gsonBuilder.registerTypeAdapter(LinkedHashMap.class, new GsonHashMapDeserializer());
    Gson gson = gsonBuilder.create();

    final Callable rev = new Callable() {
        public Object call(Context cx, Scriptable scope, Scriptable thisObj, Object[] args) {
            //
            // sh, 12.12.2014
            // not really understood what happens, but
            // @see http://stackoverflow.com/questions/10856154/rhino-return-json-from-within-java
            // tested some toJSON/fromJSON sequences with success.
            //
            return args[1];
        }
    };

    Context jscx = Context.getCurrentContext();
    boolean jsf = jscx != null;
    if (!jsf)
        jscx = Context.enter();

    LinkedHashMap<?, ?> map = gson.fromJson(json, LinkedHashMap.class);
    Iterator<?> iter = map.keySet().iterator();
    while (iter.hasNext()) {
        String key = (String) iter.next();
        Object val = map.get(key);

        if (val == null) {
            //values.put(key, null);
        } else if (val instanceof Number) {
            values.put(key, val);
        } else if (val instanceof String) {
            values.put(key, val);
        } else if (val instanceof Map<?, ?>) {
            LinkedHashMap<?, ?> mo2 = (LinkedHashMap<?, ?>) val;
            if (mo2.containsKey("Object")) {
                String stmt = (String) mo2.get("Object");
                if (stmt != null) {
                    Object obj = writeValue(Base64.decodeBase64(stmt));
                    if (obj != null) {
                        values.put(key, obj);
                    } //fi
                } //fi
            } else if (mo2.containsKey("Scriptable")) {
                String stmt = (String) mo2.get("Scriptable");
                if (stmt != null) {
                    Scriptable scope = context.getScope();
                    Object obj = NativeJSON.parse(jscx, scope, stmt, rev);
                    if (obj != null) {
                        values.put(key, obj);
                    } //fi
                } //fi
            } else {
                continue;
            }
        } //fi
    } //while

    if (!jsf)
        Context.exit();
}
From source file:org.openmeetings.cli.OmHelpFormatter.java
@Override
protected StringBuffer renderOptions(StringBuffer sb, int width, Options options, int leftPad, int descPad) {
    final String dpad = createPadding(descPad);
    final String optional = "(optional) ";
    LinkedHashMap<String, List<OmOption>> optList = getOptions(options, leftPad);

    char[] delimiter = new char[width - 2];
    Arrays.fill(delimiter, '-');

    for (String key : optList.keySet()) {
        if (GENERAL_OPTION_GROUP.equals(key)) {
            sb.append("General options:").append(getNewLine());
        }
        for (OmOption option : optList.get(key)) {
            StringBuilder optBuf = new StringBuilder(option.getHelpPrefix());

            if (optBuf.length() < maxPrefixLength) {
                optBuf.append(createPadding(maxPrefixLength - optBuf.length()));
            }
            optBuf.append(dpad);

            int nextLineTabStop = maxPrefixLength + descPad;
            if (option.isOptional(key)) {
                optBuf.append(optional);
            }
            if (option.getDescription() != null) {
                optBuf.append(option.getDescription());
            }

            renderWrappedText(sb, width, nextLineTabStop, optBuf.toString());
            sb.append(getNewLine());
        }
        sb.append(delimiter).append(getNewLine());
    }
    return sb;
}
From source file:org.neo4j.nlp.impl.util.VectorUtil.java
public static Map<String, Object> getCosineSimilarityVector(GraphDatabaseService db) {
    Map<String, List<LinkedHashMap<String, Object>>> documents = getFeaturesForAllClasses(db);
    Map<String, List<LinkedHashMap<String, Object>>> results = new HashMap<>();
    List<Integer> featureIndexList = getFeatureIndexList(db);

    List<String> documentList = documents.keySet().stream().collect(Collectors.toList());
    Collections.sort(documentList, (a, b) -> a.compareToIgnoreCase(b));

    for (String key : documentList) {
        List<LinkedHashMap<String, Object>> resultList = new ArrayList<>();
        LinkedHashMap<String, Double> classMap = new LinkedHashMap<>();

        List<Double> v1 = featureIndexList.stream()
                .map(i -> documents.get(key).contains(i) ? featureIndexList.indexOf(i) : 0.0)
                .collect(Collectors.toList());

        documents.keySet().stream().forEach(otherKey -> {
            List<Double> v2 = featureIndexList.stream()
                    .map(i -> documents.get(otherKey).contains(i) ? featureIndexList.indexOf(i) : 0.0)
                    .collect(Collectors.toList());
            classMap.put(otherKey, cosineSimilarity(v1, v2));
        });

        final List<LinkedHashMap<String, Object>> finalResultList = resultList;
        classMap.keySet().forEach(ks -> {
            LinkedHashMap<String, Object> localMap = new LinkedHashMap<>();
            localMap.put("class", ks);
            localMap.put("similarity", classMap.get(ks));
            finalResultList.add(localMap);
        });

        Collections.sort(finalResultList,
                (a, b) -> ((String) a.get("class")).compareToIgnoreCase((String) b.get("class")));

        results.put(key, finalResultList);
    }

    List<LinkedHashMap<String, Object>> similarityVector = new ArrayList<>();

    for (String key : results.keySet()) {
        List<Double> cosineVector;
        cosineVector = results.get(key).stream()
                .map(a -> Convert.toDouble(Math.round(100000 * (Double) a.get("similarity"))))
                .collect(Collectors.toList());
        LinkedHashMap<String, Object> row = new LinkedHashMap<>();
        row.put("class", key);
        row.put("vector", cosineVector);
        similarityVector.add(row);
    }

    Collections.sort(similarityVector,
            (a, b) -> ((String) a.get("class")).compareToIgnoreCase((String) b.get("class")));

    Map<String, Object> vectorMap = new LinkedHashMap<>();
    List<ArrayList<Double>> vectors = new ArrayList<>();
    List<String> classNames = new ArrayList<>();

    for (LinkedHashMap<String, Object> val : similarityVector) {
        vectors.add((ArrayList<Double>) val.get("vector"));
        classNames.add((String) val.get("class"));
    }

    vectorMap.put("classes", classNames);
    vectorMap.put("vectors", vectors);

    return vectorMap;
}
From source file:com.grarak.kerneladiutor.database.tools.profiles.Profiles.java
public void putProfile(String name, LinkedHashMap<String, String> commands) {
    try {
        JSONObject items = new JSONObject();
        items.put("name", name);

        ProfileItem profileItem = new ProfileItem(items);
        // Iterate command ids in the order they were inserted.
        for (String id : commands.keySet()) {
            profileItem.putCommand(new ProfileItem.CommandItem(id, commands.get(id)));
        }
        putItem(items);
    } catch (JSONException e) {
        e.printStackTrace();
    }
}
From source file:edu.jhuapl.openessence.web.util.MapQueryUtil.java
public int performUpdate(OeDataEntrySource mapLayerDataEntrySource, LinkedHashMap<String, Object> updateMap,
        Collection<String> placeholders) {
    JdbcTemplate pgdb = new JdbcTemplate(mapDataSource);

    // The LinkedHashMap guarantees that keySet() and values() iterate in the
    // same (insertion) order, so column names line up with their values.
    StringBuilder sb = new StringBuilder();
    sb.append("insert into ").append(mapLayerDataEntrySource.getTableName());
    sb.append(" (");
    sb.append(StringUtils.join(updateMap.keySet(), ", "));
    sb.append(") values (");
    sb.append(StringUtils.join(placeholders, ", "));
    sb.append(")");

    String sql = sb.toString();
    log.debug(sql);

    return pgdb.update(sql, updateMap.values().toArray());
}
From source file:org.envirocar.wps.DataTransformProcess.java
private Set<String> gatherPropertiesForFeatureTypeBuilder(ArrayList<?> features) {
    Set<String> distinctPhenomenonNames = new HashSet<String>();

    for (Object object : features) {
        if (object instanceof LinkedHashMap<?, ?>) {
            LinkedHashMap<?, ?> featureMap = (LinkedHashMap<?, ?>) object;
            Object propertiesObject = featureMap.get("properties");

            if (propertiesObject instanceof LinkedHashMap<?, ?>) {
                LinkedHashMap<?, ?> propertiesMap = (LinkedHashMap<?, ?>) propertiesObject;
                Object phenomenonsObject = propertiesMap.get("phenomenons");

                if (phenomenonsObject instanceof LinkedHashMap<?, ?>) {
                    LinkedHashMap<?, ?> phenomenonsMap = (LinkedHashMap<?, ?>) phenomenonsObject;

                    for (Object phenomenonKey : phenomenonsMap.keySet()) {
                        Object phenomenonValue = phenomenonsMap.get(phenomenonKey);
                        if (phenomenonValue instanceof LinkedHashMap<?, ?>) {
                            LinkedHashMap<?, ?> phenomenonValueMap = (LinkedHashMap<?, ?>) phenomenonValue;
                            String unit = phenomenonValueMap.get("unit").toString();
                            distinctPhenomenonNames.add(phenomenonKey.toString() + " (" + unit + ")");
                        }
                    }
                }
            }
        }
    }
    return distinctPhenomenonNames;
}
From source file:org.lambda3.indra.core.lucene.LuceneVectorSpace.java
@Override
public Map<String, RealVector> getNearestVectors(AnalyzedTerm term, int topk, Filter filter) {
    TermQuery query = new TermQuery(new Term(term.getFirstToken()));

    try {
        TopDocs topDocs = termsSearcher.search(query, 1);
        if (topDocs.totalHits == 1) {
            Document doc = termsSearcher.doc(topDocs.scoreDocs[0].doc);
            String[] indexes = doc.getValues(INDEXES_FIELD);
            RealVector termVector = BinaryCodecs.unmarshall(doc.getBinaryValue(VECTOR_FIELD).bytes, true, -1);

            Map<String, RealVector> candidates = collectVectors(Arrays.asList(indexes), INDEXES_FIELD);
            Map<String, Double> results = new ConcurrentHashMap<>();

            candidates.entrySet().stream().parallel().forEach(e -> {
                double score = func.sim(termVector, e.getValue(), true);
                results.put(e.getKey(), score);
            });

            // The LinkedHashMap preserves the sorted-by-score order, so keySet()
            // below iterates candidates from best to worst.
            LinkedHashMap<String, Double> sortedResults = MapUtils.entriesSortedByValues(results);
            Map<String, RealVector> nearest = new LinkedHashMap<>();

            int count = 0;
            for (String key : sortedResults.keySet()) {
                Map<Integer, Double> mapVector = RealVectorUtil.vectorToMap(candidates.get(key));
                int size = (int) getMetadata().dimensions;
                OpenMapRealVector vector = new OpenMapRealVector(size);

                for (int index : mapVector.keySet()) {
                    vector.setEntry(index, mapVector.get(index));
                }
                nearest.put(key, vector);
            }

            return nearest;
        }
    } catch (IOException e) {
        logger.error(e.getMessage());
        //TODO throw a new exception here.
    }
    return null;
}
From source file:org.skb.lang.dal.DalPass2_Ast.java
public void testAtom(Token tk) {
    this.atoms.scope.push(tk);
    String atomScope = this.atoms.scope.toString();
    String scopeSep = this.atoms.scope.separator();

    LinkedHashMap<String, String> path = this.buildPathList(atomScope, scopeSep);
    ArrayList<String> keys = new ArrayList<String>(path.keySet());
    int pathSize = StringUtils.split(atomScope, scopeSep).length;

    if (path.size() == 0) {
        System.err.println("error: ID not known [" + tk.getText() + "]");
        return;
    }

    //first can be Repository or Package
    if (path.get(keys.get(0)).equals(DalConstants.Tokens.dalREPOSITORY)) {
        //in Repository, we have only tables, in there lots of fields (nothing to test) and optionally a sequence
        if (path.get(keys.get(2)).equals(DalConstants.Tokens.dalSEQUENCE)) {
            //in sequence we have many fields (level 4)
            if (path.get(keys.get(3)).equals(DalConstants.Tokens.dalFIELD)) {
                //now remove "sequence@@" and test if Atom exists
                String t = atomScope.replace("sequence" + scopeSep, "");
                if (!this.atoms.containsKey(t))
                    System.err.println("error in repository: field in sequence not defined for table");
            }
        }
    } else if (path.get(keys.get(0)).equals(DalConstants.Tokens.dalPACKAGE)) {
        //first check for definitions for a repository table
        if (path.get(keys.get(1)).equals(DalConstants.Tokens.dalREPOSITORY)
                && path.get(keys.get(2)).equals(DalConstants.Tokens.dalTABLE)) {
            //remove the first path entry (current package) and test for the repository, print error only for the actual repo Atom
            if (pathSize == 3
                    && !this.atoms.containsKey(keys.get(1).substring(keys.get(1).indexOf(scopeSep) + 2))) {
                System.err.println("unknown repository referenced in package");
            }
            //remove the first path entry (current package) and test for the repository table, print error only for the actual repo-table Atom
            if (pathSize == 4
                    && !this.atoms.containsKey(keys.get(2).substring(keys.get(2).indexOf(scopeSep) + 2))) {
                System.err.println("unknown repository-table referenced in package");
            }
            //check for referenced field in table for repo, error if field is not defined in repo-table
            if (pathSize == 5) {
                String[] split = StringUtils.split(atomScope, scopeSep);
                String field = StringUtils.join(new String[] { split[1], split[2], split[4] }, scopeSep);
                if (!this.atoms.containsKey(field))
                    System.err.println("unknown field for repository-table referenced in package");
            }
        }
        //next check if we are defining a package table
        if (path.get(keys.get(1)).equals(DalConstants.Tokens.dalTABLE)) {
            //in a table, we have lots of fields and optionally a sequence (s=3), but we can only check on the sequence at the end
            if (path.get(keys.get(2)).equals(DalConstants.Tokens.dalFIELD)) {
                //System.err.println(pathSize + " = table field = " + atomScope);
            }
            if (path.get(keys.get(2)).equals(DalConstants.Tokens.dalSEQUENCE)) {
                //in sequence we only care about size of 4
                if (pathSize == 4) {
                    String t = atomScope.replace("sequence" + scopeSep, "");
                    if (!this.atoms.containsKey(t))
                        System.err.println("error in repository: field in sequence not defined for table");
                }
            }
        }
        //next check if we are adding actions to the package
        if (path.get(keys.get(1)).equals(DalConstants.Tokens.dalACTIONS)) {
            //check for the referenced table, size=4
            if (pathSize == 4) {
                String[] split = StringUtils.split(atomScope, scopeSep);
                String field = StringUtils.join(new String[] { split[0], split[3] }, scopeSep);
                if (!this.atoms.containsKey(field))
                    System.err.println("unknown table referenced in action for package");
            }
            //check for the individual fields of the actions, if keys exist in the named table
            if (pathSize == 5) {
                String[] split = StringUtils.split(atomScope, scopeSep);
                String field = StringUtils.join(new String[] { split[0], split[3], split[4] }, scopeSep);
                if (!this.atoms.containsKey(field))
                    System.err.println("unknown key for table referenced in action for package");
            }
        }
        //last check if we are adding data to the package
        if (path.get(keys.get(1)).equals(DalConstants.Tokens.dalDATA)) {
            //first check if referenced table exists in the package
            if (pathSize == 4) {
                String[] split = StringUtils.split(atomScope, scopeSep);
                String field = StringUtils.join(new String[] { split[0], split[3] }, scopeSep);
                if (!this.atoms.containsKey(field))
                    System.err.println("unknown table referenced in data for package");
            }
            if (pathSize == 5) {
                String[] split = StringUtils.split(atomScope, scopeSep);
                String field = StringUtils.join(new String[] { split[0], split[3], split[4] }, scopeSep);
                if (!this.atoms.containsKey(field))
                    System.err.println("unknown key for table referenced in data for package");
            }
        }
    }
}
From source file:org.netflux.core.RecordMetadata.java
/**
 * Retains all the field metadata with names included in the supplied collection. In other words, removes from
 * this metadata all the field metadata with names not included in the supplied collection.
 *
 * @param fieldNames the names of the field metadata to keep.
 * @throws NullPointerException if the specified collection is <code>null</code>.
 */
public void retain(Collection<String> fieldNames) {
    LinkedHashMap<String, Integer> fieldsToRemove = (LinkedHashMap<String, Integer>) this.fieldIndexes.clone();
    // keySet() is a view backed by the cloned map, so removeAll() leaves only
    // the fields that are NOT in fieldNames, i.e. the ones to remove.
    fieldsToRemove.keySet().removeAll(fieldNames);
    this.remove(fieldsToRemove.keySet());
}
From source file:org.broad.igv.track.CombinedFeatureSource.java
/**
 * Perform the actual combination operation between the constituent data
 * sources. This implementation re-runs the operation on each call.
 *
 * @param chr
 * @param start
 * @param end
 * @return
 * @throws IOException
 */
@Override
public Iterator<Feature> getFeatures(String chr, int start, int end) throws IOException {
    String cmd = Globals.BEDtoolsPath + " " + this.operation.getCmd();
    LinkedHashMap<String, Integer> tempFiles = createTempFiles(chr, start, end);
    String[] fiNames = tempFiles.keySet().toArray(new String[0]);

    if (operation == Operation.MULTIINTER) {
        assert tempFiles.size() >= 2;
        cmd += " -i " + StringUtils.join(tempFiles.keySet(), " ");
    } else {
        assert tempFiles.size() == 2;
        cmd += " -a " + fiNames[0] + " -b " + fiNames[1];
    }

    //Start bedtools process
    Process pr = RuntimeUtils.startExternalProcess(cmd, null, null);

    //Read back in the data which bedtools output
    BufferedReader in = new BufferedReader(new InputStreamReader(pr.getInputStream()));
    BufferedReader err = new BufferedReader(new InputStreamReader(pr.getErrorStream()));

    List<Feature> featuresList = new ArrayList<Feature>();
    IGVBEDCodec codec = new IGVBEDCodec();

    String line;
    Feature feat;
    int numCols0 = tempFiles.get(fiNames[0]);
    int numCols1 = tempFiles.get(fiNames[1]);

    while ((line = in.readLine()) != null) {
        System.out.println(line);
        String[] tokens = line.split("\t");
        if (operation.getCmd().contains("-split")) {
            //When we split, the returned feature still has the exons
            //We don't want to plot them all a zillion times
            tokens = Arrays.copyOfRange(tokens, 0, Math.min(6, tokens.length));
        }

        if (operation == Operation.WINDOW || operation == Operation.CLOSEST) {
            String[] closest = Arrays.copyOfRange(tokens, numCols0, numCols0 + numCols1);
            //If not found, bedtools returns -1 for positions
            if (closest[1].trim().equalsIgnoreCase("-1")) {
                continue;
            }
            feat = codec.decode(closest);
        } else if (operation == Operation.MULTIINTER) {
            //We only look at regions common to ALL inputs
            //Columns: chr \t start \t end \t # of files which contained this feature \t comma-separated list of files + many more
            int numRegions = Integer.parseInt(tokens[3]);
            if (numRegions < sources.length) {
                continue;
            }
            String[] intersection = Arrays.copyOf(tokens, 3);
            feat = codec.decode(intersection);
        } else {
            feat = codec.decode(tokens);
        }
        featuresList.add(feat);
    }
    in.close();

    while ((line = err.readLine()) != null) {
        log.error(line);
    }
    err.close();

    return featuresList.iterator();
}