List of usage examples for java.util.Map.size()
int size();
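Per the Map contract, size() returns the number of key-value mappings in the map (capped at Integer.MAX_VALUE for larger maps). A minimal sketch of the basic semantics:

import java.util.HashMap;
import java.util.Map;

public class MapSizeDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        System.out.println(map.size()); // 0 - a fresh map is empty

        map.put("a", 1);
        map.put("b", 2);
        map.put("a", 3); // replacing the value of an existing key does not grow the map
        System.out.println(map.size()); // 2

        map.remove("b");
        System.out.println(map.size()); // 1
    }
}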
From source file:edu.cmu.cs.lti.ark.fn.parsing.DataPrep.java
public static void addFeature(String key, Map<String, Integer> freqmap) {
    if (!freqmap.containsKey(key)) {
        final int numFeatures = freqmap.size();
        freqmap.put(key, numFeatures + 1);
    }
}
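A hypothetical call sequence (the map and keys are illustrative) showing how size() hands out increasing 1-based feature ids, and why re-adding a key is a no-op:

Map<String, Integer> freqmap = new HashMap<>();
addFeature("f1", freqmap); // size was 0, so "f1" is assigned id 1
addFeature("f2", freqmap); // size was 1, so "f2" is assigned id 2
addFeature("f1", freqmap); // key already present, map unchanged
// freqmap is now {f1=1, f2=2}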
From source file:Main.java
/**
 * Sort a map by supplied comparator logic.
 *
 * @return new instance of {@link LinkedHashMap} containing the sorted entries of the supplied map.
 * @author Maxim Veksler
 */
public static <K, V> LinkedHashMap<K, V> sortMap(final Map<K, V> map,
        final Comparator<Map.Entry<K, V>> comparator) {
    // Convert the map into a list of key,value pairs.
    List<Map.Entry<K, V>> mapEntries = new LinkedList<Map.Entry<K, V>>(map.entrySet());

    // Sort the converted list according to supplied comparator.
    Collections.sort(mapEntries, comparator);

    // Build a new ordered map, containing the same entries as the old map.
    LinkedHashMap<K, V> result = new LinkedHashMap<K, V>(map.size() + (map.size() / 20));
    for (Map.Entry<K, V> entry : mapEntries) {
        // We iterate on the mapEntries list which is sorted by the comparator, putting new entries into
        // the targeted result which is a sorted map.
        result.put(entry.getKey(), entry.getValue());
    }

    return result;
}
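Note the initial capacity of map.size() + (map.size() / 20), which pads the target LinkedHashMap by roughly 5%. A hypothetical usage (names are illustrative) that sorts entries by ascending value:

Map<String, Integer> scores = new HashMap<String, Integer>();
scores.put("alice", 3);
scores.put("bob", 1);
scores.put("carol", 2);

LinkedHashMap<String, Integer> byValue = sortMap(scores,
        new Comparator<Map.Entry<String, Integer>>() {
            public int compare(Map.Entry<String, Integer> a, Map.Entry<String, Integer> b) {
                return a.getValue().compareTo(b.getValue());
            }
        });
// byValue iterates in the order bob=1, carol=2, alice=3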
From source file:com.datumbox.framework.common.dataobjects.MatrixDataframe.java
/**
 * Parses a testing dataset and converts it to MatrixDataframe by using an already existing mapping
 * between feature names and column ids. Typically used to parse the testing or validation dataset.
 *
 * @param newData
 * @param recordIdsReference
 * @param featureIdsReference
 * @return
 */
public static MatrixDataframe parseDataset(Dataframe newData, Map<Integer, Integer> recordIdsReference,
        Map<Object, Integer> featureIdsReference) {
    if (featureIdsReference.isEmpty()) {
        throw new IllegalArgumentException("The featureIdsReference map should not be empty.");
    }

    int n = newData.size();
    int d = featureIdsReference.size();

    MatrixDataframe m = new MatrixDataframe(new OpenMapRealMatrix(n, d), new ArrayRealVector(n));

    if (newData.isEmpty()) {
        return m;
    }

    boolean extractY = (newData.getYDataType() == TypeInference.DataType.NUMERICAL);
    boolean addConstantColumn = featureIdsReference.containsKey(Dataframe.COLUMN_NAME_CONSTANT);

    int rowId = 0;
    for (Map.Entry<Integer, Record> e : newData.entries()) {
        Integer rId = e.getKey();
        Record r = e.getValue();
        if (recordIdsReference != null) {
            recordIdsReference.put(rId, rowId);
        }

        if (extractY) {
            m.Y.setEntry(rowId, TypeInference.toDouble(r.getY()));
        }

        if (addConstantColumn) {
            m.X.setEntry(rowId, 0, 1.0); // add the constant column
        }
        for (Map.Entry<Object, Object> entry : r.getX().entrySet()) {
            Object feature = entry.getKey();
            Double value = TypeInference.toDouble(entry.getValue());
            if (value != null) {
                Integer featureId = featureIdsReference.get(feature);
                if (featureId != null) { // if the feature exists in our database
                    m.X.setEntry(rowId, featureId, value);
                }
            } // else the X matrix maintains the 0.0 default value
        }

        ++rowId;
    }

    return m;
}
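A hypothetical call (testData and featureIds are assumed to come from an earlier training-time parse) illustrating how the two size() calls fix the matrix dimensions:

Map<Integer, Integer> recordIds = new HashMap<Integer, Integer>();
MatrixDataframe testMatrix = MatrixDataframe.parseDataset(testData, recordIds, featureIds);
// X is an n-by-d matrix with n = testData.size() rows and d = featureIds.size() columns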
From source file:tradeok.HttpTool.java
public static String map2json(Map<?, ?> map) {
    StringBuilder json = new StringBuilder();
    json.append("{");
    if (map != null && map.size() > 0) {
        for (Object key : map.keySet()) {
            json.append(object2json(key));
            json.append(":");
            json.append(object2json(map.get(key)));
            json.append(",");
        }
        // Replace the trailing comma with the closing brace.
        json.setCharAt(json.length() - 1, '}');
    } else {
        json.append("}");
    }
    return json.toString();
}
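A hypothetical call, assuming object2json quotes strings and renders numbers verbatim; the size() check keeps the empty and non-empty cases on separate paths:

Map<String, Object> m = new LinkedHashMap<String, Object>();
m.put("id", 42);
m.put("name", "test");
System.out.println(map2json(m)); // {"id":42,"name":"test"} given the assumed object2json behavior
System.out.println(map2json(new HashMap<String, Object>())); // {}
System.out.println(map2json(null)); // {}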
From source file:de.thischwa.pmcms.tool.compression.Zip.java
/**
 * A wrapper to {@link #compress(File, Map, IProgressMonitor)}.
 *
 * @param zip
 * @param entries
 * @param monitor Must be initialized by the caller.
 * @throws IOException
 */
public static void compressFiles(final File zip, final Map<File, String> entries,
        final IProgressMonitor monitor) throws IOException {
    if (zip == null || entries == null || CollectionUtils.isEmpty(entries.keySet()))
        throw new IllegalArgumentException("One or more parameters are empty!");

    Map<InputStream, String> newEntries = new HashMap<InputStream, String>(entries.size());
    for (File file : entries.keySet()) {
        newEntries.put(new FileInputStream(file), entries.get(file));
    }
    compress(zip, newEntries, monitor);
}
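A hypothetical invocation; the String values are assumed to be entry names inside the archive, and NullProgressMonitor stands in for a real monitor. Sizing newEntries with entries.size() gives the copy an exact initial capacity:

Map<File, String> entries = new HashMap<File, String>();
entries.put(new File("readme.txt"), "docs/readme.txt"); // source file -> entry name in the zip (assumed)
entries.put(new File("logo.png"), "img/logo.png");
compressFiles(new File("site.zip"), entries, new NullProgressMonitor());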
From source file:de.tudarmstadt.ukp.dkpro.core.mallet.lda.MalletLdaTopicModelUtils.java
/**
 * Retrieve the top n topic words for each topic in the given model.
 *
 * @param modelFile
 *            the model file
 * @param nWords
 *            the maximum number of words to retrieve
 * @param normalize
 *            normalize the word weights?
 *
 * @return a list of maps where each map represents a topic, mapping words to weights
 * @throws IOException
 *             if the model cannot be read
 */
public static List<Map<String, Double>> getTopWords(File modelFile, int nWords, boolean normalize)
        throws IOException {
    LOGGER.info("Reading model file " + modelFile + "...");
    ParallelTopicModel model;
    try {
        model = ParallelTopicModel.read(modelFile);
    } catch (Exception e) {
        throw new IOException(e);
    }
    Alphabet alphabet = model.getAlphabet();

    List<Map<String, Double>> topics = new ArrayList<>(model.getNumTopics());

    /* iterate over topics */
    for (TreeSet<IDSorter> topic : model.getSortedWords()) {
        Map<String, Double> topicWords = new HashMap<>(nWords);

        /* iterate over word IDs in topic (sorted by weight) */
        for (IDSorter id : topic) {
            double weight = normalize ? id.getWeight() / alphabet.size() : id.getWeight(); // normalize
            String word = (String) alphabet.lookupObject(id.getID());
            topicWords.put(word, weight);

            if (topicWords.size() >= nWords) {
                break; // go to next topic
            }
        }
        topics.add(topicWords);
    }
    return topics;
}
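A hypothetical call (the model file name is illustrative) printing each topic's top ten words; each returned map holds at most nWords entries because of the size() check in the inner loop:

List<Map<String, Double>> topics = MalletLdaTopicModelUtils.getTopWords(new File("topicmodel.mallet"), 10, true);
for (int i = 0; i < topics.size(); i++) {
    System.out.println("Topic " + i + ": " + topics.get(i).keySet());
}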
From source file:com.bluexml.side.util.libs.eclipse.pages.PageControlsHelper.java
public static void initializeCombo(final String id, Map<String, Object> allowedValues,
        final Map<String, Object> values, final Combo archetypeIdControl) {
    String[] items = allowedValues.keySet().toArray(new String[allowedValues.size()]);
    archetypeIdControl.setItems(items);
    Object string = values.get(id);
    if (string != null) {
        archetypeIdControl.select(ArrayUtils.indexOf(items, string));
    }
}
From source file:gemlite.core.internal.index.IndexHelper.java
public final static IIndexContext createIndex(String def) {
    IIndexContext context = null;
    if (StringUtils.isEmpty(def))
        return context;
    LogUtil.getCoreLog().trace("Create index use definition: {}", def);
    // Parse the index definition into compiled classes.
    IDefLoader defLoader = new IndexDefLoader();
    Map<String, byte[]> clazzMap = defLoader.parseDefs(def);
    String indexName = defLoader.getName();
    // Guard the log call: the null check below implies parseDefs may return null.
    LogUtil.getCoreLog().trace("Compile index success, indexName: {}, classSize={}", indexName,
            clazzMap == null ? 0 : clazzMap.size());
    // IIndexContext oldContext = IndexHelper.getIndexContext(indexName);
    // if (oldContext != null)
    // {
    //     LogUtil.getCoreLog().error("Index Name: " + indexName + " already exists.");
    //     return null;
    // }
    if (clazzMap != null && clazzMap.size() > 0) {
        GemliteSibingsLoader parentLoader = GemliteContext.getCoreLoader();
        if (parentLoader == null) {
            LogUtil.getCoreLog().error("No Core Loader found. indexName={}", indexName);
            return context;
        }
        GemliteSibingsLoader loader = new GemliteSibingsLoader(parentLoader);
        loader.addDynamicClasses(clazzMap);
        GemliteClassScannerPro scanner = new GemliteIndexClassScanner();
        ScannerIterator scannerIterator = null;
        try {
            scannerIterator = new ScannerIterator(clazzMap);
            RegistryMatchedContext matchedRegistry = scanner.scan(loader, scannerIterator);
            matchedRegistry.setModuleInfo(indexName + ":" + def, null);
            context = (IIndexContext) scanner.createModuleContext(loader, matchedRegistry);
            if (context != null) {
                GemliteIndexContext topIdxContext = GemliteContext.getTopIndexContext();
                topIdxContext.putIndexContext(indexName, context);
                scanner.register(context, matchedRegistry);
                IndexHelper.saveIndexToDB(indexName, def);
                LogUtil.getCoreLog().info(context.getIndexName() + " created.\n");
            } else
                LogUtil.getCoreLog().error("No Index definition found. Def: " + def);
        } catch (Exception e) {
            throw new GemliteException("Scan error , loader url is {}" + loader.getURL(), e);
        } finally {
            try {
                if (scannerIterator != null) {
                    scannerIterator.close();
                }
            } catch (IOException e) {
                LogUtil.getCoreLog().warn("Close jar inputstream error.");
            }
        }
    }
    // IndexHelper.printLoaderClassInfo("AfterCreateIndex");
    return context;
}
From source file:fr.pasteque.client.utils.URLTextGetter.java
public static void getText(final String url, final Map<String, String> getParams,
        final Map<String, String> postParams, final Handler h) {
    new Thread() {
        @Override
        public void run() {
            try {
                String fullUrl = url;
                if (getParams != null && getParams.size() > 0) {
                    fullUrl += "?";
                    for (String param : getParams.keySet()) {
                        fullUrl += URLEncoder.encode(param, "utf-8") + "="
                                + URLEncoder.encode(getParams.get(param), "utf-8") + "&";
                    }
                }
                if (fullUrl.endsWith("&")) {
                    fullUrl = fullUrl.substring(0, fullUrl.length() - 1);
                }
                HttpClient client = new DefaultHttpClient();
                HttpResponse response = null;
                if (postParams == null) {
                    HttpGet req = new HttpGet(fullUrl);
                    response = client.execute(req);
                } else {
                    HttpPost req = new HttpPost(fullUrl);
                    List<NameValuePair> args = new ArrayList<NameValuePair>();
                    for (String key : postParams.keySet()) {
                        String value = postParams.get(key);
                        args.add(new BasicNameValuePair(key, value));
                    }
                    UrlEncodedFormEntity entity = new UrlEncodedFormEntity(args, HTTP.UTF_8);
                    req.setEntity(entity);
                    response = client.execute(req);
                }
                int status = response.getStatusLine().getStatusCode();
                if (status == HttpStatus.SC_OK) {
                    // Read the http response body into a string
                    String content = "";
                    try {
                        final int size = 10240;
                        ByteArrayOutputStream bos = new ByteArrayOutputStream(size);
                        byte[] buffer = new byte[size];
                        BufferedInputStream bis = new BufferedInputStream(
                                response.getEntity().getContent(), size);
                        int read = bis.read(buffer, 0, size);
                        while (read != -1) {
                            bos.write(buffer, 0, read);
                            read = bis.read(buffer, 0, size);
                        }
                        content = new String(bos.toByteArray());
                    } catch (IOException ioe) {
                        ioe.printStackTrace();
                    }
                    if (h != null) {
                        Message m = h.obtainMessage();
                        m.what = SUCCESS;
                        m.obj = content;
                        m.sendToTarget();
                    }
                } else {
                    if (h != null) {
                        Message m = h.obtainMessage();
                        m.what = STATUS_NOK;
                        m.obj = new Integer(status);
                        m.sendToTarget();
                    }
                }
            } catch (IOException e) {
                if (h != null) {
                    Message m = h.obtainMessage();
                    m.what = ERROR;
                    m.obj = e;
                    m.sendToTarget();
                }
            }
        }
    }.start();
}
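A hypothetical call from Android code (myHandler wiring is assumed; SUCCESS, STATUS_NOK and ERROR are the class's message codes). Because getParams.size() > 0, a query string is built:

Map<String, String> getParams = new HashMap<String, String>();
getParams.put("q", "espresso");
getParams.put("page", "1");
URLTextGetter.getText("http://example.com/api/search", getParams, null, myHandler);
// -> GET http://example.com/api/search?q=espresso&page=1 (parameter order depends on the map)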
From source file:com.espertech.esper.epl.join.base.JoinSetComposerPrototypeFactory.java
/**
 * Builds join tuple composer.
 *
 * @param outerJoinDescList - list of descriptors for outer join criteria
 * @param optionalFilterNode - filter tree for analysis to build indexes for fast access
 * @param streamTypes - types of streams
 * @param streamNames - names of streams
 * @return composer implementation
 * @throws ExprValidationException is thrown to indicate that validation of view use in joins failed.
 */
public static JoinSetComposerPrototype makeComposerPrototype(String statementName, String statementId,
        OuterJoinDesc[] outerJoinDescList, ExprNode optionalFilterNode, EventType[] streamTypes,
        String[] streamNames, StreamJoinAnalysisResult streamJoinAnalysisResult, boolean queryPlanLogging,
        Annotation[] annotations, HistoricalViewableDesc historicalViewableDesc,
        ExprEvaluatorContext exprEvaluatorContext, boolean selectsRemoveStream, boolean hasAggregations)
        throws ExprValidationException {
    // Determine if there is a historical stream, and what dependencies exist
    DependencyGraph historicalDependencyGraph = new DependencyGraph(streamTypes.length, false);
    for (int i = 0; i < streamTypes.length; i++) {
        if (historicalViewableDesc.getHistorical()[i]) {
            SortedSet<Integer> streamsThisStreamDependsOn = historicalViewableDesc
                    .getDependenciesPerHistorical()[i];
            historicalDependencyGraph.addDependency(i, streamsThisStreamDependsOn);
        }
    }

    if (log.isDebugEnabled()) {
        log.debug("Dependency graph: " + historicalDependencyGraph);
    }

    // Handle a join with a database or other historical data source for 2 streams
    if ((historicalViewableDesc.isHasHistorical()) && (streamTypes.length == 2)) {
        return makeComposerHistorical2Stream(outerJoinDescList, optionalFilterNode, streamTypes,
                historicalViewableDesc, queryPlanLogging, exprEvaluatorContext);
    }

    boolean isOuterJoins = !OuterJoinDesc.consistsOfAllInnerJoins(outerJoinDescList);

    // Query graph for graph relationships between streams/historicals
    // For outer joins the query graph will just contain outer join relationships
    QueryGraph queryGraph = new QueryGraph(streamTypes.length);
    if (outerJoinDescList.length > 0) {
        OuterJoinAnalyzer.analyze(outerJoinDescList, queryGraph);
        if (log.isDebugEnabled()) {
            log.debug(".makeComposer After outer join queryGraph=\n" + queryGraph);
        }
    }

    // Let the query graph reflect the where-clause
    if (optionalFilterNode != null) {
        // Analyze relationships between streams using the optional filter expression.
        // Relationships are properties in AND and EQUALS nodes of joins.
        FilterExprAnalyzer.analyze(optionalFilterNode, queryGraph, isOuterJoins);
        if (log.isDebugEnabled()) {
            log.debug(".makeComposer After filter expression queryGraph=\n" + queryGraph);
        }

        // Add navigation entries based on key and index property equivalency (a=b, b=c follows a=c)
        QueryGraph.fillEquivalentNav(streamTypes, queryGraph);
        if (log.isDebugEnabled()) {
            log.debug(".makeComposer After fill equiv. nav. queryGraph=\n" + queryGraph);
        }
    }

    // Historical index lists
    HistoricalStreamIndexList[] historicalStreamIndexLists = new HistoricalStreamIndexList[streamTypes.length];

    QueryPlan queryPlan = QueryPlanBuilder.getPlan(streamTypes, outerJoinDescList, queryGraph, streamNames,
            historicalViewableDesc, historicalDependencyGraph, historicalStreamIndexLists,
            streamJoinAnalysisResult, queryPlanLogging, annotations, exprEvaluatorContext);

    // remove unused indexes - consider all streams or all unidirectional
    HashSet<String> usedIndexes = new HashSet<String>();
    QueryPlanIndex[] indexSpecs = queryPlan.getIndexSpecs();
    for (int streamNum = 0; streamNum < queryPlan.getExecNodeSpecs().length; streamNum++) {
        QueryPlanNode planNode = queryPlan.getExecNodeSpecs()[streamNum];
        if (planNode != null) {
            planNode.addIndexes(usedIndexes);
        }
    }
    for (QueryPlanIndex indexSpec : indexSpecs) {
        if (indexSpec == null) {
            continue;
        }
        Map<String, QueryPlanIndexItem> items = indexSpec.getItems();
        String[] indexNames = items.keySet().toArray(new String[items.size()]);
        for (String indexName : indexNames) {
            if (!usedIndexes.contains(indexName)) {
                items.remove(indexName);
            }
        }
    }

    if (queryPlanLogging && queryPlanLog.isInfoEnabled()) {
        queryPlanLog.info("Query plan: " + queryPlan.toQueryPlan());
        QueryPlanIndexHook hook = QueryPlanIndexHookUtil.getHook(annotations);
        if (hook != null) {
            hook.join(queryPlan);
        }
    }

    boolean joinRemoveStream = selectsRemoveStream || hasAggregations;
    return new JoinSetComposerPrototypeImpl(statementName, statementId, outerJoinDescList, optionalFilterNode,
            streamTypes, streamNames, streamJoinAnalysisResult, annotations, historicalViewableDesc,
            exprEvaluatorContext, indexSpecs, queryPlan, historicalStreamIndexLists, joinRemoveStream,
            isOuterJoins);
}