List of usage examples for java.util HashMap get
public V get(Object key)
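HashMap.get takes Object rather than K, so a lookup with a key of the wrong type compiles and quietly returns null; null is also returned when the key is simply absent. A minimal self-contained sketch of that contract (not taken from any of the projects below):

    import java.util.HashMap;

    public class HashMapGetDemo {
        public static void main(String[] args) {
            HashMap<String, Integer> ages = new HashMap<>();
            ages.put("alice", 34);

            Integer found = ages.get("alice");    // 34
            Integer missing = ages.get("bob");    // null: key is absent
            Integer wrongType = ages.get(42);     // null: get(Object) never throws on a type mismatch

            System.out.println(found + " / " + missing + " / " + wrongType);
        }
    }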
From source file:net.triptech.metahive.model.Definition.java
    /**
     * Count the definitions.
     *
     * @param filter the filter
     * @return the long
     */
    public static long countDefinitions(final DefinitionFilter filter) {
        StringBuilder sql = new StringBuilder("SELECT COUNT(d) FROM Definition d JOIN d.category c");
        sql.append(buildWhere(filter));

        TypedQuery<Long> q = entityManager().createQuery(sql.toString(), Long.class);

        HashMap<String, String> variables = buildVariables(filter);
        for (String variable : variables.keySet()) {
            q.setParameter(variable, variables.get(variable));
        }
        return q.getSingleResult();
    }
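A side note on the parameter-binding loop above (it recurs in several examples on this page): iterating keySet() and then calling get(key) performs a second hash lookup per entry. Iterating entrySet() gives the same result in one lookup; a sketch against the same q and variables:

    for (java.util.Map.Entry<String, String> entry : variables.entrySet()) {
        q.setParameter(entry.getKey(), entry.getValue());
    }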
From source file:eu.planets_project.tb.impl.serialization.ExperimentRecord.java
    /**
     * Factory that does everything required when importing an experiment
     * from an ExperimentRecord.
     *
     * FIXME Test this comment loader!
     *
     * @param er
     * @return
     */
    static public long importExperimentRecord(ExperimentRecord er) {
        // Persist the experiment:
        long eid = edao.persistExperiment(er.experiment);

        // Also remember the comments, to make it easier to patch up the lists:
        HashMap<Long, Comment> cmts = new HashMap<Long, Comment>();

        // Persist the comments, using the correct experiment ID:
        for (CommentImpl c : er.comments) {
            // Update the comments to the new experiment id:
            c.setExperimentID(eid);
            // Persist the comments:
            long cid = cmp.persistComment(c);
            // Retrieve it again, for cross-reference resolution:
            cmts.put(Long.valueOf(c.getXmlCommentID()), cmp.findComment(cid));
        }

        // Go through the comments and correct the parent IDs:
        for (Comment c1 : cmts.values()) {
            // For this old identifier, look for its parent comment:
            Comment c2 = cmts.get(c1.getParentID());
            if (c2 != null) {
                // Update the parent ID to the new comment ID:
                c1.setParentID(c2.getCommentID());
                // Don't forget to persist the id changes to the DB:
                cmp.updateComment(c1);
            }
        }

        // Return the experiment id:
        return eid;
    }
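The two passes above (persist everything first, then use a HashMap keyed by the old XML ids to rewrite parent references) generalize to any id-remapping import. A stripped-down sketch of the pattern, using hypothetical Item records and a hypothetical persist helper:

    // Hypothetical sketch: remap old parent ids to new ids after persisting.
    Map<Long, Long> oldToNew = new HashMap<>();
    for (Item item : items) {
        oldToNew.put(item.oldId, persist(item)); // persist(...) is assumed to return the new id
    }
    for (Item item : items) {
        Long newParentId = oldToNew.get(item.oldParentId); // null if the parent wasn't imported
        if (newParentId != null) {
            item.parentId = newParentId;
        }
    }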
From source file:net.triptech.metahive.model.Definition.java
    /**
     * Find definition entries.
     *
     * @param filter the definition filter
     * @param firstResult the first result
     * @param maxResults the max results
     * @return the list
     */
    public static List<Definition> findDefinitionEntries(final DefinitionFilter filter,
            final int firstResult, final int maxResults) {
        StringBuilder sql = new StringBuilder("SELECT d FROM Definition d JOIN d.category c");
        sql.append(buildWhere(filter));
        sql.append(" ORDER BY d.name ASC");

        TypedQuery<Definition> q = entityManager()
                .createQuery(sql.toString(), Definition.class)
                .setFirstResult(firstResult)
                .setMaxResults(maxResults);

        HashMap<String, String> variables = buildVariables(filter);
        for (String variable : variables.keySet()) {
            q.setParameter(variable, variables.get(variable));
        }
        return q.getResultList();
    }
From source file:com.twentyn.patentExtractor.PatentDocument.java
    public static PatentDocument patentDocumentFromXMLStream(InputStream iStream)
            throws IOException, ParserConfigurationException, SAXException,
            TransformerConfigurationException, TransformerException, XPathExpressionException {

        // Create XPath objects for validating that this document is actually a patent.
        XPath xpath = Util.getXPathFactory().newXPath();
        XPathExpression versionXPath = xpath.compile(PATH_DTD_VERSION);
        XPathExpression versionXPathApp = xpath.compile(PATH_DTD_VERSION_APP);

        DocumentBuilderFactory docFactory = Util.mkDocBuilderFactory();
        DocumentBuilder docBuilder = docFactory.newDocumentBuilder();
        Document doc = docBuilder.parse(iStream);

        Util.DocumentType docType = Util.identifyDocType(doc);
        if (docType != Util.DocumentType.PATENT && docType != Util.DocumentType.APPLICATION) {
            LOGGER.warn("Found unexpected document type: " + docType);
            return null;
        }
        boolean isApplication = docType == Util.DocumentType.APPLICATION;

        // Yes, this is in fact the way suggested by the XPath API.
        String version;
        if (!isApplication) {
            version = (String) versionXPath.evaluate(doc, XPathConstants.STRING);
        } else {
            version = (String) versionXPathApp.evaluate(doc, XPathConstants.STRING);
        }
        if (version == null || !VERSION_MAP.containsKey(version)) {
            LOGGER.warn(String.format("Unrecognized patent DTD version: %s", version));
            return null;
        }
        HashMap<String, String> paths = VERSION_MAP.get(version);

        /* Create XPath objects for extracting the fields of interest based on the version
         * information.
         * TODO: extract these into some sharable, thread-safe place, maybe via dependency injection.
         */
        XPathExpression idXPath = xpath.compile(paths.get(PATH_KEY_FILE_ID));
        XPathExpression dateXPath = xpath.compile(paths.get(PATH_KEY_DATE));
        XPathExpression titleXPath = xpath.compile(paths.get(PATH_KEY_TITLE));
        XPathExpression classificationXPath = xpath.compile(paths.get(PATH_KEY_MAIN_CLASSIFICATION));
        XPathExpression furtherClassificationsXPath =
                xpath.compile(paths.get(PATH_KEY_FURTHER_CLASSIFICATIONS));
        XPathExpression searchedClassificationsXPath =
                xpath.compile(paths.get(PATH_KEY_SEARCHED_CLASSIFICATIONS));

        String fileId = (String) idXPath.evaluate(doc, XPathConstants.STRING);
        String date = (String) dateXPath.evaluate(doc, XPathConstants.STRING);
        NodeList titleNodes = (NodeList) titleXPath.evaluate(doc, XPathConstants.NODESET);
        String title = StringUtils.join(" ", extractTextFromHTML(docBuilder, titleNodes));
        String classification = (String) classificationXPath.evaluate(doc, XPathConstants.STRING);

        NodeList furtherClassificationNodes =
                (NodeList) furtherClassificationsXPath.evaluate(doc, XPathConstants.NODESET);
        ArrayList<String> furtherClassifications;
        if (furtherClassificationNodes != null) {
            furtherClassifications = new ArrayList<>(furtherClassificationNodes.getLength());
            for (int i = 0; i < furtherClassificationNodes.getLength(); i++) {
                Node n = furtherClassificationNodes.item(i);
                String txt = n.getTextContent();
                if (txt != null) {
                    // Append rather than add at index i: a skipped null would otherwise
                    // leave a gap and make the indexed add throw.
                    furtherClassifications.add(txt);
                }
            }
        } else {
            furtherClassifications = new ArrayList<>(0);
        }

        NodeList otherClassificationNodes =
                (NodeList) searchedClassificationsXPath.evaluate(doc, XPathConstants.NODESET);
        ArrayList<String> otherClassifications;
        if (otherClassificationNodes != null) {
            otherClassifications = new ArrayList<>(otherClassificationNodes.getLength());
            for (int i = 0; i < otherClassificationNodes.getLength(); i++) {
                Node n = otherClassificationNodes.item(i);
                String txt = n.getTextContent();
                if (txt != null) {
                    otherClassifications.add(txt);
                }
            }
        } else {
            otherClassifications = new ArrayList<>(0);
        }

        // Extract text content for salient document paths.
        List<String> allTextList = getRelevantDocumentText(docBuilder, PATHS_TEXT, xpath, doc);
        List<String> claimsTextList =
                getRelevantDocumentText(docBuilder, new String[] { PATH_CLAIMS }, xpath, doc);

        return new PatentDocument(fileId, date, title, classification, furtherClassifications,
                otherClassifications, allTextList, claimsTextList, isApplication);
    }
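A small observation on the version dispatch above: containsKey followed by get costs two hash lookups. Assuming VERSION_MAP never stores a null value (not verified against this codebase), a single get with a null check is equivalent:

    HashMap<String, String> paths = VERSION_MAP.get(version); // null for null or unknown versions
    if (paths == null) {
        LOGGER.warn(String.format("Unrecognized patent DTD version: %s", version));
        return null;
    }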
From source file:gov.tva.sparky.util.indexer.IndexingAgent.java
    public static void Index_HDFS_File(String strHdfsPath) throws Exception {
        LOG.info("Indexing File: " + strHdfsPath);

        long lBytesUploaded = 0;
        int iBucketsUpdated = 0;

        boolean bExistsAlreadyInRegistry = false;
        String strHBaseRegistryFileID = "";
        int iHBaseRegistryFileID = -1;

        HistorianArchiveLookupTable oLookupTable = new HistorianArchiveLookupTable();
        try {
            strHBaseRegistryFileID = oLookupTable.Lookup_FileID_byPath(strHdfsPath);
            // Also decode the numeric id here, otherwise the already-registered path
            // falls through to the "unable to register" check below with id -1.
            iHBaseRegistryFileID = HistorianArchiveLookupTableEntry.GetIntFromBytes(strHBaseRegistryFileID);
            System.out.println("Found File ID: " + strHBaseRegistryFileID + " For Path: " + strHdfsPath);
            bExistsAlreadyInRegistry = true;
        } catch (Exception e) {
            bExistsAlreadyInRegistry = false;
        }

        // If it's not already registered, register it.
        if (!bExistsAlreadyInRegistry) {
            try {
                HistorianArchiveLookupTable.InsertEntryInto_Hbase_LookupTables(strHdfsPath);
                strHBaseRegistryFileID = oLookupTable.Lookup_FileID_byPath(strHdfsPath);
                iHBaseRegistryFileID = HistorianArchiveLookupTableEntry.GetIntFromBytes(strHBaseRegistryFileID);
            } catch (Exception e) {
                e.printStackTrace();
                strHBaseRegistryFileID = "";
            }
        }

        if (strHBaseRegistryFileID.equals("") || iHBaseRegistryFileID < 0) {
            throw new Exception("Unable to register hdfs file in Index Registry!");
        }

        // We need to group each range of block pointers into buckets.
        FileIndex archive_index = new FileIndex(strHdfsPath, iHBaseRegistryFileID);
        HashMap<String, PriorityQueue<HDFSPointBlockPointer>> map = archive_index.GetKeyMap();

        for (String key_str : map.keySet()) {
            IndexBucket oBucket = new IndexBucket(map.get(key_str));
            lBytesUploaded += oBucket.GetSerializedByteSize();
            HBaseRestInterface.Update_HDFS_FileIndex(oBucket);
            iBucketsUpdated++;
        }

        // Log summary stats.
        LOG.info("Indexer > Summary > Total Bytes Uploaded: " + lBytesUploaded
                + ", Buckets Updated: " + iBucketsUpdated);
    }
From source file:com.iggroup.oss.restdoclet.doclet.util.DocTypeUtils.java
    /**
     * Returns as a string a list of attributes plus comments for the given
     * iggroup complex type (or empty string if not iggroup), formatted in an
     * HTML table. This method will recurse if attributes are iggroup complex
     * types.
     *
     * @param type
     * @param processedTypes
     * @param leafType
     * @return
     */
    private static String getTypeDoc(final Type type, HashMap<String, String> processedTypes,
            Boolean leafType) {

        LOG.info("getTypeDoc " + type + " leafType=" + leafType);

        String typeInfo = "";
        if (isRelevantType(type)) {
            ClassDoc typeDoc = type.asClassDoc();
            typeInfo = processedTypes.get(type.toString());
            if (typeInfo != null) {
                LOG.debug("Found cached typedoc for " + type.typeName() + " - " + typeInfo);
            }
            if (typeInfo == null && typeDoc != null) {
                typeInfo = "";
                // If this is a generic type then recurse with the last type argument.
                if (isParameterisedType(type)) {
                    LOG.debug("Parameterised type");
                    if (type.asClassDoc() != null) {
                        typeInfo = getTypeDoc(
                                type.asParameterizedType().typeArguments()[
                                        type.asParameterizedType().typeArguments().length - 1],
                                processedTypes, true);
                    }
                } else {
                    logType(type);
                    // Put a placeholder to stop recursion for self-referential types.
                    LOG.debug("Starting to cache: " + type.typeName());
                    processedTypes.put(type.toString(), "");
                    LOG.debug(typeDoc.commentText() + " " + leafType);
                    if (leafType && !typeDoc.commentText().isEmpty()) {
                        typeInfo += "<tr><span class=\"javadoc-header\">" + typeDoc.commentText()
                                + "</span></tr>";
                        LOG.debug(typeInfo);
                    }
                    if (typeDoc.isEnum()) {
                        LOG.debug("Enum type");
                        typeInfo += getEnumDoc(type);
                    } else { // class
                        LOG.debug("Class");
                        // First do the base class.
                        if (typeDoc.superclass() != null) {
                            LOG.debug("base type = " + typeDoc.superclass().qualifiedName());
                            String baseTypeDoc = getTypeDoc(type.asClassDoc().superclassType(),
                                    processedTypes, false);
                            if (!baseTypeDoc.isEmpty()) {
                                LOG.debug("base type DOC = " + baseTypeDoc);
                                typeInfo += baseTypeDoc;
                            }
                        }
                        typeInfo += getPublicConstantDoc(type);
                        Collection<String> getterNames = getGetterNames(type);
                        for (MethodDoc method : typeDoc.methods()) {
                            if (method.isPublic() && getterNames.contains(method.name())
                                    && !ignore(method)) {
                                String attributeInfo = "";
                                String attributeType = method.returnType().simpleTypeName();
                                // Check if this is a parameterised type.
                                ParameterizedType pt = method.returnType().asParameterizedType();
                                if (pt != null && pt.typeArguments().length > 0) {
                                    attributeType += "[";
                                    for (int i = 0; i < pt.typeArguments().length; i++) {
                                        attributeType += pt.typeArguments()[i].simpleTypeName();
                                        if (i < pt.typeArguments().length - 1) {
                                            attributeType += ", ";
                                        }
                                    }
                                    attributeType += "]";
                                }
                                // Check if this is an array.
                                attributeType += method.returnType().dimension();
                                final String attributeName = getAttributeNameFromMethod(method.name());
                                attributeInfo += "<td>" + attributeType + " " + attributeName + "</td>";
                                // If type or parameterised type then recurse.
                                LOG.debug("Generating attribute doc for " + method.returnType());
                                String attributeTypeDoc = getTypeDoc(method.returnType(),
                                        processedTypes, true);
                                if (!attributeTypeDoc.isEmpty()) {
                                    LOG.debug("Found attribute doc for " + method.returnType());
                                    attributeInfo += "<td>" + attributeTypeDoc + "</td>";
                                } else {
                                    // No useful type information, so use whatever's on the attribute doc.
                                    LOG.debug("Found no attribute doc for " + method.returnType());
                                    String fieldDoc = getFieldDoc(type, attributeName, method.commentText());
                                    attributeInfo += "<td>" + fieldDoc + "</td>";
                                }
                                if (!attributeInfo.isEmpty()) {
                                    typeInfo += "<tr>" + attributeInfo + "</tr>";
                                }
                            }
                        }
                    }
                    // Wrap in a table tag if this is a concrete type.
                    if (leafType && !typeInfo.isEmpty()) {
                        typeInfo = "<table>" + typeInfo + "</table>";
                    }
                }
            }
            LOG.debug("Caching: " + type);
            processedTypes.put(type.toString(), typeInfo);
        }
        if (typeInfo == null) {
            typeInfo = "";
        }
        LOG.debug("XXX " + type.typeName() + " XXX " + typeInfo);
        return typeInfo;
    }
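getTypeDoc uses its HashMap both as a memo cache and as a cycle breaker: get returning null means "not yet processed", and an empty-string placeholder is stored before recursing so self-referential types terminate. A stripped-down sketch of that pattern, with hypothetical names (assumes java.util imports):

    // Hypothetical sketch: memoized recursion with a placeholder entry to break cycles.
    static String describe(String typeName, Map<String, List<String>> fieldsOf,
            HashMap<String, String> cache) {
        String cached = cache.get(typeName);
        if (cached != null) {
            return cached; // finished result, or "" placeholder while still in progress
        }
        cache.put(typeName, ""); // placeholder: stops infinite recursion on cyclic types
        StringBuilder sb = new StringBuilder(typeName);
        for (String field : fieldsOf.getOrDefault(typeName, Collections.emptyList())) {
            sb.append(' ').append(describe(field, fieldsOf, cache));
        }
        cache.put(typeName, sb.toString());
        return sb.toString();
    }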
From source file:com.ushahidi.android.app.net.MainHttpClient.java
    /**
     * Upload files to the server. Return codes: 0 - success, 1 - missing parameter,
     * 2 - invalid parameter, 3 - post failed, 5 - access denied, 6 - access limited,
     * 7 - no data, 8 - api disabled, 9 - no task found, 10 - json is wrong
     */
    public static int PostFileUpload(String URL, HashMap<String, String> params) throws IOException {
        Log.d(CLASS_TAG, "PostFileUpload(): upload file to server.");
        entity = new MultipartEntity();
        // Dipo Fix
        try { // wrap try around because this constructor can throw Error
            final HttpPost httpost = new HttpPost(URL);
            if (params != null) {
                entity.addPart("task", new StringBody(params.get("task")));
                entity.addPart("incident_title",
                        new StringBody(params.get("incident_title"), Charset.forName("UTF-8")));
                entity.addPart("incident_description",
                        new StringBody(params.get("incident_description"), Charset.forName("UTF-8")));
                entity.addPart("incident_date", new StringBody(params.get("incident_date")));
                entity.addPart("incident_hour", new StringBody(params.get("incident_hour")));
                entity.addPart("incident_minute", new StringBody(params.get("incident_minute")));
                entity.addPart("incident_ampm", new StringBody(params.get("incident_ampm")));
                entity.addPart("incident_category", new StringBody(params.get("incident_category")));
                entity.addPart("latitude", new StringBody(params.get("latitude")));
                entity.addPart("longitude", new StringBody(params.get("longitude")));
                entity.addPart("location_name",
                        new StringBody(params.get("location_name"), Charset.forName("UTF-8")));
                entity.addPart("person_first",
                        new StringBody(params.get("person_first"), Charset.forName("UTF-8")));
                entity.addPart("person_last",
                        new StringBody(params.get("person_last"), Charset.forName("UTF-8")));
                entity.addPart("person_email",
                        new StringBody(params.get("person_email"), Charset.forName("UTF-8")));

                if (!TextUtils.isEmpty(params.get("filename"))) {
                    File file = new File(params.get("filename"));
                    if (file.exists()) {
                        entity.addPart("incident_photo[]", new FileBody(file));
                    }
                }

                // Needed to avoid HTTP error 417 (Expectation Failed).
                httpost.getParams().setBooleanParameter("http.protocol.expect-continue", false);
                httpost.setEntity(entity);

                HttpResponse response = httpClient.execute(httpost);
                Preferences.httpRunning = false;
                HttpEntity respEntity = response.getEntity();
                if (respEntity != null) {
                    InputStream serverInput = respEntity.getContent();
                    return Util.extractPayloadJSON(GetText(serverInput));
                }
            }
        } catch (MalformedURLException ex) {
            Log.d(CLASS_TAG, "PostFileUpload(): MalformedURLException");
            ex.printStackTrace();
            return 11; // fall through and return an error code
        } catch (IllegalArgumentException ex) {
            Log.e(CLASS_TAG, ex.toString()); // invalid URI
            return 12;
        } catch (IOException e) {
            Log.e(CLASS_TAG, e.toString()); // timeout
            return 13;
        }
        return 10;
    }
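Each params.get(...) above returns null when its key is absent, and StringBody typically rejects null text, so a missing field aborts the whole upload (surfacing through the IllegalArgumentException branch). If empty values were acceptable instead, Java 8's getOrDefault would be a hedged alternative, assuming the code can target that API level:

    entity.addPart("incident_title",
            new StringBody(params.getOrDefault("incident_title", ""), Charset.forName("UTF-8")));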
From source file:ISMAGS.CommandLineInterface.java
    public static Motif getMotif(String motifspec, HashMap<Character, LinkType> typeTranslation) {
        int l = motifspec.length();
        int nrNodes = (int) Math.ceil(Math.sqrt(2 * l));
        int l2 = nrNodes * (nrNodes - 1) / 2;
        if (l != l2) {
            Die("Error: motif \"" + motifspec + "\" has invalid length");
        }
        int counter = 0;
        Motif m = new Motif(nrNodes);
        for (int i = 1; i < nrNodes; i++) {
            for (int j = 0; j < i; j++) {
                char c = motifspec.charAt(counter);
                counter++;
                if (c == '0') {
                    continue;
                }
                // Link types are registered under their upper-case letter; the case of
                // the character in the spec encodes the direction of the link.
                LinkType lt = typeTranslation.get(Character.toUpperCase(c));
                if (Character.isUpperCase(c)) {
                    m.addMotifLink(j, i, lt);
                } else {
                    m.addMotifLink(i, j, lt);
                }
            }
        }
        m.finaliseMotif();
        return m;
    }
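One hedged hardening note: typeTranslation.get returns null when a letter has no registered LinkType, and that null would only surface later inside addMotifLink. An explicit check right after the lookup fails fast (reusing the Die helper already used above):

    LinkType lt = typeTranslation.get(Character.toUpperCase(c));
    if (lt == null) {
        Die("Error: motif \"" + motifspec + "\" uses unknown link type '" + c + "'");
    }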
From source file:co.cask.cdap.data.tools.ReplicationStatusTool.java
    private static Map<String, Long> getMapFromTable(String rowType) throws IOException {
        HBaseTableUtil tableUtil = new HBaseTableUtilFactory(cConf).get();
        HTable hTable = tableUtil.createHTable(hConf, getReplicationStateTableId(tableUtil));

        // Scan the table to visit all regions.
        ScanBuilder scan = getScanBuilder(tableUtil, rowType);
        Result result;
        HashMap<String, Long> timeMap = new HashMap<>();

        try (ResultScanner resultScanner = hTable.getScanner(scan.build())) {
            while ((result = resultScanner.next()) != null) {
                ReplicationStatusKey key = new ReplicationStatusKey(result.getRow());
                String region = key.getRegionName();
                Long timestamp = getTimeFromResult(result, rowType);
                // Keep the latest timestamp seen for each region.
                if (timeMap.get(region) == null || timestamp > timeMap.get(region)) {
                    timeMap.put(region, timestamp);
                }
            }
        } catch (Exception e) {
            LOG.error("Error while reading table.", e);
            throw Throwables.propagate(e);
        } finally {
            hTable.close();
        }
        return timeMap;
    }
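The get-then-compare-then-put maximum above takes up to three hash lookups per row. Since Java 8, Map.merge collapses it into a single call with the same semantics:

    // Keep the latest timestamp seen for each region.
    timeMap.merge(region, timestamp, Math::max);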
From source file:com.act.reachables.Network.java
    public static JSONObject get(MongoDB db, Set<Node> nodes, Set<Edge> edges,
            HashMap<Long, Long> parentIds, HashMap<Long, Edge> toParentEdges) throws JSONException {
        // Init the json object with structure:
        // {
        //   "name": "nodeid"
        //   "children": [
        //     { "name": "childnodeid", toparentedge: {}, nodedata: ... }, ...
        //   ]
        // }

        HashMap<Long, Node> nodeById = new HashMap<>();
        for (Node n : nodes) {
            nodeById.put(n.id, n);
        }

        HashMap<Long, JSONObject> nodeObjs = new HashMap<>();
        // un-deconstruct tree...
        for (Long nid : parentIds.keySet()) {
            JSONObject nObj = JSONHelper.nodeObj(db, nodeById.get(nid));
            nObj.put("name", nid);

            if (toParentEdges.get(nid) != null) {
                JSONObject eObj = JSONHelper.edgeObj(toParentEdges.get(nid),
                        null /* no ordering reqd for referencing nodeMapping */);
                nObj.put("edge_up", eObj);
            }
            nodeObjs.put(nid, nObj);
        }

        // Now that we know that each node has an associated obj,
        // link the objects together into the tree structure,
        // putting each object inside its parent.
        HashSet<Long> unAssignedToParent = new HashSet<>(parentIds.keySet());
        for (Long nid : parentIds.keySet()) {
            JSONObject child = nodeObjs.get(nid);
            // Append child to the "children" key within the parent.
            JSONObject parent = nodeObjs.get(parentIds.get(nid));
            if (parent != null) {
                parent.append("children", child);
                unAssignedToParent.remove(nid);
            }
        }

        // Outputting a single tree makes front end processing easier;
        // we can always remove the root in the front end and get the forest again.
        // If many trees remain, assuming they indicate a disjoint forest,
        // add them as children of a proxy root; if only one tree remains, return it.
        JSONObject json;
        if (unAssignedToParent.size() == 0) {
            throw new RuntimeException("All nodeMapping have parents! Where is the root? Abort.");
        } else if (unAssignedToParent.size() == 1) {
            // Return the object for the only root id in the set.
            json = nodeObjs.get(unAssignedToParent.iterator().next());
        } else {
            json = new JSONObject();
            json.put("name", "root");
            for (Long cid : unAssignedToParent) {
                json.append("children", nodeObjs.get(cid));
            }
        }
        return json;
    }