List of usage examples for java.util EnumMap EnumMap
public EnumMap(Class<K> keyType)
From source file:com.adobe.acs.commons.mcp.impl.processes.AssetFolderCreator.java
/**
 * Appends one row to the in-memory report.
 *
 * @param status outcome of the folder operation, rendered as a friendly name
 * @param path   the asset folder path the row describes
 * @param title  the asset folder title the row describes
 */
private void record(ReportRowStatus status, String path, String title) {
    final EnumMap<ReportColumns, Object> reportRow = new EnumMap<>(ReportColumns.class);
    reportRow.put(ReportColumns.ASSET_FOLDER_PATH, path);
    reportRow.put(ReportColumns.ASSET_FOLDER_TITLE, title);
    reportRow.put(ReportColumns.STATUS, StringUtil.getFriendlyName(status.name()));
    reportRows.add(reportRow);
}
From source file:nl.strohalm.cyclos.services.ads.AdServiceImpl.java
@Override public Map<Ad.Status, Integer> getNumberOfAds(final Calendar date, final Member member) { final Map<Ad.Status, Integer> numberOfAds = new EnumMap<Ad.Status, Integer>(Ad.Status.class); final AdQuery query = new AdQuery(); query.setOwner(member);/*from ww w .j a v a2s . c o m*/ query.setPageForCount(); // date is for history if (date != null) { query.setHistoryDate(date); query.setIncludeDeleted(true); } for (final Ad.Status status : Ad.Status.values()) { query.setStatus(status); final int totalCount = PageHelper.getTotalCount(search(query)); numberOfAds.put(status, totalCount); } return numberOfAds; }
From source file:org.openecomp.sdc.be.externalapi.servlet.AssetsDataServlet.java
/** Streams the TOSCA CSAR model of the requested asset, auditing every exit path. */
@GET
@Path("/{assetType}/{uuid}/toscaModel")
@Produces(MediaType.APPLICATION_OCTET_STREAM)
@ApiOperation(value = "Fetch asset csar", httpMethod = "GET", notes = "Returns asset csar", response = Response.class)
@ApiResponses(value = { @ApiResponse(code = 200, message = "Asset Model Fetched"),
        @ApiResponse(code = 401, message = "Authorization required"),
        @ApiResponse(code = 403, message = "Restricted operation"),
        @ApiResponse(code = 404, message = "Asset not found") })
public Response getToscaModel(@PathParam("uuid") final String uuid,
        @ApiParam(value = "valid values: resources / services", allowableValues = ComponentTypeEnum.RESOURCE_PARAM_NAME
                + "," + ComponentTypeEnum.SERVICE_PARAM_NAME) @PathParam("assetType") final String assetType,
        @HeaderParam(value = Constants.AUTHORIZATION_HEADER) String authorization) {
    String url = request.getRequestURI();
    log.debug("Start handle request of {} {}", request.getMethod(), url);
    Response response = null;
    ResponseFormat responseFormat = null;
    ServletContext context = request.getSession().getServletContext();
    // NOTE(review): findByParamName may return null for an unknown assetType; the
    // componentType.getValue() call below would then NPE — confirm upstream validation.
    ComponentTypeEnum componentType = ComponentTypeEnum.findByParamName(assetType);
    AuditingActionEnum auditingActionEnum = AuditingActionEnum.GET_TOSCA_MODEL;
    String userId = request.getHeader(Constants.X_ECOMP_INSTANCE_ID_HEADER);

    // Audit context shared by every exit path (success, error, exception).
    EnumMap<AuditingFieldsKeysEnum, Object> additionalParam = new EnumMap<>(AuditingFieldsKeysEnum.class);
    additionalParam.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_CONSUMER_ID, userId);
    additionalParam.put(AuditingFieldsKeysEnum.AUDIT_DISTRIBUTION_RESOURCE_URL, url);
    additionalParam.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_TYPE, componentType.getValue());
    additionalParam.put(AuditingFieldsKeysEnum.AUDIT_SERVICE_INSTANCE_ID, uuid);

    if (userId == null || userId.isEmpty()) {
        log.debug("getToscaModel: Missing X-ECOMP-InstanceID header");
        responseFormat = getComponentsUtils().getResponseFormat(ActionStatus.MISSING_X_ECOMP_INSTANCE_ID);
        getComponentsUtils().auditExternalGetAsset(responseFormat, auditingActionEnum, additionalParam);
        return buildErrorResponse(responseFormat);
    }

    try {
        ComponentBusinessLogic componentBL = getComponentBL(componentType, context);
        Either<ImmutablePair<String, byte[]>, ResponseFormat> csarArtifact = componentBL
                .getToscaModelByComponentUuid(componentType, uuid, additionalParam);
        if (csarArtifact.isRight()) {
            responseFormat = csarArtifact.right().value();
            getComponentsUtils().auditExternalGetAsset(responseFormat, auditingActionEnum, additionalParam);
            response = buildErrorResponse(responseFormat);
        } else {
            // Success: stream the CSAR bytes with content-disposition and MD5 headers.
            byte[] value = csarArtifact.left().value().getRight();
            InputStream is = new ByteArrayInputStream(value);
            String contentMd5 = GeneralUtility.calculateMD5ByByteArray(value); // was misspelled "contenetMD5"
            Map<String, String> headers = new HashMap<>();
            headers.put(Constants.CONTENT_DISPOSITION_HEADER,
                    getContentDispositionValue(csarArtifact.left().value().getLeft()));
            headers.put(Constants.MD5_HEADER, contentMd5);
            responseFormat = getComponentsUtils().getResponseFormat(ActionStatus.OK);
            getComponentsUtils().auditExternalGetAsset(responseFormat, auditingActionEnum, additionalParam);
            response = buildOkResponse(responseFormat, is, headers);
        }
        return response;
    } catch (Exception e) {
        BeEcompErrorManager.getInstance().logBeRestApiGeneralError("Get asset tosca model");
        log.debug("failed to get asset tosca model", e); // fixed typo "falied"
        responseFormat = getComponentsUtils().getResponseFormat(ActionStatus.GENERAL_ERROR);
        response = buildErrorResponse(responseFormat);
        getComponentsUtils().auditExternalGetAsset(responseFormat, auditingActionEnum, additionalParam);
        return response;
    }
}
From source file:gov.nih.nci.firebird.service.registration.ProtocolRegistrationServiceBean.java
/**
 * Builds the notification e-mail sent when sub-investigator registrations are removed.
 *
 * @param deletedRegistrations the removed registrations; the investigator is taken
 *        from the first entry's primary registration profile
 * @return the generated notification message
 */
private FirebirdMessage getSubInvestigatorRemovedMessage(
        List<SubInvestigatorRegistration> deletedRegistrations) {
    Map<FirebirdTemplateParameter, Object> templateValues =
            new EnumMap<FirebirdTemplateParameter, Object>(FirebirdTemplateParameter.class);
    templateValues.put(FirebirdTemplateParameter.INVESTIGATOR,
            deletedRegistrations.iterator().next().getPrimaryRegistration().getProfile().getPerson());
    templateValues.put(FirebirdTemplateParameter.REGISTRATIONS, deletedRegistrations);
    return getTemplateService().generateMessage(
            FirebirdMessageTemplate.REMOVE_SUBINVESTIGATOR_NOTIFICATION_EMAIL, templateValues);
}
From source file:gov.nih.nci.firebird.service.annual.registration.AnnualRegistrationServiceBean.java
/**
 * Builds the e-mail sent to the coordinator / notification list when an annual
 * registration is submitted.
 *
 * @param registration the submitted annual registration
 * @return the generated notification message
 */
private FirebirdMessage getCoordinatorAndNotificationListEmailMessage(AnnualRegistration registration) {
    Map<FirebirdTemplateParameter, Object> templateValues =
            new EnumMap<FirebirdTemplateParameter, Object>(FirebirdTemplateParameter.class);
    templateValues.put(ANNUAL_REGISTRATION, registration);
    return getTemplateService().generateMessage(
            FirebirdMessageTemplate.COORDINATOR_ANNUAL_REGISTRATION_SUBMISSION_NOTIFICATION_EMAIL,
            templateValues);
}
From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.java
/** * store cardinality information in metadata * @param localHLL the hll estimate for this hoplog only *///w ww .ja va2 s . co m private EnumMap<Meta, byte[]> buildMetaData(ICardinality localHLL) throws IOException { EnumMap<Meta, byte[]> map = new EnumMap<Hoplog.Meta, byte[]>(Meta.class); map.put(Meta.LOCAL_CARDINALITY_ESTIMATE_V2, localHLL.getBytes()); return map; }
From source file:it.unimi.di.big.mg4j.index.Index.java
/** Returns a new index using the given URI. * /*from w w w . j ava 2 s . c om*/ * @param ioFactory the factory that will be used to perform I/O, or <code>null</code> (implying the {@link IOFactory#FILESYSTEM_FACTORY} for disk-based indices). * @param uri the URI defining the index. * @param randomAccess whether the index should be accessible randomly. * @param documentSizes if true, document sizes will be loaded (note that sometimes document sizes * might be loaded anyway because the compression method for positions requires it). * @param maps if true, {@linkplain StringMap term} and {@linkplain PrefixMap prefix} maps will be guessed and loaded (this * feature might not be available with some kind of index). */ public static Index getInstance(IOFactory ioFactory, final CharSequence uri, final boolean randomAccess, final boolean documentSizes, final boolean maps) throws IOException, ConfigurationException, URISyntaxException, ClassNotFoundException, SecurityException, InstantiationException, IllegalAccessException, InvocationTargetException, NoSuchMethodException { /* If the scheme is mg4j, then we are creating a remote * index. If it is null, we assume it is a property file and load it. Otherwise, we * assume it is a valid property file URI and try to download it. */ final String uriString = uri.toString(); /*if ( uriString.startsWith( "mg4j:" ) ) { if ( ioFactory != null ) throw new IllegalAccessError( "You cannot specify a factory for a remote index" ); final URI u = new URI( uriString ); return IndexServer.getIndex( u.getHost(), u.getPort(), randomAccess, documentSizes ); }*/ final String basename, query; if (ioFactory == null) ioFactory = IOFactory.FILESYSTEM_FACTORY; if (uriString.startsWith("file:")) { final URI u = new URI(uriString); basename = u.getPath(); query = u.getQuery(); } else { final int questionMarkPos = uriString.indexOf('?'); basename = questionMarkPos == -1 ? uriString : uriString.substring(0, questionMarkPos); query = questionMarkPos == -1 ? 
null : uriString.substring(questionMarkPos + 1); } LOGGER.debug("Searching for an index with basename " + basename + "..."); final Properties properties = IOFactories.loadProperties(ioFactory, basename + DiskBasedIndex.PROPERTIES_EXTENSION); LOGGER.debug("Properties: " + properties); // We parse the key/value pairs appearing in the query part. final EnumMap<UriKeys, String> queryProperties = new EnumMap<UriKeys, String>(UriKeys.class); if (query != null) { String[] keyValue = query.split(";"); for (int i = 0; i < keyValue.length; i++) { String[] piece = keyValue[i].split("="); if (piece.length != 2) throw new IllegalArgumentException("Malformed key/value pair: " + keyValue[i]); // Convert to standard keys boolean found = false; for (UriKeys key : UriKeys.values()) if (found = PropertyBasedDocumentFactory.sameKey(key, piece[0])) { queryProperties.put(key, piece[1]); break; } if (!found) throw new IllegalArgumentException("Unknown key: " + piece[0]); } } // Compatibility with previous versions String className = properties.getString(Index.PropertyKeys.INDEXCLASS, "(missing index class)") .replace(".dsi.", ".di."); Class<?> indexClass = Class.forName(className); // It is a cluster. if (IndexCluster.class.isAssignableFrom(indexClass)) return IndexCluster.getInstance(basename, randomAccess, documentSizes, queryProperties); // It is a disk-based index. return DiskBasedIndex.getInstance(ioFactory, basename, properties, randomAccess, documentSizes, maps, queryProperties); }
From source file:gov.nih.nci.firebird.service.sponsor.SponsorServiceBean.java
private FirebirdMessage getProtocolRegistrationNotificationEmailMessage( AbstractProtocolRegistration registration) { Map<FirebirdTemplateParameter, Object> parameterValues = new EnumMap<FirebirdTemplateParameter, Object>( FirebirdTemplateParameter.class); parameterValues.put(REGISTRATION, registration); parameterValues.put(FIREBIRD_LINK, generateReviewProtocolRegistrationLink(registration)); return templateService.generateMessage(FirebirdMessageTemplate.SPONSOR_SUBMISSION_NOTIFICATION_EMAIL, parameterValues);//from ww w . j a va 2 s . c om }
From source file:com.example.app.support.address.AddressParser.java
/** * This is the main method that calls all the other method * to successfully split up and address into separate fields. * * Returns a string key,value map containing address parts * along with their string values./*from w w w . j a v a 2 s . c om*/ * * @param address the address. * @param autoCorrectStateSpelling flag to autocorrect spelling. * * @return the parsed mapping. */ @Nonnull public static Map<AddressComponent, String> parseAddress(String address, boolean autoCorrectStateSpelling) { /*First we replace individual fields with temporary escaped value. * The reason we do this separately is because we can * strip out certain characters in the split up fields more accurately than * in the constructed address (commas for instance can easily be stripped out of * the split up fields, but are important to maintain in the address) */ // There are some words that we want to remove / permanently change in the address (undefined / undefd) Map<String, String> replacementStrings = getReplacementStrings(); address = escapeString(address, replacementStrings); // create the map of regexp string to temporarily replace with fillers, since the addressparser doesn't like them HashBiMap<String, String> constStringMap = setupConstStringMap(); // create the map of regexp string to temporarily replace with fillers, since the addressparser doesn't like them HashBiMap<String, String> regexpStringMap = formatRegexpStringMap(); // create the map of regexp strings that we explicitly do not want to escape HashBiMap<String, String> doNotEscapeRegexpStringMap = formatdoNotEscapeRegexpStringMap(); // Store the strings that we do not want to escape HashBiMap<String, String> doNotEscapeFilledStringMap = getMatchingStrings(address, doNotEscapeRegexpStringMap); // temporarily move the parts of the string that we explicitly do not want to escape address = escapeString(address, doNotEscapeRegexpStringMap); // Store the strings that match the regular expression fillers in the map for 
replacement later HashBiMap<String, String> filledRegexpStringMap = getMatchingStrings(address, regexpStringMap); // replace the strings that match within the address with fillers address = escapeString(address, filledRegexpStringMap); address = escapeString(address, constStringMap); // Put the strings we do not want to escape back in address = escapeString(address, doNotEscapeFilledStringMap.inverse()); // parse the address into separate fields Map<AddressComponent, String> results = prepareAddressForParsingAndParse(address, autoCorrectStateSpelling); if (results == null) { // there was a problem parsing the address return new EnumMap<>(AddressComponent.class); } // Replace the fillers in the split up fields with the original values (requires an inverse) results = replaceOriginalStringsInSplitUpFields(results, filledRegexpStringMap.inverse()); results = replaceOriginalStringsInSplitUpFields(results, constStringMap.inverse()); return results; }
From source file:org.apache.stanbol.workflow.jersey.resource.ContentItemResource.java
private void initOccurrences() { MGraph graph = contentItem.getMetadata(); LiteralFactory lf = LiteralFactory.getInstance(); Map<UriRef, Collection<NonLiteral>> suggestionMap = new HashMap<UriRef, Collection<NonLiteral>>(); // 1) get Entity Annotations Map<NonLiteral, Map<EAProps, Object>> entitySuggestionMap = new HashMap<NonLiteral, Map<EAProps, Object>>(); Iterator<Triple> entityAnnotations = graph.filter(null, RDF.type, ENHANCER_ENTITYANNOTATION); while (entityAnnotations.hasNext()) { NonLiteral entityAnnotation = entityAnnotations.next().getSubject(); //to avoid multiple lookups (e.g. if one entityAnnotation links to+ //several TextAnnotations) we cache the data in an intermediate Map Map<EAProps, Object> eaData = new EnumMap<EAProps, Object>(EAProps.class); eaData.put(EAProps.entity, getReference(graph, entityAnnotation, ENHANCER_ENTITY_REFERENCE)); eaData.put(EAProps.label, getString(graph, entityAnnotation, ENHANCER_ENTITY_LABEL)); eaData.put(EAProps.confidence,/*from w ww .jav a 2 s.c o m*/ EnhancementEngineHelper.get(graph, entityAnnotation, ENHANCER_CONFIDENCE, Double.class, lf)); entitySuggestionMap.put(entityAnnotation, eaData); Iterator<UriRef> textAnnotations = getReferences(graph, entityAnnotation, DC_RELATION); while (textAnnotations.hasNext()) { UriRef textAnnotation = textAnnotations.next(); Collection<NonLiteral> suggestions = suggestionMap.get(textAnnotation); if (suggestions == null) { suggestions = new ArrayList<NonLiteral>(); suggestionMap.put(textAnnotation, suggestions); } suggestions.add(entityAnnotation); } } // 2) get the TextAnnotations Iterator<Triple> textAnnotations = graph.filter(null, RDF.type, ENHANCER_TEXTANNOTATION); while (textAnnotations.hasNext()) { NonLiteral textAnnotation = textAnnotations.next().getSubject(); //we need to process those to show multiple mentions // if (graph.filter(textAnnotation, DC_RELATION, null).hasNext()) { // // this is not the most specific occurrence of this name: skip // continue; // } String 
text = getString(graph, textAnnotation, Properties.ENHANCER_SELECTED_TEXT); //TextAnnotations without fise:selected-text are no longer ignored // if(text == null){ // //ignore text annotations without text // continue; // } Integer start = EnhancementEngineHelper.get(graph, textAnnotation, ENHANCER_START, Integer.class, lf); Integer end = EnhancementEngineHelper.get(graph, textAnnotation, ENHANCER_END, Integer.class, lf); Double confidence = EnhancementEngineHelper.get(graph, textAnnotation, ENHANCER_CONFIDENCE, Double.class, lf); Iterator<UriRef> types = getReferences(graph, textAnnotation, DC_TYPE); if (!types.hasNext()) { //create an iterator over null in case no types are present types = Collections.singleton((UriRef) null).iterator(); } while (types.hasNext()) { UriRef type = types.next(); Map<EntityExtractionSummary, EntityExtractionSummary> occurrenceMap = extractionsByTypeMap .get(type); if (occurrenceMap == null) { occurrenceMap = new TreeMap<EntityExtractionSummary, EntityExtractionSummary>(); extractionsByTypeMap.put(type, occurrenceMap); } //in case of a language annotation use the detected language as label if (DC_LINGUISTIC_SYSTEM.equals(type)) { text = EnhancementEngineHelper.getString(graph, textAnnotation, DC_LANGUAGE); } EntityExtractionSummary entity = new EntityExtractionSummary(text, type, start, end, confidence, defaultThumbnails); Collection<NonLiteral> suggestions = suggestionMap.get(textAnnotation); if (suggestions != null) { for (NonLiteral entityAnnotation : suggestions) { Map<EAProps, Object> eaData = entitySuggestionMap.get(entityAnnotation); entity.addSuggestion((UriRef) eaData.get(EAProps.entity), (String) eaData.get(EAProps.label), (Double) eaData.get(EAProps.confidence), graph); } } EntityExtractionSummary existingSummary = occurrenceMap.get(entity); if (existingSummary == null) {//new extraction summary occurrenceMap.put(entity, entity); } else { //extraction summary with this text and suggestions already //present ... 
only add a mention to the existing existingSummary.addMention(new Mention(text, start, end, confidence)); } } } }