List of usage examples for java.util.Map.getOrDefault
default V getOrDefault(Object key, V defaultValue)
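Before the real-world examples, a minimal sketch of the method's contract (the class name, map, and keys here are made up purely for illustration): getOrDefault returns the mapped value when the key is present, and otherwise returns the supplied default without modifying the map.

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, Integer> counts = new HashMap<>();
        counts.put("apple", 3);

        // Key present: the mapped value is returned.
        System.out.println(counts.getOrDefault("apple", 0)); // 3

        // Key absent: the default is returned and the map is left unchanged.
        System.out.println(counts.getOrDefault("pear", 0));  // 0
        System.out.println(counts.containsKey("pear"));      // false

        // A common idiom: incrementing a counter without a null check.
        counts.put("pear", counts.getOrDefault("pear", 0) + 1);
        System.out.println(counts.get("pear"));              // 1
    }
}

Note that for a map which permits null values, a key explicitly mapped to null returns null (not the default), since the key is present.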
From source file:com.ikanow.aleph2.graph.titan.utils.TestTitanGraphBuildingUtils.java
@Test
public void test_finalEdgeGrouping() {
    final TitanGraph titan = getSimpleTitanGraph();
    final TitanTransaction tx = titan.buildTransaction().start();
    final Vertex v1 = tx.addVertex("1.1.1.1");
    final Vertex v2 = tx.addVertex("host.com");
    final ObjectNode key1 = _mapper.createObjectNode()
            .put(GraphAnnotationBean.name, "1.1.1.1")
            .put(GraphAnnotationBean.type, "ip-addr");
    final ObjectNode key2 = _mapper.createObjectNode()
            .put(GraphAnnotationBean.name, "host.com")
            .put(GraphAnnotationBean.type, "domain-name");
    final List<ObjectNode> mutable_edges = Arrays.asList(
            (ObjectNode) ((ObjectNode) _mapper.createObjectNode()
                    .put(GraphAnnotationBean.type, GraphAnnotationBean.ElementType.edge.toString())
                    .put(GraphAnnotationBean.label, "dns-connection")
                    .put(GraphAnnotationBean.inVLabel, "host.com") // (this gets removed on the first call - these props are not otherwise used)
                    .set(GraphAnnotationBean.inV,
                            _mapper.createObjectNode().put(GraphAnnotationBean.name, "host.com")
                                    .put(GraphAnnotationBean.type, "domain-name")))
                    .set(GraphAnnotationBean.outV,
                            _mapper.createObjectNode().put(GraphAnnotationBean.name, "1.1.1.1")
                                    .put(GraphAnnotationBean.type, "ip-addr")),
            (ObjectNode) ((ObjectNode) _mapper.createObjectNode()
                    .put(GraphAnnotationBean.type, GraphAnnotationBean.ElementType.edge.toString())
                    .put(GraphAnnotationBean.label, "self-connect")
                    .put(GraphAnnotationBean.outVLabel, "1.1.1.1") // (this gets removed on the first call - these props are not otherwise used)
                    .set(GraphAnnotationBean.inV,
                            _mapper.createObjectNode().put(GraphAnnotationBean.name, "1.1.1.1")
                                    .put(GraphAnnotationBean.type, "ip-addr")))
                    .set(GraphAnnotationBean.outV,
                            _mapper.createObjectNode().put(GraphAnnotationBean.name, "1.1.1.1")
                                    .put(GraphAnnotationBean.type, "ip-addr")));
    // Some other handy objects
    final List<ObjectNode> dup_mutable_edges = mutable_edges.stream().map(o -> o.deepCopy())
            .collect(Collectors.toList());
    final ObjectNode self_link = (ObjectNode) ((ObjectNode) _mapper.createObjectNode()
            .set(GraphAnnotationBean.inV, key1)).set(GraphAnnotationBean.outV, key1);
    final ObjectNode normal_link = (ObjectNode) ((ObjectNode) _mapper.createObjectNode()
            .set(GraphAnnotationBean.inV, key2)).set(GraphAnnotationBean.outV, key1);
    // First pass, should grab the self-connector
    {
        final Map<ObjectNode, List<ObjectNode>> ret_val_pass =
                TitanGraphBuildingUtils.finalEdgeGrouping(key1, v1, mutable_edges);
        // First pass through, snags the edge that points to itself
        final ObjectNode full_group = self_link.deepCopy().put(GraphAnnotationBean.label, "self-connect");
        assertEquals(1, ret_val_pass.size());
        assertEquals(1, ret_val_pass.getOrDefault(full_group, Collections.emptyList()).size());
        final ObjectNode val = ret_val_pass.get(full_group).get(0);
        assertEquals("self-connect", val.get(GraphAnnotationBean.label).asText());
        assertEquals(v1.id(), val.get(GraphAnnotationBean.inV).asLong());
        assertEquals(v1.id(), val.get(GraphAnnotationBean.outV).asLong());
    }
    // Second pass, connects the other edge up
    {
        final Map<ObjectNode, List<ObjectNode>> ret_val_pass =
                TitanGraphBuildingUtils.finalEdgeGrouping(key2, v2, mutable_edges);
        final ObjectNode full_group = normal_link.deepCopy().put(GraphAnnotationBean.label, "dns-connection");
        assertEquals(2, ret_val_pass.size());
        assertEquals("Should have normal key: " + ret_val_pass.keySet() + " vs " + normal_link, 1,
                ret_val_pass.getOrDefault(full_group, Collections.emptyList()).size());
        final ObjectNode val = ret_val_pass.get(full_group).get(0);
        assertEquals("dns-connection", val.get(GraphAnnotationBean.label).asText());
        assertEquals(v2.id(), val.get(GraphAnnotationBean.inV).asLong());
        assertEquals(v1.id(), val.get(GraphAnnotationBean.outV).asLong());
    }
    // Another first pass, but with the other key so nothing emits
    {
        final Map<ObjectNode, List<ObjectNode>> ret_val_pass =
                TitanGraphBuildingUtils.finalEdgeGrouping(key2, v2, dup_mutable_edges);
        assertEquals(0, ret_val_pass.size());
    }
    tx.commit();
}
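Note the getOrDefault calls in the assertions above: using Collections.emptyList() as the default lets the test assert on the group's size even when the expected key is absent, instead of risking a NullPointerException from a plain get followed by size().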
From source file:org.wso2.extension.siddhi.store.rdbms.RDBMSEventTable.java
/**
 * Method for creating a table on the data store in question, if it does not exist already.
 *
 * @param storeAnnotation the "@Store" annotation that contains the connection properties.
 * @param primaryKeys     the unique keys that should be set for the table.
 * @param indices         the DB indices that should be set for the table.
 */
private void createTable(Annotation storeAnnotation, Annotation primaryKeys, Annotation indices) {
    StringBuilder builder = new StringBuilder();
    List<Element> primaryKeyList = (primaryKeys == null) ? new ArrayList<>() : primaryKeys.getElements();
    List<Element> indexElementList = (indices == null) ? new ArrayList<>() : indices.getElements();
    List<String> queries = new ArrayList<>();
    Map<String, String> fieldLengths = RDBMSTableUtils
            .processFieldLengths(storeAnnotation.getElement(ANNOTATION_ELEMENT_FIELD_LENGTHS));
    this.validateFieldLengths(fieldLengths);
    this.attributes.forEach(attribute -> {
        builder.append(attribute.getName()).append(WHITESPACE);
        switch (attribute.getType()) {
        case BOOL:
            builder.append(booleanType);
            break;
        case DOUBLE:
            builder.append(doubleType);
            break;
        case FLOAT:
            builder.append(floatType);
            break;
        case INT:
            builder.append(integerType);
            break;
        case LONG:
            builder.append(longType);
            break;
        case OBJECT:
            builder.append(binaryType);
            break;
        case STRING:
            String fieldLengthAsString = fieldLengths.getOrDefault(attribute.getName(), stringSize);
            int fieldLength = fieldLengthAsString != null ? Integer.parseInt(fieldLengthAsString) : 0;
            if (fieldLength > fieldSizeLimit && bigStringType != null) {
                builder.append(bigStringType);
            } else {
                builder.append(stringType);
                if (null != stringSize) {
                    builder.append(OPEN_PARENTHESIS);
                    builder.append(fieldLengths.getOrDefault(attribute.getName(), stringSize));
                    builder.append(CLOSE_PARENTHESIS);
                }
            }
            break;
        }
        if (this.queryConfigurationEntry.isKeyExplicitNotNull()) {
            builder.append(WHITESPACE).append(SQL_NOT_NULL);
        }
        if (this.attributes.indexOf(attribute) != this.attributes.size() - 1 || !primaryKeyList.isEmpty()) {
            builder.append(SEPARATOR);
        }
    });
    if (primaryKeyList != null && !primaryKeyList.isEmpty()) {
        builder.append(SQL_PRIMARY_KEY_DEF).append(OPEN_PARENTHESIS)
                .append(RDBMSTableUtils.flattenAnnotatedElements(primaryKeyList)).append(CLOSE_PARENTHESIS);
    }
    queries.add(createQuery.replace(PLACEHOLDER_COLUMNS_FOR_CREATE, builder.toString()));
    if (indexElementList != null && !indexElementList.isEmpty()) {
        queries.add(indexQuery.replace(PLACEHOLDER_INDEX,
                RDBMSTableUtils.flattenAnnotatedElements(indexElementList)));
    }
    try {
        // Setting autocommit to true if the JDBC connection does not support transactions.
        this.executeDDQueries(queries, !this.transactionSupported);
        if (log.isDebugEnabled()) {
            log.debug("Table '" + this.tableName + "' created.");
        }
    } catch (SQLException e) {
        throw new RDBMSTableException("Unable to initialize table '" + this.tableName + "': " + e.getMessage(), e);
    }
}
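Here getOrDefault resolves a per-column field length from the annotation, falling back to the table-wide stringSize. One detail worth noticing: the default passed to getOrDefault may itself be null (stringSize is nullable, as the later null != stringSize check shows), so the code still null-checks the result before parsing it as an int.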
From source file:net.maritimecloud.identityregistry.utils.KeycloakAdminUtil.java
private void createIdpMappers(String idpName, Map<String, String> idpAtrMap, String orgMrn) {
    String providerType = idpAtrMap.get("providerType");
    IdentityProviderResource newIdpRes = getBrokerRealm().identityProviders().get(idpName);
    // Delete any existing mapper
    for (IdentityProviderMapperRepresentation mapper : newIdpRes.getMappers()) {
        newIdpRes.delete(mapper.getId());
    }
    // Create mapper for hardcoded org value
    String orgMapperName = "org mapper";
    IdentityProviderMapperRepresentation orgMapper = new IdentityProviderMapperRepresentation();
    orgMapper.setIdentityProviderAlias(idpName);
    orgMapper.setIdentityProviderMapper("hardcoded-attribute-idp-mapper");
    orgMapper.setName(orgMapperName);
    Map<String, String> orgMapperConf = new HashMap<>();
    orgMapperConf.put("attribute.value", orgMrn);
    orgMapperConf.put("attribute", "org");
    orgMapper.setConfig(orgMapperConf);
    newIdpRes.addMapper(orgMapper);
    // Create username mapper
    String usernameMapperName = "username mapper";
    IdentityProviderMapperRepresentation usernameMapper = new IdentityProviderMapperRepresentation();
    usernameMapper.setIdentityProviderAlias(idpName);
    usernameMapper.setName(usernameMapperName);
    Map<String, String> usernameMapperConf = new HashMap<>();
    String mrnPrefix = MrnUtil.getMrnPrefix(orgMrn);
    if ("oidc".equals(providerType)) {
        // Create OIDC specific mapper
        usernameMapper.setIdentityProviderMapper("oidc-username-idp-mapper");
        // Import username to an mrn in the form: urn:mrn:mcl:user:<org-id>:<user-id>
        usernameMapperConf.put("template", mrnPrefix + ":user:${ALIAS}:${CLAIM."
                + idpAtrMap.getOrDefault("usernameAttr", "preferred_username") + "}");
    } else {
        usernameMapper.setIdentityProviderMapper("saml-username-idp-mapper");
        // Import username to an mrn in the form: urn:mrn:mcl:user:<org-id>:<user-id>
        usernameMapperConf.put("template", mrnPrefix + ":user:${ALIAS}:${"
                + idpAtrMap.getOrDefault("usernameAttr", "NAMEID") + "}");
    }
    usernameMapper.setConfig(usernameMapperConf);
    newIdpRes.addMapper(usernameMapper);
    // Add other mappers as needed
    // The mappers are set up differently based on the provider type
    Map<String, String> defaultMappers;
    String mapperConfKey;
    if ("oidc".equals(providerType)) {
        defaultMappers = oidcDefaultMappers;
        mapperConfKey = "claim";
    } else {
        defaultMappers = samlDefaultMappers;
        mapperConfKey = "attribute.name";
    }
    String mapperType = providerType + "-user-attribute-idp-mapper";
    for (Map.Entry<String, String> entry : defaultMappers.entrySet()) {
        String attrName = attrNames2Keycloak.get(entry.getKey());
        String attrValue = idpAtrMap.getOrDefault(entry.getKey(), entry.getValue());
        // Skip creating this mapper if no value is defined
        if (attrValue == null) {
            continue;
        }
        String attrMapperName = attrName + " mapper";
        IdentityProviderMapperRepresentation mapper = new IdentityProviderMapperRepresentation();
        mapper.setIdentityProviderAlias(idpName);
        mapper.setIdentityProviderMapper(mapperType);
        mapper.setName(attrMapperName);
        Map<String, String> mapperConf = new HashMap<>();
        mapperConf.put(mapperConfKey, attrValue);
        mapperConf.put("user.attribute", attrName);
        mapper.setConfig(mapperConf);
        newIdpRes.addMapper(mapper);
    }
}
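This example uses getOrDefault to layer configuration: a value supplied in idpAtrMap wins, and otherwise a provider-specific fallback is used ("preferred_username" for OIDC, "NAMEID" for SAML, or the per-attribute default carried in the defaultMappers map).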
From source file:org.opencb.opencga.storage.core.variant.adaptors.VariantDBAdaptorTest.java
@Test
public void testGetAllVariants_proteinKeywords() {
    // ANNOT_PROTEIN_KEYWORDS
    Query query;
    Map<String, Integer> keywords = new HashMap<>();
    int combinedKeywordsOr = 0;
    int combinedKeywordsAnd = 0;
    int combinedKeywordsAndNot = 0;
    for (Variant variant : allVariants.getResult()) {
        Set<String> keywordsInVariant = new HashSet<>();
        if (variant.getAnnotation().getConsequenceTypes() != null) {
            for (ConsequenceType consequenceType : variant.getAnnotation().getConsequenceTypes()) {
                if (consequenceType.getProteinVariantAnnotation() != null
                        && consequenceType.getProteinVariantAnnotation().getKeywords() != null) {
                    keywordsInVariant.addAll(consequenceType.getProteinVariantAnnotation().getKeywords());
                }
            }
        }
        for (String flag : keywordsInVariant) {
            keywords.put(flag, keywords.getOrDefault(flag, 0) + 1);
        }
        if (keywordsInVariant.contains("Complete proteome") || keywordsInVariant.contains("Transmembrane helix")) {
            combinedKeywordsOr++;
        }
        if (keywordsInVariant.contains("Complete proteome") && keywordsInVariant.contains("Transmembrane helix")) {
            combinedKeywordsAnd++;
        }
        if (keywordsInVariant.contains("Complete proteome") && !keywordsInVariant.contains("Transmembrane helix")) {
            combinedKeywordsAndNot++;
        }
    }
    assertTrue(combinedKeywordsOr > 0);
    assertTrue(combinedKeywordsAnd > 0);
    assertTrue(combinedKeywordsAndNot > 0);
    query = new Query(ANNOT_PROTEIN_KEYWORDS.key(), "Complete proteome,Transmembrane helix");
    assertEquals(combinedKeywordsOr, dbAdaptor.count(query).first().intValue());
    query = new Query(ANNOT_PROTEIN_KEYWORDS.key(), "Complete proteome;Transmembrane helix");
    assertEquals(combinedKeywordsAnd, dbAdaptor.count(query).first().intValue());
    query = new Query(ANNOT_PROTEIN_KEYWORDS.key(), "Complete proteome;!Transmembrane helix");
    assertEquals(combinedKeywordsAndNot, dbAdaptor.count(query).first().intValue());
    for (Map.Entry<String, Integer> entry : keywords.entrySet()) {
        System.out.println(entry);
        query = new Query(ANNOT_PROTEIN_KEYWORDS.key(), entry.getKey());
        queryResult = dbAdaptor.get(query, null);
        assertEquals(entry.getValue().intValue(), queryResult.getNumResults());
    }
}
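The keywords.put(flag, keywords.getOrDefault(flag, 0) + 1) line above is the classic frequency-count idiom. A minimal sketch of it in isolation (the data is hypothetical), alongside the equivalent Map.merge form also available since Java 8:

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class FrequencyCount {
    public static void main(String[] args) {
        // Read-then-write: absent keys count as 0.
        Map<String, Integer> counts = new HashMap<>();
        for (String flag : Arrays.asList("a", "b", "a")) {
            counts.put(flag, counts.getOrDefault(flag, 0) + 1);
        }

        // The same accumulation in a single call per element.
        Map<String, Integer> merged = new HashMap<>();
        for (String flag : Arrays.asList("a", "b", "a")) {
            merged.merge(flag, 1, Integer::sum);
        }

        System.out.println(counts); // {a=2, b=1}
        System.out.println(merged); // {a=2, b=1}
    }
}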
From source file:org.opencb.opencga.storage.core.variant.adaptors.VariantDBAdaptorTest.java
@Test
public void testGetAllVariants_geneTraits() {
    // ANNOT_GENE_TRAITS_ID
    // ANNOT_GENE_TRAITS_NAME
    Query query;
    Map<String, Integer> idsMap = new HashMap<>();
    Map<String, Integer> namesMap = new HashMap<>();
    Map<String, Integer> hposMap = new HashMap<>();
    for (Variant variant : allVariants.getResult()) {
        Set<String> ids = new HashSet<>();
        Set<String> names = new HashSet<>();
        Set<String> hpos = new HashSet<>();
        if (variant.getAnnotation().getGeneTraitAssociation() != null) {
            for (GeneTraitAssociation geneTrait : variant.getAnnotation().getGeneTraitAssociation()) {
                ids.add(geneTrait.getId());
                names.add(geneTrait.getName());
                if (StringUtils.isNotEmpty(geneTrait.getHpo())) {
                    hpos.add(geneTrait.getHpo());
                }
            }
        }
        for (String id : ids) {
            idsMap.put(id, idsMap.getOrDefault(id, 0) + 1);
        }
        for (String name : names) {
            namesMap.put(name, namesMap.getOrDefault(name, 0) + 1);
        }
        for (String hpo : hpos) {
            hposMap.put(hpo, hposMap.getOrDefault(hpo, 0) + 1);
        }
    }
    System.out.println(idsMap.size());
    System.out.println(namesMap.size());
    System.out.println(hposMap.size());
    // for (Map.Entry<String, Integer> entry : namesMap.entrySet()) {
    //     query = new Query(VariantDBAdaptor.VariantQueryParams.ANNOT_GENE_TRAITS_NAME.key(), "~=" + entry.getKey());
    //     queryResult = dbAdaptor.get(query, null);
    //     assertEquals(entry.getKey(), entry.getValue().intValue(), queryResult.getNumResults());
    // }
    int i = 0;
    for (Map.Entry<String, Integer> entry : idsMap.entrySet()) {
        query = new Query(ANNOT_GENE_TRAITS_ID.key(), entry.getKey());
        queryResult = dbAdaptor.get(query, null);
        assertEquals(entry.getValue().intValue(), queryResult.getNumResults());
        if (i++ == 400) {
            break;
        }
    }
    i = 0;
    for (Map.Entry<String, Integer> entry : hposMap.entrySet()) {
        query = new Query(ANNOT_HPO.key(), entry.getKey());
        queryResult = dbAdaptor.get(query, null);
        assertEquals(entry.getKey(), entry.getValue().intValue(), queryResult.getNumResults());
        if (i++ == 400) {
            break;
        }
    }
}
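Same counting idiom as the previous test, applied to three maps at once (trait ids, names, and HPO terms); getOrDefault keeps each accumulation to a single line with no explicit containsKey check.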
From source file:org.apache.storm.localizer.AsyncLocalizer.java
@VisibleForTesting
AsyncLocalizer(Map<String, Object> conf, AdvancedFSOps ops, String baseDir,
        AtomicReference<Map<Long, LocalAssignment>> currAssignment,
        Map<Integer, LocalAssignment> portToAssignments) throws IOException {
    this.conf = conf;
    isLocalMode = ConfigUtils.isLocalMode(conf);
    fsOps = ops;
    localBaseDir = baseDir;
    // default cache size 10GB, converted to Bytes
    cacheTargetSize = ObjectReader
            .getInt(conf.get(DaemonConfig.SUPERVISOR_LOCALIZER_CACHE_TARGET_SIZE_MB), 10 * 1024)
            .longValue() << 20;
    // default 30 seconds. (we cache the size so it is cheap to do)
    cacheCleanupPeriod = ObjectReader
            .getInt(conf.get(DaemonConfig.SUPERVISOR_LOCALIZER_CACHE_CLEANUP_INTERVAL_MS), 30 * 1000)
            .longValue();
    // if needed, we could add a config option for the update thread pool size
    int threadPoolSize = ObjectReader.getInt(conf.get(DaemonConfig.SUPERVISOR_BLOBSTORE_DOWNLOAD_THREAD_COUNT), 5);
    blobDownloadRetries = ObjectReader.getInt(conf.get(DaemonConfig.SUPERVISOR_BLOBSTORE_DOWNLOAD_MAX_RETRIES), 3);
    execService = Executors.newScheduledThreadPool(threadPoolSize,
            new ThreadFactoryBuilder().setNameFormat("AsyncLocalizer Executor - %d").build());
    reconstructLocalizedResources();
    symlinksDisabled = (boolean) conf.getOrDefault(Config.DISABLE_SYMLINKS, false);
    blobPending = new HashMap<>();
    this.currAssignment = currAssignment;
    recoverBlobReferences(portToAssignments);
}
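Note the symlinksDisabled line: getOrDefault works on a raw Map<String, Object>, so the Boolean default doubles as documentation of the expected value type. The (boolean) cast would still throw a ClassCastException if the config held a non-Boolean value under that key, and a NullPointerException if the key were explicitly mapped to null.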
From source file:org.mskcc.cbio.portal.servlet.CancerStudyView.java
/**
 * This method builds the response for the servlet request.
 *
 * @param request
 * @return boolean value (whether success or not)
 * @throws DaoException
 * @throws JsonProcessingException
 * @throws IOException
 */
private boolean buildResponse(HttpServletRequest request)
        throws DaoException, JsonProcessingException, IOException {
    Map<String, HashSet<String>> inputStudyMap = getStudyIds(request);
    Set<VirtualStudy> studies = getProcessedStudyMap(inputStudyMap);
    Map<String, Set<String>> studySampleMap = new HashMap<String, Set<String>>();
    UserDetails ud = accessControl.getUserDetails();
    if (ud != null) {
        LOG.info("CancerStudyView.validate: Query initiated by user: " + ud.getUsername() + " : Study(s): "
                + inputStudyMap.keySet());
    }
    Set<String> knowIds = studies.stream().map(obj -> obj.getId()).collect(Collectors.toSet());
    // add unknown input ids
    Set<String> unKnownIds = inputStudyMap.keySet().stream().filter(id -> !knowIds.contains(id))
            .collect(Collectors.toSet());
    // add unauthorized ids
    unKnownIds.addAll(studies.stream().filter(obj -> {
        if (inputStudyMap.containsKey(obj.getId())) {
            for (VirtualStudySamples _study : obj.getData().getStudies()) {
                try {
                    if (accessControl.isAccessibleCancerStudy(_study.getId()).size() != 1)
                        return true;
                } catch (Exception e) {
                    return true;
                }
            }
            return false;
        } else {
            return true;
        }
    }).map(obj -> obj.getId()).collect(Collectors.toSet()));
    if (unKnownIds.size() > 0) {
        request.setAttribute(ERROR,
                "Unknown/Unauthorized studies in: " + StringUtils.join(unKnownIds, ",") + ".");
        return false;
    }
    // prepare sample response map
    studies.stream().forEach(data -> {
        for (VirtualStudySamples _study : data.getData().getStudies()) {
            Set<String> sampleIdsToAdd = _study.getSamples();
            if (sampleIdsToAdd == null) {
                SampleList sampleList;
                try {
                    sampleList = daoSampleList.getSampleListByStableId(_study.getId() + "_all");
                } catch (DaoException e) {
                    throw new RuntimeException(e);
                }
                sampleIdsToAdd = new HashSet<String>(sampleList.getSampleList());
            }
            Set<String> updatedSampleList = studySampleMap.getOrDefault(_study.getId(), new HashSet<>());
            updatedSampleList.addAll(sampleIdsToAdd);
            studySampleMap.put(_study.getId(), updatedSampleList);
        }
    });
    ObjectMapper mapper = new ObjectMapper();
    String studySampleMapString = mapper.writeValueAsString(studySampleMap);
    request.setAttribute(STUDY_SAMPLE_MAP, studySampleMapString);
    request.setAttribute(ID, inputStudyMap.keySet());
    return true;
}
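The accumulation at the end follows the getOrDefault / mutate / put pattern for building a multimap. A condensed sketch under assumed names (id and samples are hypothetical), with the computeIfAbsent alternative that allocates the set only when the key is absent and skips the re-put:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class GroupingSketch {
    public static void main(String[] args) {
        Map<String, Set<String>> studySampleMap = new HashMap<>();
        String id = "study_1";                            // hypothetical study id
        List<String> samples = Arrays.asList("s1", "s2"); // hypothetical sample ids

        // Pattern used above: read with a fresh default, mutate, write back.
        Set<String> updated = studySampleMap.getOrDefault(id, new HashSet<>());
        updated.addAll(samples);
        studySampleMap.put(id, updated);

        // Equivalent, creating and storing the set only on first sight of the key.
        studySampleMap.computeIfAbsent(id, k -> new HashSet<>()).addAll(samples);

        System.out.println(studySampleMap); // {study_1=[s1, s2]}
    }
}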
From source file:org.openecomp.sdc.asdctool.impl.migration.v1604.ServiceMigration.java
private boolean fixDerivedVf() {
    Map<String, Object> props = new HashMap<String, Object>();
    props.put(GraphPropertiesDictionary.RESOURCE_TYPE.getProperty(), ResourceTypeEnum.VF.name());
    Either<List<ResourceMetadataData>, TitanOperationStatus> allVF = titanGenericDao
            .getByCriteria(NodeTypeEnum.Resource, props, ResourceMetadataData.class);
    if (allVF.isRight()) {
        if (allVF.right().value().equals(TitanOperationStatus.NOT_FOUND)) {
            log.debug("fixDerivedVf - no VFs");
            return true;
        }
        log.debug("fixDerivedVf failed to fetch all VF resources, error {}", allVF.right().value());
        return false;
    }
    Map<String, String> vfUuidToVfcUuid = new HashMap<String, String>();
    for (ResourceMetadataData metadata : allVF.left().value()) {
        Either<Resource, StorageOperationStatus> eitherResource = resourceOperation
                .getResource(metadata.getMetadataDataDefinition().getUniqueId(), true);
        if (eitherResource.isRight()) {
            log.error("failed to migrate VF {} from version 1602 to version 1604. error is: {}",
                    metadata.getMetadataDataDefinition().getUniqueId(), eitherResource.right().value().name());
            return false;
        }
        Resource vfResource = eitherResource.left().value();
        if (vfResource.getDerivedFrom() == null || vfResource.getDerivedFrom().isEmpty()) {
            continue;
        }
        Boolean isVfDeleted = vfResource.getIsDeleted();
        String vfUUID = vfResource.getUUID();
        String vfcUUID = vfUuidToVfcUuid.getOrDefault(vfUUID, null);
        if (vfcUUID == null) {
            vfcUUID = UUID.randomUUID().toString();
            vfUuidToVfcUuid.put(vfUUID, vfcUUID);
        }
        // handle lifecycle
        String vfUniqueId = vfResource.getUniqueId();
        LifecycleStateEnum vfcTargetState = vfResource.getLifecycleState();
        if (vfcTargetState.equals(LifecycleStateEnum.READY_FOR_CERTIFICATION)
                || vfcTargetState.equals(LifecycleStateEnum.CERTIFICATION_IN_PROGRESS)) {
            User user = new User();
            user.setUserId(vfResource.getLastUpdaterUserId());
            Either<? extends Component, StorageOperationStatus> checkinComponent = lifecycleOperaion
                    .checkinComponent(NodeTypeEnum.Resource, vfResource, user, user, true);
            if (checkinComponent.isRight()) {
                log.error("failed to checkin VF {}. error={}", vfUniqueId,
                        checkinComponent.right().value().name());
                return false;
            }
        } else if (vfcTargetState.equals(LifecycleStateEnum.NOT_CERTIFIED_CHECKOUT)) {
            vfcTargetState = LifecycleStateEnum.NOT_CERTIFIED_CHECKIN;
        }
        // delete VF Properties
        List<PropertyDefinition> properties = vfResource.getProperties();
        if (properties != null && !properties.isEmpty()) {
            Either<Map<String, PropertyDefinition>, StorageOperationStatus> deleteAllProperties = propertyOperation
                    .deleteAllPropertiesAssociatedToNode(NodeTypeEnum.Resource, vfUniqueId);
            if (deleteAllProperties.isRight()
                    && !deleteAllProperties.right().value().equals(StorageOperationStatus.NOT_FOUND)
                    && !deleteAllProperties.right().value().equals(StorageOperationStatus.OK)) {
                log.error("failed to delete properties of VF {} . error is: {}",
                        metadata.getMetadataDataDefinition().getUniqueId(),
                        deleteAllProperties.right().value().name());
                return false;
            }
        }
        // delete VF Additional Info
        List<AdditionalInformationDefinition> additionalInformation = vfResource.getAdditionalInformation();
        if (additionalInformation != null && !additionalInformation.isEmpty()) {
            Either<AdditionalInformationDefinition, StorageOperationStatus> deleteAllAdditionalInformationParameters = additionalInformationOperation
                    .deleteAllAdditionalInformationParameters(NodeTypeEnum.Resource, vfUniqueId, true);
            if (deleteAllAdditionalInformationParameters.isRight()
                    && !deleteAllAdditionalInformationParameters.right().value().equals(StorageOperationStatus.OK)
                    && !deleteAllAdditionalInformationParameters.right().value().equals(StorageOperationStatus.NOT_FOUND)) {
                log.error("failed to delete properties of VF {} . error is: {}",
                        metadata.getMetadataDataDefinition().getUniqueId(),
                        deleteAllAdditionalInformationParameters.right().value().name());
                return false;
            }
        }
        // delete VF derivedFrom
        GraphRelation derivedFromRelation = new GraphRelation(GraphEdgeLabels.DERIVED_FROM.getProperty());
        derivedFromRelation.setFrom(new RelationEndPoint(NodeTypeEnum.Resource,
                UniqueIdBuilder.getKeyByNodeType(NodeTypeEnum.Resource), vfUniqueId));
        Either<GraphRelation, TitanOperationStatus> deleteDerivedFromRelation = titanGenericDao
                .deleteOutgoingRelation(derivedFromRelation);
        if (deleteDerivedFromRelation.isRight()) {
            log.error("failed to delete derivedFrom relation of VF {} . error is: {}",
                    metadata.getMetadataDataDefinition().getUniqueId(),
                    deleteDerivedFromRelation.right().value().name());
            return false;
        }
        // create VFC
        Either<Resource, StorageOperationStatus> createVFC = createVFC(metadata, vfResource, vfcUUID, vfcTargetState);
        if (createVFC.isRight()) {
            log.error("failed to split VF {} to VFC. error is: {}",
                    metadata.getMetadataDataDefinition().getUniqueId(), createVFC.right().value().name());
            return false;
        }
        Resource vfcResource = createVFC.left().value();
        if (!createVfcInstanceOnVf(vfcResource, vfUniqueId)) {
            return false;
        }
        // update VFC to deleted if required
        if (isVfDeleted != null && isVfDeleted) {
            Either<Component, StorageOperationStatus> markResourceToDelete = resourceOperation
                    .markComponentToDelete(vfcResource, true);
            if (markResourceToDelete.isRight()) {
                log.error("failed to mark isDeleted on VFC {} . error is: {}", vfcResource.getUniqueId(),
                        markResourceToDelete.right().value().name());
                return false;
            }
        }
    }
    return true;
}
From source file:org.opennms.features.topology.plugins.topo.linkd.internal.EnhancedLinkdTopologyProvider.java
private void getLldpLinks(Map<Integer, OnmsNode> nodemap, Map<Integer, List<OnmsSnmpInterface>> nodesnmpmap,
        Map<Integer, OnmsIpInterface> ipprimarymap) {
    // Index the nodes by sysName
    final Map<String, OnmsNode> nodesbysysname = new HashMap<>();
    for (OnmsNode node : nodemap.values()) {
        if (node.getSysName() != null) {
            nodesbysysname.putIfAbsent(node.getSysName(), node);
        }
    }
    // Index the LLDP elements by node id
    Map<Integer, LldpElement> lldpelementmap = new HashMap<Integer, LldpElement>();
    for (LldpElement lldpelement : m_lldpElementDao.findAll()) {
        lldpelementmap.put(lldpelement.getNode().getId(), lldpelement);
    }
    // Pull all of the LLDP links and index them by remote chassis id
    List<LldpLink> allLinks = m_lldpLinkDao.findAll();
    Map<String, List<LldpLink>> linksByRemoteChassisId = new HashMap<>();
    for (LldpLink link : allLinks) {
        final String remoteChassisId = link.getLldpRemChassisId();
        List<LldpLink> linksWithRemoteChassisId = linksByRemoteChassisId.get(remoteChassisId);
        if (linksWithRemoteChassisId == null) {
            linksWithRemoteChassisId = new ArrayList<>();
            linksByRemoteChassisId.put(remoteChassisId, linksWithRemoteChassisId);
        }
        linksWithRemoteChassisId.add(link);
    }
    Set<LldpLinkDetail> combinedLinkDetails = new HashSet<LldpLinkDetail>();
    Set<Integer> parsed = new HashSet<Integer>();
    for (LldpLink sourceLink : allLinks) {
        if (parsed.contains(sourceLink.getId())) {
            continue;
        }
        LOG.debug("loadtopology: lldp link with id '{}' link '{}' ", sourceLink.getId(), sourceLink);
        LldpElement sourceLldpElement = lldpelementmap.get(sourceLink.getNode().getId());
        LldpLink targetLink = null;
        // Limit the candidate links by only choosing those that have a remote chassis id
        // matching the chassis id of the source link
        for (LldpLink link : linksByRemoteChassisId.getOrDefault(sourceLldpElement.getLldpChassisId(),
                Collections.emptyList())) {
            if (parsed.contains(link.getId())) {
                continue;
            }
            if (sourceLink.getId().intValue() == link.getId().intValue()) {
                continue;
            }
            LOG.debug("loadtopology: checking lldp link with id '{}' link '{}' ", link.getId(), link);
            LldpElement element = lldpelementmap.get(link.getNode().getId());
            // Compare the chassis id on the other end of the link
            if (!sourceLink.getLldpRemChassisId().equals(element.getLldpChassisId())) {
                continue;
            }
            boolean bool1 = sourceLink.getLldpRemPortId().equals(link.getLldpPortId())
                    && link.getLldpRemPortId().equals(sourceLink.getLldpPortId());
            boolean bool3 = sourceLink.getLldpRemPortIdSubType() == link.getLldpPortIdSubType()
                    && link.getLldpRemPortIdSubType() == sourceLink.getLldpPortIdSubType();
            if (bool1 && bool3) {
                targetLink = link;
                LOG.info("loadtopology: found lldp mutual link: '{}' and '{}' ", sourceLink, targetLink);
                break;
            }
        }
        if (targetLink == null && sourceLink.getLldpRemSysname() != null) {
            final OnmsNode node = nodesbysysname.get(sourceLink.getLldpRemSysname());
            if (node != null) {
                targetLink = reverseLldpLink(node, sourceLldpElement, sourceLink);
                LOG.info("loadtopology: found lldp link using lldp rem sysname: '{}' and '{}'", sourceLink, targetLink);
            }
        }
        if (targetLink == null) {
            LOG.info("loadtopology: cannot find target node for link: '{}'", sourceLink);
            continue;
        }
        parsed.add(sourceLink.getId());
        parsed.add(targetLink.getId());
        Vertex source = getOrCreateVertex(nodemap.get(sourceLink.getNode().getId()),
                ipprimarymap.get(sourceLink.getNode().getId()));
        Vertex target = getOrCreateVertex(nodemap.get(targetLink.getNode().getId()),
                ipprimarymap.get(targetLink.getNode().getId()));
        combinedLinkDetails.add(new LldpLinkDetail(
                Math.min(sourceLink.getId(), targetLink.getId()) + "|"
                        + Math.max(sourceLink.getId(), targetLink.getId()),
                source, sourceLink, target, targetLink));
    }
    for (LldpLinkDetail linkDetail : combinedLinkDetails) {
        LinkdEdge edge = connectVertices(linkDetail, LLDP_EDGE_NAMESPACE);
        edge.setTooltipText(getEdgeTooltipText(linkDetail, nodesnmpmap));
    }
}
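Using Collections.emptyList() as the default in the candidate loop lets the inner for-loop simply run zero times when a chassis id has no entry in the index, with no null check and no mutation of linksByRemoteChassisId.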
From source file:org.apache.gobblin.publisher.BaseDataPublisher.java
private void mergeMetadataAndCollectPartitionNames(Collection<? extends WorkUnitState> states,
        Set<String> partitionPaths) {
    for (WorkUnitState workUnitState : states) {
        // First extract the partition paths and metrics from the work unit. This is essentially
        // equivalent to grouping FsWriterMetrics by {partitionKey, branchId} and extracting
        // all partitionPaths into a set.
        Map<PartitionIdentifier, Set<FsWriterMetrics>> metricsByPartition = new HashMap<>();
        boolean partitionFound = false;
        for (Map.Entry<Object, Object> property : workUnitState.getProperties().entrySet()) {
            if (((String) property.getKey()).startsWith(ConfigurationKeys.WRITER_PARTITION_PATH_KEY)) {
                partitionPaths.add((String) property.getValue());
                partitionFound = true;
            } else if (((String) property.getKey()).startsWith(FsDataWriter.FS_WRITER_METRICS_KEY)) {
                try {
                    FsWriterMetrics parsedMetrics = FsWriterMetrics.fromJson((String) property.getValue());
                    partitionPaths.add(parsedMetrics.getPartitionInfo().getPartitionKey());
                    Set<FsWriterMetrics> metricsForPartition = metricsByPartition
                            .computeIfAbsent(parsedMetrics.getPartitionInfo(), k -> new HashSet<>());
                    metricsForPartition.add(parsedMetrics);
                } catch (IOException e) {
                    LOG.warn("Error parsing metrics from property {} - ignoring", (String) property.getValue());
                }
            }
        }
        // no specific partitions - add null as a placeholder
        if (!partitionFound) {
            partitionPaths.add(null);
        }
        final String configBasedMetadata = getMetadataFromWorkUnitState(workUnitState);
        // Now update all metadata mergers with branch metadata + partition metrics
        for (int branchId = 0; branchId < numBranches; branchId++) {
            for (String partition : partitionPaths) {
                PartitionIdentifier partitionIdentifier = new PartitionIdentifier(partition, branchId);
                final int branch = branchId;
                MetadataMerger<String> mdMerger = metadataMergers.computeIfAbsent(partitionIdentifier,
                        k -> buildMetadataMergerForBranch(configBasedMetadata, branch,
                                getMetadataOutputFileForBranch(workUnitState, branch)));
                if (shouldPublishWriterMetadataForBranch(branchId)) {
                    String md = getIntermediateMetadataFromState(workUnitState, branchId);
                    mdMerger.update(md);
                    Set<FsWriterMetrics> metricsForPartition = metricsByPartition
                            .getOrDefault(partitionIdentifier, Collections.emptySet());
                    for (FsWriterMetrics metrics : metricsForPartition) {
                        mdMerger.update(metrics);
                    }
                }
            }
        }
    }
}
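This last example shows the two sides of the coin in one method: computeIfAbsent is used where a missing entry should be created and stored for later reuse (metricsByPartition, metadataMergers), while getOrDefault is used where a missing entry should merely be read as an empty collection without touching the map.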