List of usage examples for java.util Map containsValue
boolean containsValue(Object value);
From source file:services.object.ObjectService.java
/**
 * Reads one planet's buildout datatable and instantiates the static world objects
 * (buildings, cells, misc. tangibles) it describes, offset by the buildout area
 * origin (x1, z1).
 *
 * Two row layouts are supported, distinguished by column count:
 * - legacy (&lt;= 11 columns): id, cellIndex, px..qz, radius, portalCRC
 * - current (&gt; 11 columns):  id, container, type, ?, cellIndex, px..qz, radius, portalCRC
 *
 * The 32-bit client ids are widened to 64-bit ids by appending 0xF986FFFF
 * (little-endian view of 0xFFFF86F9). Duplicate ids coming from instanced content
 * (heroics/dungeons) are remapped via the duplicate-id ODB so each instance gets a
 * stable, unique server id.
 *
 * @param buildoutTable parsed buildout datatable for one area
 * @param planet        planet the objects belong to
 * @param x1            X offset of the buildout area
 * @param z1            Z offset of the buildout area
 * @throws InstantiationException if an object template cannot be instantiated
 * @throws IllegalAccessException if an object template cannot be instantiated
 */
public void readBuildoutDatatable(DatatableVisitor buildoutTable, Planet planet, float x1, float z1)
        throws InstantiationException, IllegalAccessException {
    CrcStringTableVisitor crcTable = ClientFileManager.loadFile("misc/object_template_crc_string_table.iff",
            CrcStringTableVisitor.class);
    String planetName = planet.getName();
    // Maps original (duplicate) object id -> freshly generated unique id.
    Map<Long, Long> duplicate = new HashMap<Long, Long>();
    for (int i = 0; i < buildoutTable.getRowCount(); i++) {
        String template;
        // Template CRC column position depends on the table layout.
        if (buildoutTable.getColumnCount() <= 11)
            template = crcTable.getTemplateString((Integer) buildoutTable.getObject(i, 0));
        else
            template = crcTable.getTemplateString((Integer) buildoutTable.getObject(i, 3));
        if (template != null) {
            float px, py, pz, qw, qx, qy, qz, radius;
            // NOTE(review): objectId2/containerId2 are read but never used below —
            // candidates for removal in a functional change.
            long objectId = 0, containerId = 0, objectId2 = 0, containerId2 = 0;
            int type = 0, cellIndex = 0, portalCRC;
            if (buildoutTable.getColumnCount() <= 11) {
                // Legacy layout: no container/type columns.
                objectId2 = (((Integer) buildoutTable.getObject(i, 0) == 0) ? 0
                        : Delta.createBuffer(8).putInt((Integer) buildoutTable.getObject(i, 0))
                                .putInt(0xF986FFFF).flip().getLong());
                cellIndex = (Integer) buildoutTable.getObject(i, 1);
                px = (Float) buildoutTable.getObject(i, 2);
                py = (Float) buildoutTable.getObject(i, 3);
                pz = (Float) buildoutTable.getObject(i, 4);
                qw = (Float) buildoutTable.getObject(i, 5);
                qx = (Float) buildoutTable.getObject(i, 6);
                qy = (Float) buildoutTable.getObject(i, 7);
                qz = (Float) buildoutTable.getObject(i, 8);
                radius = (Float) buildoutTable.getObject(i, 9);
                portalCRC = (Integer) buildoutTable.getObject(i, 10);
            } else {
                // Since the ids are just ints, they append 0xFFFF86F9 to them.
                // This is demonstrated in the packet sent to the server when you
                // /target client-spawned objects.
                objectId = (((Integer) buildoutTable.getObject(i, 0) == 0) ? 0
                        : Delta.createBuffer(8).putInt((Integer) buildoutTable.getObject(i, 0))
                                .putInt(0xF986FFFF).flip().getLong());
                objectId2 = Long.valueOf((Integer) buildoutTable.getObjectByColumnNameAndIndex("objid", i));
                containerId = (((Integer) buildoutTable.getObject(i, 1) == 0) ? 0
                        : Delta.createBuffer(8).putInt((Integer) buildoutTable.getObject(i, 1))
                                .putInt(0xF986FFFF).flip().getLong());
                containerId2 = Long
                        .valueOf((Integer) buildoutTable.getObjectByColumnNameAndIndex("container", i));
                type = (Integer) buildoutTable.getObject(i, 2);
                cellIndex = (Integer) buildoutTable.getObject(i, 4);
                px = (Float) buildoutTable.getObject(i, 5);
                py = (Float) buildoutTable.getObject(i, 6);
                pz = (Float) buildoutTable.getObject(i, 7);
                qw = (Float) buildoutTable.getObject(i, 8);
                qx = (Float) buildoutTable.getObject(i, 9);
                qy = (Float) buildoutTable.getObject(i, 10);
                qz = (Float) buildoutTable.getObject(i, 11);
                radius = (Float) buildoutTable.getObject(i, 12);
                portalCRC = (Integer) buildoutTable.getObject(i, 13);
            }
            // Treeku - Refactored to work around duplicate objectIds.
            // Required for instances/heroics which are duplicated ie. 10 times.
            //if(!template.equals("object/cell/shared_cell.iff") && objectId != 0 && getObject(objectId) != null) {
            if (!template.equals("object/cell/shared_cell.iff") && objectId != 0
                    && checkIfObjectAlreadyInList(objectId)) {
                SWGObject object = getObject(objectId);
                // Same coordinates is a true duplicate — skip the row entirely.
                if ((px + ((containerId == 0) ? 0 : x1)) == object.getPosition().x
                        && py == object.getPosition().y
                        && (pz + ((containerId == 0) ? 0 : z1)) == object.getPosition().z) {
                    //System.out.println("Duplicate buildout object: " + template);
                    continue;
                }
            }
            // If this row's container was itself remapped earlier, follow the remap.
            if (duplicate.containsKey(containerId)) {
                containerId = duplicate.get(containerId);
            }
            // TODO needs to a way to work for mustafar and kashyyyk which both have instances
            //if (objectId != 0 && getObject(objectId) != null && (planetName.contains("dungeon") || planetName.contains("adventure"))) {
            if (objectId != 0 && checkIfObjectAlreadyInList(objectId)
                    && (planetName.contains("dungeon") || planetName.contains("adventure"))) {
                SWGObject container = getObject(containerId);
                float x = (px + ((container == null) ? x1 : container.getPosition().x));
                float z = (pz + ((container == null) ? z1 : container.getPosition().z));
                // Deterministic key so the same instanced object gets the same replacement
                // id across restarts (looked up in the DuplicateId ODB).
                String key = "" + CRC.StringtoCRC(planet.getName()) + CRC.StringtoCRC(template) + type
                        + containerId + cellIndex + x + py + z;
                long newObjectId = 0;
                if (core.getDuplicateIdODB().contains(key)) {
                    newObjectId = ((DuplicateId) core.getDuplicateIdODB().get(key)).getObjectId();
                } else {
                    newObjectId = generateObjectID();
                    core.getDuplicateIdODB().put(key, new DuplicateId(key, newObjectId));
                }
                duplicate.put(objectId, newObjectId);
                objectId = newObjectId;
            }
            // NOTE(review): 'containers' is re-created per row, so the
            // containers.contains(containerId) check below can only ever match ids
            // added in the same row — presumably intended to persist across rows;
            // verify against original design before changing.
            List<Long> containers = new ArrayList<Long>();
            SWGObject object;
            if (objectId != 0 && containerId == 0) {
                if (portalCRC != 0) { // Is building
                    //if (core.getSWGObjectODB().contains(objectId) && !duplicate.containsValue(objectId)){
                    if (core.getSWGObjectODB().contains(objectId)) {
                        if (buildoutDEBUG)
                            System.err.println("core.getSWGObjectODB().contains(objectId)" + template + " "
                                    + Long.toHexString(objectId));
                        continue;
                    }
                    if (duplicate.containsValue(objectId)) {
                        if (buildoutDEBUG)
                            System.err.println("duplicate.containsValue(objectId)" + template + " "
                                    + Long.toHexString(objectId));
                        continue;
                    }
                    if (checkIfObjectAlreadyInList(objectId)) {
                        if (buildoutDEBUG)
                            System.err.println("checkIfObjectAlreadyInList(objectId) " + template + " "
                                    + Long.toHexString(objectId));
                        continue;
                    }
                    containers.add(objectId);
                    object = createObject(template, objectId, planet, new Point3D(px + x1, py, pz + z1),
                            new Quaternion(qw, qx, qy, qz), null, true, true);
                    object.setAttachment("childObjects", null);
                    // must use the objectListId to identify the building later with cellMap.get(containerId)
                    buildingMap.put(objectId, ((BuildingObject) object));
                    //System.out.println("buildingMap put " + Long.toHexString(objectId));
                    /*if (!duplicate.containsValue(objectId)) {
                        ((BuildingObject) object).createTransaction(core.getBuildingODB().getEnvironment());
                        core.getBuildingODB().put((BuildingObject) object, Long.class, BuildingObject.class,
                                ((BuildingObject) object).getTransaction());
                        ((BuildingObject) object).getTransaction().commitSync();
                    }*/
                } else { // building without portal, Seems to never happen
                    object = createObject(template, 0, planet, new Point3D(px + x1, py, pz + z1),
                            new Quaternion(qw, qx, qy, qz), null, false, true);
                }
                if (object == null) {
                    //System.err.println("Buildout table contained an entry that can't be instantiated!");
                    continue;
                }
                object.setContainerPermissions(WorldPermissions.WORLD_PERMISSIONS);
                // NOTE(review): new Boolean(true) is deprecated; Boolean.TRUE would be
                // equivalent — left untouched in this documentation-only pass.
                if (radius > 256)
                    object.setAttachment("bigSpawnRange", new Boolean(true));
                if (!duplicate.containsValue(objectId) && object instanceof BuildingObject && portalCRC != 0) {
                    synchronized (persistentBuildings) {
                        persistentBuildings.add((BuildingObject) object);
                    }
                }
            } else if (containerId != 0) {
                // Contained object: position is cell-relative, so no area offset applied.
                object = createObject(template, 0, planet, new Point3D(px, py, pz),
                        new Quaternion(qw, qx, qy, qz), null, false, true);
                if (containers.contains(containerId)) {
                    object.setContainerPermissions(WorldPermissions.WORLD_PERMISSIONS);
                    object.setisInSnapshot(false);
                    //containers.add(objectId); // ?!?!?!
                }
                if (object instanceof CellObject && cellIndex != 0) {
                    object.setContainerPermissions(WorldCellPermissions.WORLD_CELL_PERMISSIONS);
                    ((CellObject) object).setCellNumber(cellIndex);
                    // Register the cell under its building id for later lookup.
                    List<CellObject> cellList = cellMap.get(containerId);
                    //System.out.println("Cell containerId " + Long.toHexString(containerId));
                    if (cellList != null) {
                        cellList.add(((CellObject) object));
                        cellMap.put(containerId, cellList);
                    } else {
                        cellList = new ArrayList<CellObject>();
                        cellList.add(((CellObject) object));
                        cellMap.put(containerId, cellList);
                    }
                }
                // SWGObject parent = getObject(containerId);
                //
                // if(parent != null && object != null) {
                //     if(parent instanceof BuildingObject && ((BuildingObject) parent).getCellByCellNumber(cellIndex) != null)
                //         continue;
                //     parent.add(object);
                // }
            } else {
                // Free-standing object without a persistent id.
                object = createObject(template, 0, planet, new Point3D(px + x1, py, pz + z1),
                        new Quaternion(qw, qx, qy, qz), null, false, true);
                object.setContainerPermissions(WorldPermissions.WORLD_PERMISSIONS);
            }
            if (object != null && object instanceof TangibleObject && !(object instanceof CreatureObject)) {
                ((TangibleObject) object).setStaticObject(true);
            }
            //System.out.println("Spawning: " + template + " at: X:" + object.getPosition().x + " Y: " + object.getPosition().y + " Z: " + object.getPosition().z);
            if (object != null)
                object.setAttachment("isBuildout", new Boolean(true));
        }
    }
    // for(BuildingObject building : persistentBuildings) {
    //     building.setAttachment("buildoutBuilding", true);
    //     core.getSWGObjectODB().put(building.getObjectID(), building);
    //     destroyObject(building);
    // }
}
From source file:com.mss.mirage.employee.general.EmployeeAction.java
/**
 * Struts action: builds the SQL filter for the "team quarterly appraisal" search
 * screen, resolves which team members the current user may see (by role:
 * Employee / Operations / Admin), populates the ops-contact and location
 * drop-down maps, and stashes the finished query string in the session under
 * {@code QS_EMP_APPRAISAL_LIST} for the listing page to execute.
 *
 * @return LOGIN when there is no valid session, SUCCESS on completion,
 *         ERROR if an exception occurs while building the search.
 *
 * NOTE(review): SECURITY — the WHERE clause is assembled by concatenating
 * request-supplied values (loginId, quarterly, status, departmentId, practiceId,
 * subPractice, opsContactId, location) directly into SQL. This is injectable and
 * should be migrated to a PreparedStatement with bind parameters; not changed
 * here because the string is consumed elsewhere via the session attribute.
 */
public String teamQuaterAppraisalSearch() {
    resultType = LOGIN;
    // Require a live session with a logged-in user.
    if (httpServletRequest.getSession(false) != null && httpServletRequest.getSession(false)
            .getAttribute(ApplicationConstants.SESSION_USER_ID) != null) {
        userRoleId = Integer.parseInt(httpServletRequest.getSession(false)
                .getAttribute(ApplicationConstants.SESSION_ROLE_ID).toString());
        // String workingCountry = httpServletRequest.getSession(false).getAttribute(ApplicationConstants.WORKING_COUNTRY).toString();
        // NOTE(review): empId is read but never used in this method.
        String empId = httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_EMP_ID)
                .toString();
        String loginId1 = httpServletRequest.getSession(false)
                .getAttribute(ApplicationConstants.SESSION_USER_ID).toString();
        if (AuthorizationManager.getInstance().isAuthorizedUser("QUARTERLY_APPRAISAL", userRoleId)) {
            try {
                GregorianCalendar cal = new GregorianCalendar();
                int year = cal.get(Calendar.YEAR);
                // Default the search year to the current year when unset.
                if (getYear() == 0) {
                    setYear(year);
                }
                Map rolesMap = (Map) httpServletRequest.getSession(false)
                        .getAttribute(ApplicationConstants.SESSION_MY_ROLES);
                // queryString = "SELECT tblQuarterlyAppraisals.Id,EmpId,AppraisalId,CONCAT(fname,' ',mName,'.',lName) AS empName,DATE_FORMAT(tblQuarterlyAppraisals.CreatedDate,'%M') AS mnth, YEAR(tblQuarterlyAppraisals.CreatedDate) AS yer,tblQuarterlyAppraisals.CreatedDate,Quarterly,STATUS,SubmittedDate,ApprovedDate,OpperationTeamStatus,CONCAT(emp2.fname,'.',emp2.lName) AS approvedBy FROM tblQuarterlyAppraisals LEFT JOIN tblEmployee emp1 ON(tblQuarterlyAppraisals.EmpId=emp1.Id) LEFT JOIN tblEmployee emp2 ON(tblQuarterlyAppraisals.ApprovedBy=emp2.LoginId) where year(tblQuarterlyAppraisals.CreatedDate)=" + getYear();
                queryString = "SELECT * FROM vwQuarterlyAppraisalsList where year(CreatedDate)=" + getYear();
                Map myTeamMemebrs = new HashMap();
                // System.out.println("httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_ROLE_NAME).toString()===" + httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_ROLE_NAME).toString());
                if (httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_ROLE_NAME)
                        .toString().equalsIgnoreCase("Employee")) {
                    // NOTE(review): hard-coded user override ("rkalaga") widens scope to
                    // all India employees — presumably a special-case grant; confirm.
                    if (loginId1.equals("rkalaga")) {
                        // myTeamMemebrs = DataSourceDataProvider.getInstance().getAllEmployees();
                        myTeamMemebrs = DataSourceDataProvider.getInstance().getAllEmployeesByCountry("India");
                    } else {
                        myTeamMemebrs = (Map) httpServletRequest.getSession(false)
                                .getAttribute(ApplicationConstants.SESSION_MY_TEAM_MAP);
                    }
                } else if (httpServletRequest.getSession(false)
                        .getAttribute(ApplicationConstants.SESSION_ROLE_NAME).toString()
                        .equalsIgnoreCase("Operations")) {
                    String department = httpServletRequest.getSession(false)
                            .getAttribute(ApplicationConstants.SESSION_MY_DEPT_ID).toString();
                    int isManager = Integer.parseInt(httpServletRequest.getSession(false)
                            .getAttribute(ApplicationConstants.SESSION_IS_USER_MANAGER).toString());
                    // Users listed in the QuarterlyAppraisal.Access property, Operations
                    // managers, and Admins get the widened (all-India) view.
                    String access[] = Properties.getProperty("QuarterlyAppraisal.Access")
                            .split(Pattern.quote(","));
                    List accessList = Arrays.asList(access);
                    if (accessList.contains(loginId1) || (department.equals("Operations") && isManager == 1)
                            || rolesMap.containsValue("Admin")) {
                        setAccessCount(1);
                    }
                    if (getAccessCount() == 1) {
                        // myTeamMemebrs = DataSourceDataProvider.getInstance().getAllEmployees();
                        myTeamMemebrs = DataSourceDataProvider.getInstance().getAllEmployeesByCountry("India");
                    } else {
                        myTeamMemebrs = DataSourceDataProvider.getInstance().getInstance()
                                .getEmployeeNamesByOperationsContactId((httpServletRequest.getSession(false)
                                        .getAttribute(ApplicationConstants.SESSION_EMP_ID).toString()));
                    }
                    // myTeamMemebrs = (Map) httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_MY_TEAM_MAP);
                }
                String teamList = DataSourceDataProvider.getInstance().getTeamLoginIdList(myTeamMemebrs);
                setMyTeamMembers(myTeamMemebrs);
                // Append optional filters; each is injected verbatim (see security note).
                if (loginId != null && !"".equals(loginId)) {
                    queryString = queryString + " AND loginId ='" + loginId + "'";
                } else {
                    if (!"".equals(teamList)) {
                        queryString = queryString + " AND loginId IN(" + teamList + ")";
                    } else {
                        // Empty team: force an empty result rather than leaking all rows.
                        queryString = queryString + " AND loginId IN('')";
                    }
                }
                if (getQuarterly() != null && !"".equals(quarterly)) {
                    queryString = queryString + " AND Quarterly='" + getQuarterly() + "' ";
                }
                if (status != null && !"".equals(status)) {
                    queryString = queryString + " AND status='" + status + "' ";
                }
                if (departmentId != null && !"".equals(departmentId)) {
                    queryString = queryString + " AND DepartmentId='" + departmentId + "' ";
                }
                if (practiceId != null && !"".equals(practiceId) && !"All".equals(practiceId)) {
                    queryString = queryString + " AND Practice='" + practiceId + "' ";
                }
                if (subPractice != null && !"".equals(subPractice) && !"All".equals(subPractice)) {
                    queryString = queryString + " AND SubPractice='" + subPractice + "' ";
                }
                if (opsContactId != null && !"".equals(opsContactId)) {
                    queryString = queryString + " AND OpsContactId=" + opsContactId + " ";
                }
                if (location != null && !"".equals(location)) {
                    queryString = queryString + " AND Location='" + location + "' ";
                }
                if (titleId != null && !"".equals(titleId)) {
                    // "Management" selects titles in the configured list; anything else
                    // selects the complement.
                    String ManagerList = Properties.getProperty("Management.Quarter");
                    if (titleId.equals("Management")) {
                        queryString = queryString + " AND FIND_IN_SET(Title,'" + ManagerList + "') ";
                    } else {
                        queryString = queryString + " AND !FIND_IN_SET(Title,'" + ManagerList + "') ";
                    }
                }
                /* if (httpServletRequest.getSession(false).getAttribute(ApplicationConstants.SESSION_ROLE_NAME).toString().equalsIgnoreCase("Operations") || loginId1.equals("rkalaga")) {
                       queryString = queryString + " AND STATUS NOT IN ('Submitted','Entered') ";
                   } else {
                       queryString = queryString + " AND STATUS NOT IN ('Entered') ";
                   } */
                // queryString=queryString+" ORDER BY yer,Quarterly";
                String Country = (String) httpServletRequest.getSession(false)
                        .getAttribute(ApplicationConstants.Living_COUNTRY);
                // Admins see ops contacts / locations for every country; others only
                // their own country.
                if (rolesMap.containsValue("Admin")) {
                    setOpsContactIdMap(DataSourceDataProvider.getInstance().getOpsContactId(Country, "Yes"));
                } else {
                    setOpsContactIdMap(DataSourceDataProvider.getInstance().getOpsContactId(Country, "No"));
                }
                if (rolesMap.containsValue("Admin")) {
                    setLocationsMap(DataSourceDataProvider.getInstance().getEmployeeLocationsList("%"));
                } else {
                    setLocationsMap(DataSourceDataProvider.getInstance().getEmployeeLocationsList(Country));
                }
                // Hand the finished query to the listing page via the session.
                httpServletRequest.getSession(false).setAttribute(ApplicationConstants.QS_EMP_APPRAISAL_LIST,
                        queryString);
                resultType = SUCCESS;
            } catch (Exception ex) {
                ex.printStackTrace();
                //List errorMsgList = ExceptionToListUtility.errorMessages(ex);
                httpServletRequest.getSession(false).setAttribute("errorMessage", ex.toString());
                resultType = ERROR;
            }
        } //END-Authorization Checking
    } //Close Session Checking
    return resultType;
}
From source file:com.cloud.hypervisor.vmware.resource.VmwareResource.java
/**
 * Performs a live migration of a VM together with its storage to a target host /
 * target datastore(s), as described by the MigrateWithStorageCommand.
 *
 * Workflow:
 *  1. Resolve source and target hypervisor hosts and verify they share a datacenter.
 *  2. Build a VirtualMachineRelocateSpec with one disk locator per volume; on
 *     pre-5.1 hosts, also ensure each target datastore is mounted on the source
 *     host (NFS is auto-mounted, VMFS must already be mounted/accessible).
 *  3. Keep non-migrated disks on their current datastores.
 *  4. Prepare target networking and secondary storage.
 *  5. Pre-5.1: relocate storage first, then migrate the VM; 5.1+: do both in a
 *     single relocate call.
 *  6. Consolidate disks and report the new path/chain info per volume.
 *
 * @param cmd migration command carrying the VM, target host and volume-to-pool map
 * @return a MigrateWithStorageAnswer with updated volume info, or one wrapping the
 *         failure cause
 */
protected Answer execute(MigrateWithStorageCommand cmd) {
    if (s_logger.isInfoEnabled()) {
        s_logger.info("Executing resource MigrateWithStorageCommand: " + _gson.toJson(cmd));
    }
    VirtualMachineTO vmTo = cmd.getVirtualMachine();
    String vmName = vmTo.getName();
    VmwareHypervisorHost srcHyperHost = null;
    VmwareHypervisorHost tgtHyperHost = null;
    VirtualMachineMO vmMo = null;
    ManagedObjectReference morDsAtTarget = null;
    ManagedObjectReference morDsAtSource = null;
    ManagedObjectReference morDc = null;
    ManagedObjectReference morDcOfTargetHost = null;
    ManagedObjectReference morTgtHost = new ManagedObjectReference();
    ManagedObjectReference morTgtDatastore = new ManagedObjectReference();
    VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec();
    List<VirtualMachineRelocateSpecDiskLocator> diskLocators = new ArrayList<VirtualMachineRelocateSpecDiskLocator>();
    VirtualMachineRelocateSpecDiskLocator diskLocator = null;
    String tgtDsName = "";
    String tgtDsHost;
    String tgtDsPath;
    int tgtDsPort;
    VolumeTO volume;
    StorageFilerTO filerTo;
    // Datastores we mount on the source host here and must unmount in finally.
    Set<String> mountedDatastoresAtSource = new HashSet<String>();
    List<VolumeObjectTO> volumeToList = new ArrayList<VolumeObjectTO>();
    // volume id -> virtual disk device key, used to match disks after migration.
    Map<Long, Integer> volumeDeviceKey = new HashMap<Long, Integer>();
    List<Pair<VolumeTO, StorageFilerTO>> volToFiler = cmd.getVolumeToFilerAsList();
    String tgtHost = cmd.getTargetHost();
    // Target host is encoded as "<type>:<value>@..."; rebuild the MOR from it.
    String tgtHostMorInfo = tgtHost.split("@")[0];
    morTgtHost.setType(tgtHostMorInfo.split(":")[0]);
    morTgtHost.setValue(tgtHostMorInfo.split(":")[1]);
    try {
        srcHyperHost = getHyperHost(getServiceContext());
        tgtHyperHost = new HostMO(getServiceContext(), morTgtHost);
        morDc = srcHyperHost.getHyperHostDatacenter();
        morDcOfTargetHost = tgtHyperHost.getHyperHostDatacenter();
        // Cross-datacenter migration is not supported.
        if (!morDc.getValue().equalsIgnoreCase(morDcOfTargetHost.getValue())) {
            // NOTE(review): typo "datacentesr" in this message — runtime string left
            // untouched in this documentation-only pass.
            String msg = "Source host & target host are in different datacentesr";
            throw new CloudRuntimeException(msg);
        }
        VmwareManager mgr = tgtHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
        String srcHostApiVersion = ((HostMO) srcHyperHost).getHostAboutInfo().getApiVersion();
        // find VM through datacenter (VM is not at the target host yet)
        vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName);
        if (vmMo == null) {
            String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue();
            s_logger.error(msg);
            throw new Exception(msg);
        }
        vmName = vmMo.getName();
        // Specify destination datastore location for each volume.
        for (Pair<VolumeTO, StorageFilerTO> entry : volToFiler) {
            volume = entry.first();
            filerTo = entry.second();
            s_logger.debug("Preparing spec for volume : " + volume.getName());
            morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost,
                    filerTo.getUuid());
            morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost,
                    filerTo.getUuid());
            if (morDsAtTarget == null) {
                String msg = "Unable to find the target datastore: " + filerTo.getUuid()
                        + " on target host: " + tgtHyperHost.getHyperHostName()
                        + " to execute MigrateWithStorageCommand";
                s_logger.error(msg);
                throw new Exception(msg);
            }
            morTgtDatastore = morDsAtTarget;
            // If host version is below 5.1 then simultaneous change of VM's datastore and host is not supported.
            // So since only the datastore will be changed first, ensure the target datastore is mounted on source host.
            if (srcHostApiVersion.compareTo("5.1") < 0) {
                tgtDsName = filerTo.getUuid().replace("-", "");
                tgtDsHost = filerTo.getHost();
                tgtDsPath = filerTo.getPath();
                tgtDsPort = filerTo.getPort();
                // If datastore is NFS and target datastore is not already mounted on source host then mount the datastore.
                if (filerTo.getType().equals(StoragePoolType.NetworkFilesystem)) {
                    if (morDsAtSource == null) {
                        morDsAtSource = srcHyperHost.mountDatastore(false, tgtDsHost, tgtDsPort, tgtDsPath,
                                tgtDsName);
                        if (morDsAtSource == null) {
                            throw new Exception("Unable to mount NFS datastore " + tgtDsHost + ":/"
                                    + tgtDsPath + " on " + _hostName);
                        }
                        // Remember for cleanup in finally.
                        mountedDatastoresAtSource.add(tgtDsName);
                        s_logger.debug(
                                "Mounted datastore " + tgtDsHost + ":/" + tgtDsPath + " on " + _hostName);
                    }
                }
                // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration.
                if (filerTo.getType().equals(StoragePoolType.VMFS)) {
                    if (morDsAtSource == null) {
                        s_logger.warn(
                                "If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration.");
                        throw new Exception("Target VMFS datastore: " + tgtDsPath
                                + " is not mounted on source host: " + _hostName);
                    }
                    DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morDsAtSource);
                    String srcHostValue = srcHyperHost.getMor().getValue();
                    if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) {
                        s_logger.warn(
                                "If host version is below 5.1, then target VMFS datastore(s) need to accessible to source host for a successful live storage migration.");
                        throw new Exception("Target VMFS datastore: " + tgtDsPath
                                + " is not accessible on source host: " + _hostName);
                    }
                }
                morTgtDatastore = morDsAtSource;
            }
            // The ROOT volume's datastore becomes the VM-level default target.
            if (volume.getType() == Volume.Type.ROOT) {
                relocateSpec.setDatastore(morTgtDatastore);
            }
            diskLocator = new VirtualMachineRelocateSpecDiskLocator();
            diskLocator.setDatastore(morDsAtSource);
            Pair<VirtualDisk, String> diskInfo = getVirtualDiskInfo(vmMo, volume.getPath() + ".vmdk");
            String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first());
            if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) {
                vmMo.updateAdapterTypeIfRequired(vmdkAbsFile);
            }
            int diskId = diskInfo.first().getKey();
            diskLocator.setDiskId(diskId);
            diskLocators.add(diskLocator);
            volumeDeviceKey.put(volume.getId(), diskId);
        }
        // If a target datastore is provided for the VM, then by default all volumes associated with the VM will be migrated to that target datastore.
        // Hence set the existing datastore as target datastore for volumes that are not to be migrated.
        List<Pair<Integer, ManagedObjectReference>> diskDatastores = vmMo.getAllDiskDatastores();
        for (Pair<Integer, ManagedObjectReference> diskDatastore : diskDatastores) {
            if (!volumeDeviceKey.containsValue(diskDatastore.first().intValue())) {
                diskLocator = new VirtualMachineRelocateSpecDiskLocator();
                diskLocator.setDiskId(diskDatastore.first().intValue());
                diskLocator.setDatastore(diskDatastore.second());
                diskLocators.add(diskLocator);
            }
        }
        relocateSpec.getDisk().addAll(diskLocators);
        // Prepare network at target before migration.
        NicTO[] nics = vmTo.getNics();
        for (NicTO nic : nics) {
            // prepare network on the host
            prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTgtHost), nic, false,
                    vmTo.getType());
        }
        // Ensure secondary storage mounted on target host.
        Pair<String, Long> secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId));
        String secStoreUrl = secStoreUrlAndId.first();
        Long secStoreId = secStoreUrlAndId.second();
        if (secStoreUrl == null) {
            String msg = "secondary storage for dc " + _dcId + " is not ready yet?";
            throw new Exception(msg);
        }
        mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId);
        ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnSpecificHost(secStoreUrl, tgtHyperHost);
        if (morSecDs == null) {
            String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl;
            throw new Exception(msg);
        }
        if (srcHostApiVersion.compareTo("5.1") < 0) {
            // Migrate VM's volumes to target datastore(s).
            if (!vmMo.changeDatastore(relocateSpec)) {
                throw new Exception("Change datastore operation failed during storage migration");
            } else {
                s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)");
            }
            // Migrate VM to target host.
            ManagedObjectReference morPool = tgtHyperHost.getHyperHostOwnerResourcePool();
            if (!vmMo.migrate(morPool, tgtHyperHost.getMor())) {
                throw new Exception("VM migration to target host failed during storage migration");
            } else {
                s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to "
                        + tgtHyperHost.getHyperHostName());
            }
        } else {
            // Simultaneously migrate VM's volumes to target datastore and VM to target host.
            relocateSpec.setHost(tgtHyperHost.getMor());
            relocateSpec.setPool(tgtHyperHost.getHyperHostOwnerResourcePool());
            if (!vmMo.changeDatastore(relocateSpec)) {
                throw new Exception("Change datastore operation failed during storage migration");
            } else {
                s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to "
                        + tgtHyperHost.getHyperHostName() + " and its storage to target datastore(s)");
            }
        }
        // Consolidate VM disks.
        // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies.
        if (!vmMo.consolidateVmDisks()) {
            s_logger.warn(
                    "VM disk consolidation failed after storage migration. Yet proceeding with VM migration.");
        } else {
            s_logger.debug("Successfully consolidated disks of VM " + vmName + ".");
        }
        // Update and return volume path and chain info for every disk because that could have changed after migration.
        VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder();
        for (Pair<VolumeTO, StorageFilerTO> entry : volToFiler) {
            volume = entry.first();
            long volumeId = volume.getId();
            VirtualDisk[] disks = vmMo.getAllDiskDevice();
            for (VirtualDisk disk : disks) {
                // NOTE(review): Integer (map value) compared to int — unboxes; would NPE
                // if volumeId were missing from the map, but every entry was put above.
                if (volumeDeviceKey.get(volumeId) == disk.getKey()) {
                    VolumeObjectTO newVol = new VolumeObjectTO();
                    String newPath = vmMo.getVmdkFileBaseName(disk);
                    String poolName = entry.second().getUuid().replace("-", "");
                    VirtualMachineDiskInfo diskInfo = diskInfoBuilder
                            .getDiskInfoByBackingFileBaseName(newPath, poolName);
                    newVol.setId(volumeId);
                    newVol.setPath(newPath);
                    newVol.setChainInfo(_gson.toJson(diskInfo));
                    volumeToList.add(newVol);
                    break;
                }
            }
        }
        return new MigrateWithStorageAnswer(cmd, volumeToList);
    } catch (Throwable e) {
        if (e instanceof RemoteException) {
            s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context");
            invalidateServiceContext();
        }
        String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e);
        s_logger.warn(msg, e);
        return new MigrateWithStorageAnswer(cmd, (Exception) e);
    } finally {
        // Cleanup datastores mounted on source host.
        for (String mountedDatastore : mountedDatastoresAtSource) {
            s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + _hostName);
            try {
                srcHyperHost.unmountDatastore(mountedDatastore);
            } catch (Exception unmountEx) {
                s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName
                        + ". Seems the datastore is still being used by " + _hostName
                        + ". Please unmount manually to cleanup.");
            }
            s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName);
        }
    }
}
From source file:io.warp10.continuum.gts.GTSHelper.java
/**
 * Parses one GTS input line of the form
 * {@code [=]TS/[LAT:LON]/[ELEV] [CLASS{LABELS}[{ATTRIBUTES}]] VALUE}
 * and appends the decoded datapoint to an encoder.
 *
 * A leading '=' marks a continuation line: class and labels are inherited from
 * the given encoder (which must then be non-null). A timestamp of the form
 * {@code T<delta>} is interpreted relative to 'now'. A new GTSEncoder is
 * allocated when the parsed class/labels differ from the current encoder's.
 *
 * @param encoder         encoder to continue, or null to force a fresh one
 * @param str             the input line
 * @param extraLabels     labels merged into the parsed ones (a null value removes
 *                        that label); ignored when labels are reused from encoder
 * @param now             reference time for omitted/relative timestamps; when null,
 *                        TimeSource.getTime() is used
 * @param maxValueSize    maximum accepted length of the value string
 * @param parseAttributes whether a trailing {@code {…}} attribute block is decoded
 * @return the encoder the value was added to (possibly newly allocated)
 * @throws ParseException on any malformed component
 * @throws IOException    propagated from encoder.addValue
 */
public static GTSEncoder parse(GTSEncoder encoder, String str, Map<String, String> extraLabels, Long now,
        long maxValueSize, boolean parseAttributes) throws ParseException, IOException {
    int idx = 0;
    int tsoffset = 0;
    // Leading '=' means continuation of the previous GTS (same class+labels).
    if ('=' == str.charAt(0)) {
        if (null == encoder) {
            throw new ParseException("Invalid continuation.", 0);
        }
        tsoffset = 1;
    }
    //idx = str.indexOf("/");
    idx = UnsafeString.indexOf(str, '/');
    if (-1 == idx) {
        throw new ParseException("Missing timestamp separator.", idx);
    }
    long timestamp;
    if (tsoffset == idx) {
        // No timestamp provided, use 'now'.
        timestamp = null != now ? (long) now : TimeSource.getTime();
    } else {
        if ('T' == str.charAt(tsoffset)) {
            // Support T-XXX to record timestamps which are relative to 'now', useful for
            // devices with no time reference but only relative timestamps.
            //timestamp = (null != now ? (long) now : TimeSource.getTime()) + Long.valueOf(str.substring(1 + tsoffset, idx));
            timestamp = (null != now ? (long) now : TimeSource.getTime())
                    + Long.parseLong(str.substring(1 + tsoffset, idx));
        } else {
            //timestamp = Long.valueOf(str.substring(tsoffset, idx));
            timestamp = Long.parseLong(str.substring(tsoffset, idx));
        }
    }
    // Advance past the '/'.
    idx++;
    //int idx2 = str.indexOf("/", idx);
    int idx2 = UnsafeString.indexOf(str, '/', idx);
    if (-1 == idx2) {
        throw new ParseException("Missing location/elevation separator.", idx);
    }
    long location = GeoTimeSerie.NO_LOCATION;
    if (idx != idx2) {
        // We have a location (lat:lon).
        String latlon = str.substring(idx, idx2);
        // Advance past the second '/'.
        idx = idx2 + 1;
        //idx2 = latlon.indexOf(":");
        idx2 = UnsafeString.indexOf(latlon, ':');
        //location = GeoXPLib.toGeoXPPoint(Double.valueOf(latlon.substring(0, idx2)), Double.valueOf(latlon.substring(idx2 + 1)));
        location = GeoXPLib.toGeoXPPoint(Double.parseDouble(latlon.substring(0, idx2)),
                Double.parseDouble(latlon.substring(idx2 + 1)));
    } else {
        // Advance past the second '/'.
        idx = idx2 + 1;
    }
    //idx2 = str.indexOf(" ", idx);
    idx2 = UnsafeString.indexOf(str, ' ', idx);
    if (-1 == idx2) {
        throw new ParseException(str, idx);
    }
    long elevation = GeoTimeSerie.NO_ELEVATION;
    if (idx != idx2) {
        // We have an elevation.
        //elevation = Long.valueOf(str.substring(idx, idx2));
        elevation = Long.parseLong(str.substring(idx, idx2));
    }
    // Advance past the ' ' (and any run of spaces).
    idx = idx2 + 1;
    while (idx < str.length() && str.charAt(idx) == ' ') {
        idx++;
    }
    // If line started with '=', assume there is no class+labels component.
    if (tsoffset > 0) {
        idx2 = -1;
    } else {
        //idx2 = str.indexOf("{", idx);
        idx2 = UnsafeString.indexOf(str, '{', idx);
    }
    String name = null;
    Map<String, String> labels = null;
    Map<String, String> attributes = null;
    boolean reuseLabels = false;
    if (-1 == idx2) {
        // If we are over the end of the string, we're missing a value.
        if (idx >= str.length()) {
            throw new ParseException("Missing value", idx);
        }
        // No class+labels, assume same class+labels as those in encoder, except
        // if encoder is null in which case we throw a parse exception.
        if (null == encoder) {
            throw new ParseException(str, idx);
        }
        name = encoder.getMetadata().getName();
        labels = encoder.getMetadata().getLabels();
        reuseLabels = true;
    } else {
        name = str.substring(idx, idx2);
        // Class names may be percent-encoded; decode only when a '%' is present.
        //if (name.contains("%")) {
        if (-1 != UnsafeString.indexOf(name, '%')) {
            try {
                name = URLDecoder.decode(name, "UTF-8");
            } catch (UnsupportedEncodingException uee) {
                // Can't happen, we're using UTF-8.
            }
        }
        // Advance past the '{'.
        idx = idx2 + 1;
        //idx2 = str.indexOf("}", idx);
        idx2 = UnsafeString.indexOf(str, '}', idx);
        if (-1 == idx2) {
            throw new ParseException(str, idx);
        }
        //
        // Parse labels
        //
        labels = parseLabels(null != extraLabels ? extraLabels.size() : 0, str.substring(idx, idx2));
        //
        // FIXME(hbs): parse attributes????
        //
        // Advance past the '}' and over spaces.
        idx = idx2 + 1;
        // FIXME(hbs): should we skip over attributes if they are present?
        if (idx < str.length() && str.charAt(idx) == '{') {
            // Optional attribute block immediately after the labels.
            idx++;
            int attrstart = idx;
            while (idx < str.length() && str.charAt(idx) != '}') {
                idx++;
            }
            if (parseAttributes) {
                if (idx >= str.length()) {
                    throw new ParseException("Missing attributes.", idx2);
                }
                attributes = parseLabels(str.substring(attrstart, idx));
            }
            idx++;
        }
        while (idx < str.length() && str.charAt(idx) == ' ') {
            idx++;
        }
        if (idx >= str.length()) {
            throw new ParseException("Missing value.", idx2);
        }
    }
    //
    // Add any provided extra labels
    //
    // WARNING(hbs): as we check reuseLabels, note that extraLabels won't be pushed onto the GTS labels
    // if reuseLabels is 'true', this means that if you call parse on a continuation line with different extraLabels
    // than for previous lines, the new extra labels won't be set. But this is not something that should be done anyway,
    // so it should not be considered a problem...
    //
    if (!reuseLabels && null != extraLabels) {
        labels.putAll(extraLabels);
        //
        // Remove labels with null values
        //
        // FIXME(hbs): may be removed when dummy tokens have disappeared
        //
        if (extraLabels.containsValue(null)) {
            Set<Entry<String, String>> entries = extraLabels.entrySet();
            while (labels.containsValue(null)) {
                for (Entry<String, String> entry : entries) {
                    if (null == entry.getValue()) {
                        labels.remove(entry.getKey());
                    }
                }
            }
        }
    }
    //
    // Extract value
    //
    String valuestr = str.substring(idx);
    if (valuestr.length() > maxValueSize) {
        throw new ParseException("Value too large at for GTS "
                + (null != encoder ? GTSHelper.buildSelector(encoder.getMetadata()) : ""), 0);
    }
    Object value = parseValue(valuestr);
    if (null == value) {
        throw new ParseException("Unable to parse value '" + valuestr + "'", 0);
    }
    // Allocate a new Encoder if need be, with a base timestamp of 0L.
    if (null == encoder || !name.equals(encoder.getName())
            || !labels.equals(encoder.getMetadata().getLabels())) {
        encoder = new GTSEncoder(0L);
        encoder.setName(name);
        //encoder.setLabels(labels);
        encoder.getMetadata().setLabels(labels);
        if (null != attributes) {
            encoder.getMetadata().setAttributes(attributes);
        }
    }
    encoder.addValue(timestamp, location, elevation, value);
    return encoder;
}