/**
 * Command that backs up a snapshot from primary to secondary storage.
 *
 * @param pool                the primary storage pool the snapshot lives on
 * @param secondaryStorageUrl the secondary storage location; this is what shows up in the UI when
 *                            you click on Secondary storage — in the vmops.host_details table it is
 *                            the value of the mount.parent field
 * @param snapshotUuid        the UUID of the snapshot which is going to be backed up
 * @param snapshotName        name of the snapshot
 * @param dcId                data center (zone) id
 * @param accountId           owning account id
 * @param volumeId            id of the volume the snapshot was taken from
 */
public SnapshotCommand(final StoragePool pool, final String secondaryStorageUrl, final String snapshotUuid, final String snapshotName, final Long dcId, final Long accountId,
        final Long volumeId) {
    primaryStoragePoolNameLabel = pool.getUuid();
    primaryPool = new StorageFilerTO(pool);
    this.secondaryStorageUrl = secondaryStorageUrl;
    this.snapshotUuid = snapshotUuid;
    this.snapshotName = snapshotName;
    this.dcId = dcId;
    this.accountId = accountId;
    this.volumeId = volumeId;
}
/**
 * Rejects the migration when the destination datastore already has more pending
 * migration jobs than the configured threshold allows. A null or non-positive
 * threshold disables the check entirely.
 *
 * @param destPool the destination storage pool of the migration being started
 * @throws CloudRuntimeException when the pending-job count exceeds the threshold
 */
private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
    final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
    if (threshold == null || threshold <= 0) {
        return; // limit not configured
    }
    // Pending jobs are matched by the serialized "storageid" field in the job payload.
    final String storageIdFilter = "\"storageid\":\"" + destPool.getUuid() + "\"";
    final long pendingJobs = _jobMgr.countPendingJobs(storageIdFilter, MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
    if (pendingJobs > threshold) {
        throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
    }
}
/**
 * Enforces the configurable per-datastore limit on concurrently pending migration jobs.
 * A null or non-positive threshold disables the check.
 *
 * @param destPool the destination storage pool of the migration being started
 * @throws CloudRuntimeException when the pending-job count already exceeds the threshold
 */
private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPool) {
    final Long threshold = VolumeApiService.ConcurrentMigrationsThresholdPerDatastore.value();
    if (threshold != null && threshold > 0) {
        // Pending jobs are matched by the serialized "storageid" field in the job payload,
        // across the VM-migration and both volume-migration command types.
        long count = _jobMgr.countPendingJobs("\"storageid\":\"" + destPool.getUuid() + "\"", MigrateVMCmd.class.getName(), MigrateVolumeCmd.class.getName(), MigrateVolumeCmdByAdmin.class.getName());
        if (count > threshold) {
            throw new CloudRuntimeException("Number of concurrent migration jobs per datastore exceeded the threshold: " + threshold.toString() + ". Please try again after some time.");
        }
    }
}
/**
 * Deletes the elastistor TSM backing the given primary storage pool.
 * A failed deletion (false return from the elastistor API) is logged but not rethrown;
 * only an exception from the API call is surfaced to the caller.
 *
 * @param pool    the primary storage pool being removed
 * @param managed whether the pool is managed (forwarded to the elastistor API)
 * @throws CloudRuntimeException if the elastistor API call itself fails
 */
private void deleteElastistorVolume(StoragePool pool, boolean managed) {
    String poolid = pool.getUuid();
    boolean status;
    try {
        status = ElastistorUtil.deleteElastistorTsm(poolid, managed);
    } catch (Throwable e) {
        // Preserve the original exception as the cause instead of flattening it into the
        // message (the old "…" + e concatenation discarded the stack trace).
        throw new CloudRuntimeException("Failed to delete primary storage on elastistor", e);
    }
    if (status) {
        s_logger.info("deletion of elastistor primary storage complete");
    } else {
        s_logger.error("deletion of elastistor volume failed");
    }
}
/**
 * Command asking the resource to create a private template from an existing volume.
 *
 * @param pool                primary pool holding the source volume
 * @param secondaryStorageUrl secondary storage location for the resulting template
 * @param templateId          id of the template being created
 * @param accountId           owning account id
 * @param userSpecifiedName   display name chosen by the user
 * @param uniqueName          internal unique name for the template
 * @param volumePath          path of the source volume on primary storage
 * @param vmName              name of the VM the volume belongs to
 * @param wait                wait value forwarded to {@code setWait}
 */
public CreatePrivateTemplateFromVolumeCommand(StoragePool pool, String secondaryStorageUrl, long templateId, long accountId, String userSpecifiedName, String uniqueName,
        String volumePath, String vmName, int wait) {
    primaryStoragePoolNameLabel = pool.getUuid();
    _primaryPool = new StorageFilerTO(pool);
    _secondaryStorageUrl = secondaryStorageUrl;
    _templateId = templateId;
    _accountId = accountId;
    _userSpecifiedName = userSpecifiedName;
    _uniqueName = uniqueName;
    _volumePath = volumePath;
    _vmName = vmName;
    setWait(wait);
}
/**
 * Command to download a template into the given primary storage pool.
 *
 * @param name      template name
 * @param url       source URL of the template
 * @param format    image format of the template
 * @param accountId owning account id
 * @param pool      destination primary storage pool
 * @param wait      wait value forwarded to {@code setWait}
 */
public PrimaryStorageDownloadCommand(final String name, final String url, final ImageFormat format, final long accountId, final StoragePool pool, final int wait) {
    super(name, url, format, accountId);
    primaryPool = new StorageFilerTO(pool);
    poolUuid = pool.getUuid();
    poolId = pool.getId();
    setWait(wait);
}
/**
 * We will add a mapping of volume to storage pool if needed. The conditions to add a mapping are the following:
 * <ul>
 * <li> The candidate storage pool where the volume is to be allocated can be accessed by the target host
 * <li> If no storage pool is found to allocate the volume we throw an exception.
 * </ul>
 *
 * Side note: this method should only be called if the volume is on local storage or if we are executing a cross cluster migration.
 */
protected void createVolumeToStoragePoolMappingIfPossible(VirtualMachineProfile profile, Host targetHost, Map<Volume, StoragePool> volumeToPoolObjectMap, Volume volume,
        StoragePoolVO currentPool) {
    List<StoragePool> storagePoolList = getCandidateStoragePoolsToMigrateLocalVolume(profile, targetHost, volume);
    if (CollectionUtils.isEmpty(storagePoolList)) {
        // Grammar fix in the message: "There is not storage pools" -> "There are no storage pools".
        throw new CloudRuntimeException(String.format("There are no storage pools available at the target host [%s] to migrate volume [%s]", targetHost.getUuid(), volume.getUuid()));
    }
    // Shuffle so repeated migrations spread volumes across the candidate pools.
    Collections.shuffle(storagePoolList);
    // If the volume's current pool is among the candidates, the target host can already
    // access it and no re-mapping is required.
    boolean canTargetHostAccessVolumeCurrentStoragePool = storagePoolList.stream().anyMatch(pool -> pool.getId() == currentPool.getId());
    if (!canTargetHostAccessVolumeCurrentStoragePool) {
        // Pick the first (random, thanks to the shuffle) accessible candidate as the target.
        volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(storagePoolList.get(0).getUuid()));
    }
}
private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { boolean isDebugEnabled = s_logger.isDebugEnabled(); if(isDebugEnabled) { String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid()); s_logger.debug(msg); } setDestinationPoolAndReallocateNetwork(destPool, vm); // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE Long destPodId = destPool.getPodId(); Long vmPodId = vm.getPodIdToDeployIn(); if (destPodId == null || ! destPodId.equals(vmPodId)) { if(isDebugEnabled) { String msg = String.format("resetting lasHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId); s_logger.debug(msg); } vm.setLastHostId(null); vm.setPodIdToDeployIn(destPodId); // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod) }// else keep last host set for this vm markVolumesInPool(vm,destPool, hypervisorMigrationResults); // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0) // OfflineVmwareMigration: iterate over the volumes for data updates }
/**
 * Work item capturing the parameters of a VM migration so it can be executed as a job.
 * Zone/pod/cluster/host ids are taken from the deploy destination when present, and the
 * per-volume storage mapping is flattened to a uuid-to-uuid map (null when the
 * destination carries no disk-to-pool mapping).
 *
 * @param userId      id of the user requesting the migration
 * @param accountId   owning account id
 * @param vmId        id of the VM to migrate
 * @param handlerName name of the work handler
 * @param srcHostId   host the VM currently runs on
 * @param dst         deploy destination describing where the VM should go
 */
public VmWorkMigrate(long userId, long accountId, long vmId, String handlerName, long srcHostId, DeployDestination dst) {
    super(userId, accountId, vmId, handlerName);
    this.srcHostId = srcHostId;
    zoneId = dst.getDataCenter() == null ? null : dst.getDataCenter().getId();
    podId = dst.getPod() == null ? null : dst.getPod().getId();
    clusterId = dst.getCluster() == null ? null : dst.getCluster().getId();
    hostId = dst.getHost() == null ? null : dst.getHost().getId();
    if (dst.getStorageForDisks() == null) {
        storage = null;
    } else {
        storage = new HashMap<String, String>(dst.getStorageForDisks().size());
        for (Map.Entry<Volume, StoragePool> diskToPool : dst.getStorageForDisks().entrySet()) {
            storage.put(diskToPool.getKey().getUuid(), diskToPool.getValue().getUuid());
        }
    }
}
/**
 * Builds a transfer object from a storage pool, copying its identifying and
 * connection attributes (id, uuid, host address, port, path, type, user info).
 *
 * @param pool the storage pool to snapshot into this TO
 */
public StorageFilerTO(StoragePool pool) {
    id = pool.getId();
    uuid = pool.getUuid();
    host = pool.getHostAddress();
    port = pool.getPort();
    path = pool.getPath();
    type = pool.getPoolType();
    userInfo = pool.getUserInfo();
}
/**
 * Lists all storage pools available for migrating the requested volume, flagging each
 * pool as suitable or not based on whether it appears in the suitable-pool list
 * returned by the manager. Responses are sorted by suitability and name before being
 * set on the command.
 */
@Override
public void execute() {
    Pair<List<? extends StoragePool>, List<? extends StoragePool>> pools = _mgr.listStoragePoolsForMigrationOfVolume(getId());
    ListResponse<StoragePoolResponse> response = new ListResponse<StoragePoolResponse>();
    List<StoragePoolResponse> poolResponses = new ArrayList<StoragePoolResponse>();
    List<? extends StoragePool> allPools = pools.first();
    List<? extends StoragePool> suitablePoolList = pools.second();
    for (StoragePool pool : allPools) {
        StoragePoolResponse poolResponse = _responseGenerator.createStoragePoolForMigrationResponse(pool);
        // Primitive boolean instead of a boxed Boolean local; a pool is suitable when its
        // uuid appears in the suitable-pool list.
        boolean suitableForMigration = suitablePoolList.stream().anyMatch(suitablePool -> StringUtils.equals(suitablePool.getUuid(), pool.getUuid()));
        poolResponse.setSuitableForMigration(suitableForMigration);
        poolResponse.setObjectName("storagepool");
        poolResponses.add(poolResponse);
    }
    sortPoolsBySuitabilityAndName(poolResponses);
    response.setResponses(poolResponses);
    response.setResponseName(getCommandName());
    this.setResponseObject(response);
}
/**
 * Command to destroy the copy of a template held on a primary storage pool.
 * The template copy is described as a VolumeTO built from the template-pool
 * reference (install path, size) and the pool's type/uuid/path; the name,
 * folder, and chain-info slots are intentionally left null.
 *
 * @param pool            primary storage pool holding the template copy
 * @param templatePoolRef association row linking the template to this pool
 */
public DestroyCommand(StoragePool pool, VMTemplateStorageResourceAssoc templatePoolRef) {
    volume = new VolumeTO(templatePoolRef.getId(), null, pool.getPoolType(), pool.getUuid(), null, pool.getPath(), templatePoolRef.getInstallPath(), templatePoolRef.getTemplateSize(), null);
}
/**
 * Performs the validations required for replacing the disk offering while migrating the volume of storage.
 * If no new disk offering is provided, we do not execute any validation. If a disk offering is informed,
 * we then proceed with the following checks.
 * <ul>
 * <li>If the disk is being migrated to shared storage and the new disk offering is for local storage (or
 * vice versa), we throw an {@link InvalidParameterValueException}. Bear in mind that we are validating only
 * the new disk offering. If none is provided we can override the current disk offering. This means, placing
 * a volume with shared disk offering in local storage and vice versa;
 * <li>We then proceed checking that the target storage pool supports the new disk offering via
 * {@link #doesTargetStorageSupportDiskOffering(StoragePool, DiskOfferingVO)}.
 * </ul>
 *
 * If all of the above validations pass, we check if the size of the new disk offering is different from the
 * volume. If it is, we log a warning message.
 */
protected void validateConditionsToReplaceDiskOfferingOfVolume(VolumeVO volume, DiskOfferingVO newDiskOffering, StoragePool destPool) {
    if (newDiskOffering == null) {
        return;
    }
    // Local/shared mismatch in either direction is invalid (parentheses added for clarity only).
    if ((destPool.isShared() && newDiskOffering.isUseLocalStorage()) || (destPool.isLocal() && newDiskOffering.isShared())) {
        // Typo fix in the user-facing message: "assing" -> "assign".
        throw new InvalidParameterValueException("You cannot move the volume to a shared storage and assign a disk offering for local storage and vice versa.");
    }
    if (!doesTargetStorageSupportDiskOffering(destPool, newDiskOffering)) {
        throw new InvalidParameterValueException(String.format("Target Storage [id=%s] tags [%s] does not match new disk offering [id=%s] tags [%s].", destPool.getUuid(),
                getStoragePoolTags(destPool), newDiskOffering.getUuid(), newDiskOffering.getTags()));
    }
    if (volume.getSize() != newDiskOffering.getDiskSize()) {
        DiskOfferingVO oldDiskOffering = this._diskOfferingDao.findById(volume.getDiskOfferingId());
        s_logger.warn(String.format(
                "You are migrating a volume [id=%s] and changing the disk offering[from id=%s to id=%s] to reflect this migration. However, the sizes of the volume and the new disk offering are different.",
                volume.getUuid(), oldDiskOffering.getUuid(), newDiskOffering.getUuid()));
    }
    s_logger.info(String.format("Changing disk offering to [uuid=%s] while migrating volume [uuid=%s, name=%s].", newDiskOffering.getUuid(), volume.getUuid(), volume.getName()));
}
volumeToStorageUuid.add(new Pair<>(volumeTo, ((StoragePool)entry.getValue()).getUuid()));
/**
 * Builds a transfer object carrying a volume's attributes plus details of the pool it
 * resides on. The device id is copied only when the volume actually has one.
 *
 * @param volume the volume to describe
 * @param pool   the storage pool the volume resides on
 */
public VolumeTO(Volume volume, StoragePool pool) {
    // Volume-derived attributes.
    id = volume.getId();
    name = volume.getName();
    path = volume.getPath();
    size = volume.getSize();
    type = volume.getVolumeType();
    mountPoint = volume.getFolder();
    chainInfo = volume.getChainInfo();
    chainSize = volume.getVmSnapshotChainSize();
    // Pool-derived attributes.
    storagePoolType = pool.getPoolType();
    storagePoolUuid = pool.getUuid();
    if (volume.getDeviceId() != null) {
        deviceId = volume.getDeviceId();
    }
}