@Override
public Scope getScope() {
    StoragePoolVO vo = dataStoreDao.findById(pdsv.getId());
    if (vo.getScope() == ScopeType.CLUSTER) {
        return new ClusterScope(vo.getClusterId(), vo.getPodId(), vo.getDataCenterId());
    } else if (vo.getScope() == ScopeType.ZONE) {
        return new ZoneScope(vo.getDataCenterId());
    } else if (vo.getScope() == ScopeType.HOST) {
        List<StoragePoolHostVO> poolHosts = poolHostDao.listByPoolId(vo.getId());
        if (poolHosts.size() > 0) {
            return new HostScope(poolHosts.get(0).getHostId(), vo.getClusterId(), vo.getDataCenterId());
        }
        s_logger.debug("Cannot find a local storage entry in the pool-host table for pool: " + vo.getId());
    }
    return null;
}
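// Hedged usage sketch (hypothetical caller, not from the source): narrowing the Scope
// returned above to decide where an operation may run. primaryStore is assumed in scope.
Scope scope = primaryStore.getScope();
if (scope instanceof ClusterScope) {
    s_logger.debug("Pool is cluster-scoped; cluster id: " + scope.getScopeId());
} else if (scope instanceof ZoneScope) {
    s_logger.debug("Pool is zone-wide; zone id: " + scope.getScopeId());
}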
private List<Map<String, String>> getVolumesToDisconnect(VirtualMachine vm) {
    List<Map<String, String>> volumesToDisconnect = new ArrayList<>();

    List<VolumeVO> volumes = _volsDao.findByInstance(vm.getId());
    if (CollectionUtils.isEmpty(volumes)) {
        return volumesToDisconnect;
    }

    for (VolumeVO volume : volumes) {
        StoragePoolVO storagePool = _storagePoolDao.findById(volume.getPoolId());

        if (storagePool != null && storagePool.isManaged()) {
            Map<String, String> info = new HashMap<>(3);

            info.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress());
            info.put(DiskTO.STORAGE_PORT, String.valueOf(storagePool.getPort()));
            info.put(DiskTO.IQN, volume.get_iScsiName());

            volumesToDisconnect.add(info);
        }
    }

    return volumesToDisconnect;
}
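// Hedged usage sketch (hypothetical caller, not from the source): each map returned by
// getVolumesToDisconnect carries the coordinates an agent needs to log out of a
// managed-storage target.
for (Map<String, String> target : getVolumesToDisconnect(vm)) {
    String storageHost = target.get(DiskTO.STORAGE_HOST);
    String storagePort = target.get(DiskTO.STORAGE_PORT);
    String iqn = target.get(DiskTO.IQN);
    s_logger.debug(String.format("Disconnecting target %s at %s:%s", iqn, storageHost, storagePort));
}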
private ModifyTargetsCommand getModifyTargetsCommand(long storagePoolId, String iqn, boolean add) {
    StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId);

    Map<String, String> details = new HashMap<>();
    details.put(ModifyTargetsCommand.IQN, iqn);
    details.put(ModifyTargetsCommand.STORAGE_TYPE, storagePool.getPoolType().name());
    details.put(ModifyTargetsCommand.STORAGE_UUID, storagePool.getUuid());
    details.put(ModifyTargetsCommand.STORAGE_HOST, storagePool.getHostAddress());
    details.put(ModifyTargetsCommand.STORAGE_PORT, String.valueOf(storagePool.getPort()));

    ModifyTargetsCommand cmd = new ModifyTargetsCommand();

    List<Map<String, String>> targets = new ArrayList<>();
    targets.add(details);

    cmd.setTargets(targets);
    cmd.setApplyToAllHostsInCluster(true);
    cmd.setAdd(add);
    cmd.setTargetTypeToRemove(ModifyTargetsCommand.TargetTypeToRemove.DYNAMIC);

    return cmd;
}
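// Hedged usage sketch (volume and storagePoolId assumed to be in scope): the same builder
// covers both halves of an attach/detach cycle; only the add flag differs.
ModifyTargetsCommand addTargets = getModifyTargetsCommand(storagePoolId, volume.get_iScsiName(), true);
ModifyTargetsCommand removeTargets = getModifyTargetsCommand(storagePoolId, volume.get_iScsiName(), false);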
/**
 * Executes the managed storage checks for the <volume, storage pool> mapping entered by the user. The checks executed by this method are the following:
 * <ul>
 * <li> If the current storage pool of the volume is not managed storage, there is nothing to validate here.
 * <li> If the current storage pool is managed storage and the target storage pool ID differs from the current one, we throw an exception.
 * </ul>
 */
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
    if (!currentPool.isManaged()) {
        return;
    }
    if (currentPool.getId() == targetPool.getId()) {
        return;
    }
    throw new CloudRuntimeException(String.format(
            "Currently, a volume on managed storage can only be 'migrated' to itself [volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].",
            volume.getUuid(), currentPool.getUuid(), targetPool.getUuid()));
}
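// A hedged unit-test sketch (JUnit 4 + Mockito; the test wiring and the "service" under
// test are assumptions, not from the source) exercising the check above: migrating a
// managed volume to a different pool must fail.
@Test(expected = CloudRuntimeException.class)
public void managedVolumeCannotBeMigratedToAnotherPool() {
    StoragePoolVO currentPool = Mockito.mock(StoragePoolVO.class);
    StoragePoolVO targetPool = Mockito.mock(StoragePoolVO.class);
    VolumeVO volume = Mockito.mock(VolumeVO.class);

    Mockito.when(currentPool.isManaged()).thenReturn(true);
    Mockito.when(currentPool.getId()).thenReturn(1L);
    Mockito.when(targetPool.getId()).thenReturn(2L);

    service.executeManagedStorageChecksWhenTargetStoragePoolProvided(currentPool, volume, targetPool);
}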
public PrimaryDataStoreDaoImpl() {
    AllFieldSearch = createSearchBuilder();
    AllFieldSearch.and("name", AllFieldSearch.entity().getName(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("uuid", AllFieldSearch.entity().getUuid(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("datacenterId", AllFieldSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("hostAddress", AllFieldSearch.entity().getHostAddress(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("status", AllFieldSearch.entity().getStatus(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("scope", AllFieldSearch.entity().getScope(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("path", AllFieldSearch.entity().getPath(), SearchCriteria.Op.EQ);
    AllFieldSearch.and("podId", AllFieldSearch.entity().getPodId(), Op.EQ);
    AllFieldSearch.and("clusterId", AllFieldSearch.entity().getClusterId(), Op.EQ);
    AllFieldSearch.and("storage_provider_name", AllFieldSearch.entity().getStorageProviderName(), Op.EQ);
    AllFieldSearch.done();

    DcPodSearch = createSearchBuilder();
    DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    DcPodSearch.and("status", DcPodSearch.entity().getStatus(), SearchCriteria.Op.EQ);
    DcPodSearch.and("scope", DcPodSearch.entity().getScope(), SearchCriteria.Op.EQ);
    DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
    DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
    DcPodSearch.cp();
    DcPodSearch.and().op("nullcluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.NULL);
    DcPodSearch.or("cluster", DcPodSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
    DcPodSearch.cp();
    DcPodSearch.done();

    DcPodAnyClusterSearch = createSearchBuilder();
    DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
    DcPodAnyClusterSearch.and("status", DcPodAnyClusterSearch.entity().getStatus(), SearchCriteria.Op.EQ);
    DcPodAnyClusterSearch.and("scope", DcPodAnyClusterSearch.entity().getScope(), SearchCriteria.Op.EQ);
    DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL);
    DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ);
    DcPodAnyClusterSearch.cp();
    DcPodAnyClusterSearch.done();
}
@Override
public void createCapacityEntry(StoragePoolVO storagePool, short capacityType, long allocated) {
    SearchCriteria<CapacityVO> capacitySC = _capacityDao.createSearchCriteria();
    capacitySC.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, storagePool.getId());
    capacitySC.addAnd("dataCenterId", SearchCriteria.Op.EQ, storagePool.getDataCenterId());
    capacitySC.addAnd("capacityType", SearchCriteria.Op.EQ, capacityType);

    long totalOverProvCapacity;
    if (storagePool.getPoolType().supportsOverProvisioning()) {
        BigDecimal overProvFactor = getStorageOverProvisioningFactor(storagePool.getId());
        totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(storagePool.getCapacityBytes())).longValue();
        s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString() + " with overprovisioning factor " + overProvFactor.toString());
        s_logger.debug("Total over provisioned capacity calculated is " + overProvFactor + " * " + storagePool.getCapacityBytes());
    } else {
        s_logger.debug("Found storage pool " + storagePool.getName() + " of type " + storagePool.getPoolType().toString());
        totalOverProvCapacity = storagePool.getCapacityBytes();
    }
    s_logger.debug("Total over provisioned capacity of the pool " + storagePool.getName() + " id: " + storagePool.getId() + " is " + totalOverProvCapacity);

    CapacityState capacityState = CapacityState.Enabled;
    if (storagePool.getScope() == ScopeType.ZONE) {
        DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId());
        AllocationState allocationState = dc.getAllocationState();
        capacityState = (allocationState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
    } else {
        if (storagePool.getClusterId() != null) {
            ClusterVO cluster = ApiDBUtils.findClusterById(storagePool.getClusterId());
            if (cluster != null) {
                AllocationState allocationState = _configMgr.findClusterAllocationState(cluster);
                capacityState = (allocationState == AllocationState.Disabled) ? CapacityState.Disabled : CapacityState.Enabled;
            }
        }
    }
    if (storagePool.getScope() == ScopeType.HOST) {
        // ...
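// Worked example (illustrative numbers, not from the source): with an over-provisioning
// factor of 2.0 and a 100 GiB pool, the capacity entry records 200 GiB of total capacity.
BigDecimal overProvFactor = new BigDecimal("2.0");
long capacityBytes = 100L * 1024 * 1024 * 1024;  // 107374182400
long totalOverProvCapacity = overProvFactor.multiply(new BigDecimal(capacityBytes)).longValue();  // 214748364800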
if (!(storagePool.isManaged())) {
    super.createAsync(dataStore, dataObject, callback);
    // ...
}

volume.setPoolType(storagePool.getPoolType());
_volumeDao.update(volume.getId(), volume);

long capacityIops = storagePool.getCapacityIops();
capacityIops = capacityIops - Iops;
if (capacityIops < 0) {
    throw new CloudRuntimeException("IOPS not available. [pool:" + storagePool.getName() + "] [availiops:" + capacityIops + "] [requirediops:" + Iops + "]");
}

// ...
} else if (dataStoreVO.getPoolType().equals(StoragePoolType.NetworkFilesystem) || dataStoreVO.getPoolType().equals(StoragePoolType.Filesystem)) {
    protocoltype = "nfs";
} else {
    // ...
}

try {
    esvolume = ElastistorUtil.createElastistorVolume(volumeName, dataStoreVO.getUuid(), quotaSize, Iops, protocoltype, volumeName);
} catch (Throwable e) {
    s_logger.error(e.toString(), e);
    // ...
}

long capacityBytes = storagePool.getCapacityBytes();
long usedBytes = storagePool.getUsedBytes();
storagePool.setCapacityIops(capacityIops);
storagePool.setUsedBytes(usedBytes > capacityBytes ? capacityBytes : usedBytes);
List<VolumeVO> volumes = _volsDao.findByPoolId(pool.getId(), null);
List<String> volumeLocators = new ArrayList<String>();
for (VolumeVO volume : volumes) {
    // ... (collect a locator for each volume)
}

if (pool.getScope() == ScopeType.ZONE) {
    volumeStatsByUuid = new HashMap<>();
    for (final Cluster cluster : _clusterDao.listByZoneId(pool.getDataCenterId())) {
        final Map<String, VolumeStatsEntry> volumeStatsForCluster = _userVmMgr.getVolumeStatistics(cluster.getId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value());
        if (volumeStatsForCluster != null) {
            // ... (merge the per-cluster entries)
        }
    }
} else {
    volumeStatsByUuid = _userVmMgr.getVolumeStatistics(pool.getClusterId(), pool.getUuid(), pool.getPoolType(), volumeLocators, StatsTimeout.value());
}

// ...
} catch (Exception e) {
    s_logger.warn("Failed to get volume stats for cluster with ID: " + pool.getClusterId(), e);
    continue;
}
if (pool.getScope() == ScopeType.ZONE) {
    spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), null, null, ScopeType.ZONE);
} else {
    spes = primaryDataStoreDao.listBy(pool.getDataCenterId(), pool.getPodId(), pool.getClusterId(), ScopeType.CLUSTER);
}
for (StoragePoolVO sp : spes) {
    if (sp.getStatus() == StoragePoolStatus.PrepareForMaintenance) {
        throw new CloudRuntimeException("Only one storage pool in a cluster can be in PrepareForMaintenance mode, " + sp.getId() + " is already in PrepareForMaintenance mode");
    }
}

if (pool.getScope().equals(ScopeType.ZONE)) {
    if (HypervisorType.Any.equals(pool.getHypervisor())) {
        hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZone(pool.getDataCenterId());
    } else {
        hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(pool.getHypervisor(), pool.getDataCenterId());
    }
} else {
    hosts = _resourceMgr.listHostsInClusterByStatus(pool.getClusterId(), Status.Up);
}

if (hosts == null || hosts.size() == 0) {
    pool.setStatus(StoragePoolStatus.Maintenance);
    primaryDataStoreDao.update(pool.getId(), pool);
    return true;
} else {
    pool.setStatus(StoragePoolStatus.PrepareForMaintenance);
    primaryDataStoreDao.update(pool.getId(), pool);
}

List<StoragePoolVO> upPools = primaryDataStoreDao.listByStatusInZone(pool.getDataCenterId(), StoragePoolStatus.Up);
boolean restart = true;
if (upPools == null || upPools.size() == 0) {
    // ...
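// State-transition summary (informal, inferred from the flow above):
//   Up -> PrepareForMaintenance -> Maintenance
// A pool moves straight to Maintenance when there is no Up host to notify; otherwise it
// waits in PrepareForMaintenance while hosts are told about it and VMs are handled. The
// PrepareForMaintenance guard earlier ensures only one pool per cluster is in that state.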
host = _hostDao.findById(hostId);
if (host != null && host.getHypervisorType() == HypervisorType.XenServer && volumeToAttachStoragePool != null && volumeToAttachStoragePool.isManaged()) {
    sendCommand = true;
}

verifyManagedStorage(volumeToAttachStoragePool.getId(), hostId);

DataStore dataStore = volumeToAttachStoragePool != null ? dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), DataStoreRole.Primary) : null;

// For managed storage on KVM, the volume path must carry the iSCSI name before the attach is sent.
if (host != null && host.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
    volumeToAttach.setPath(volumeToAttach.get_iScsiName());
    // ...
}

details.put(DiskTO.MANAGED, String.valueOf(volumeToAttachStoragePool.isManaged()));
details.put(DiskTO.STORAGE_HOST, volumeToAttachStoragePool.getHostAddress());
details.put(DiskTO.STORAGE_PORT, String.valueOf(volumeToAttachStoragePool.getPort()));
details.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeToAttach.getSize()));
details.put(DiskTO.IQN, volumeToAttach.get_iScsiName());

// ... (after a successful attach answer)
if (volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
    volumeToAttach.setPath(answer.getDisk().getPath());
    // ...
}

// ... (when no command was sent)
if (vm.getHypervisorType() == HypervisorType.KVM && volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) {
    volumeToAttach.setPath(volumeToAttach.get_iScsiName());
}
for (StoragePoolVO pool : storagePools) {
    List<Long> hostIds = _storageManager.getUpHostsInPool(pool.getId());
    if (hostIds == null || hostIds.isEmpty()) {
        continue;
    }
    GetStorageStatsCommand command = new GetStorageStatsCommand(pool.getUuid(), pool.getPoolType(), pool.getPath());
    long poolId = pool.getId();
    try {
        Answer answer = _storageManager.sendToPool(pool, command);
        if (answer != null && answer.getResult()) {
            storagePoolStats.put(pool.getId(), (StorageStats)answer);
            pool.setCapacityBytes(((StorageStats)answer).getCapacityBytes());
            _storagePoolDao.update(pool.getId(), pool);
            // ...
private Map<String, String> getDestDetails(StoragePoolVO storagePoolVO, SnapshotInfo snapshotInfo) {
    Map<String, String> destDetails = new HashMap<>();

    destDetails.put(DiskTO.STORAGE_HOST, storagePoolVO.getHostAddress());
    destDetails.put(DiskTO.STORAGE_PORT, String.valueOf(storagePoolVO.getPort()));

    long snapshotId = snapshotInfo.getId();

    destDetails.put(DiskTO.IQN, getProperty(snapshotId, DiskTO.IQN));
    destDetails.put(DiskTO.CHAP_INITIATOR_USERNAME, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_USERNAME));
    destDetails.put(DiskTO.CHAP_INITIATOR_SECRET, getProperty(snapshotId, DiskTO.CHAP_INITIATOR_SECRET));
    destDetails.put(DiskTO.CHAP_TARGET_USERNAME, getProperty(snapshotId, DiskTO.CHAP_TARGET_USERNAME));
    destDetails.put(DiskTO.CHAP_TARGET_SECRET, getProperty(snapshotId, DiskTO.CHAP_TARGET_SECRET));

    return destDetails;
}
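// For context, one plausible shape of the getProperty helper used above (a sketch,
// assuming per-snapshot details are stored through a SnapshotDetailsVO DAO; not verified
// against this exact source):
private String getProperty(long snapshotId, String property) {
    SnapshotDetailsVO snapshotDetails = _snapshotDetailsDao.findDetail(snapshotId, property);
    return snapshotDetails != null ? snapshotDetails.getValue() : null;
}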
public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
    StoragePoolHostVO poolHost = storagePoolHostDao.findByPoolHost(store.getId(), scope.getScopeId());
    if (poolHost == null) {
        poolHost = new StoragePoolHostVO(store.getId(), scope.getScopeId(), existingInfo.getLocalPath());
        storagePoolHostDao.persist(poolHost);
    }

    StoragePoolVO pool = this.dataStoreDao.findById(store.getId());
    pool.setScope(scope.getScopeType());
    pool.setUsedBytes(existingInfo.getCapacityBytes() - existingInfo.getAvailableBytes());
    pool.setCapacityBytes(existingInfo.getCapacityBytes());
    pool.setStatus(StoragePoolStatus.Up);
    this.dataStoreDao.update(pool.getId(), pool);
    this.storageMgr.createCapacityEntry(pool, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, pool.getUsedBytes());

    return dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
}
if (pool.getStatus() != StoragePoolStatus.Up) {
    continue;
}
if (!pool.isShared()) {
    continue;
}
if (pool.getPoolType() == StoragePoolType.OCFS2 && !_ocfs2Mgr.prepareNodes(pool.getClusterId())) {
    throw new ConnectionException(true, "Unable to prepare OCFS2 nodes for pool " + pool.getId());
}

s_logger.debug("Host " + hostId + " connected, connecting host to shared pool id " + pool.getId() + " and sending storage pool information ...");
try {
    _storageManager.connectHostToSharedPool(hostId, pool.getId());
    _storageManager.createCapacityEntry(pool.getId());
} catch (Exception e) {
    throw new ConnectionException(true, "Unable to connect host " + hostId + " to storage pool id " + pool.getId() + " due to " + e.toString(), e);
}
@Override
public boolean isLocalStorageActiveOnHost(Long hostId) {
    List<StoragePoolHostVO> storagePoolHostRefs = _storagePoolHostDao.listByHostId(hostId);
    for (StoragePoolHostVO storagePoolHostRef : storagePoolHostRefs) {
        StoragePoolVO primaryDataStore = _storagePoolDao.findById(storagePoolHostRef.getPoolId());
        if (primaryDataStore.getPoolType() == StoragePoolType.LVM || primaryDataStore.getPoolType() == StoragePoolType.EXT) {
            SearchBuilder<VolumeVO> volumeSB = _volsDao.createSearchBuilder();
            volumeSB.and("poolId", volumeSB.entity().getPoolId(), SearchCriteria.Op.EQ);
            volumeSB.and("removed", volumeSB.entity().getRemoved(), SearchCriteria.Op.NULL);
            volumeSB.and("state", volumeSB.entity().getState(), SearchCriteria.Op.NIN);

            SearchBuilder<VMInstanceVO> activeVmSB = _vmInstanceDao.createSearchBuilder();
            activeVmSB.and("state", activeVmSB.entity().getState(), SearchCriteria.Op.IN);
            volumeSB.join("activeVmSB", activeVmSB, volumeSB.entity().getInstanceId(), activeVmSB.entity().getId(), JoinBuilder.JoinType.INNER);

            SearchCriteria<VolumeVO> volumeSC = volumeSB.create();
            volumeSC.setParameters("poolId", primaryDataStore.getId());
            volumeSC.setParameters("state", Volume.State.Expunging, Volume.State.Destroy);
            volumeSC.setJoinParameters("activeVmSB", "state", State.Starting, State.Running, State.Stopping, State.Migrating);

            List<VolumeVO> volumes = _volsDao.search(volumeSC, null);
            if (volumes.size() > 0) {
                return true;
            }
        }
    }
    return false;
}
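// Informally (a sketch of intent, not the SQL the search framework actually generates),
// the joined search above asks:
//   SELECT v.* FROM volumes v
//   INNER JOIN vm_instance vm ON v.instance_id = vm.id
//   WHERE v.pool_id = ? AND v.removed IS NULL
//     AND v.state NOT IN ('Expunging', 'Destroy')
//     AND vm.state IN ('Starting', 'Running', 'Stopping', 'Migrating');
// i.e. "does any volume on this local pool still belong to an active VM?"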
@Override
public String getUri() {
    String path = pdsv.getPath().replaceFirst("/*", "");
    StringBuilder builder = new StringBuilder();
    builder.append(pdsv.getPoolType());
    builder.append("://");
    builder.append(pdsv.getHostAddress());
    builder.append(File.separator);
    builder.append(path);
    builder.append(File.separator);
    builder.append("?" + EncodingType.ROLE + "=" + getRole());
    builder.append("&" + EncodingType.STOREUUID + "=" + pdsv.getUuid());
    return builder.toString();
}
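// For illustration (hypothetical values; assumes a platform where File.separator is "/"):
// an NFS pool at 10.1.1.2 with stored path /export/primary would yield something like
//   NetworkFilesystem://10.1.1.2/export/primary/?ROLE=Primary&STOREUUID=6a0372e2-...
// Note the replaceFirst above strips the stored path's leading slashes before the
// builder re-inserts a single separator after the host address.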
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
    StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStore.getId());
    dataStoreVO.setDataCenterId(scope.getZoneId());
    dataStoreVO.setPodId(scope.getPodId());
    dataStoreVO.setClusterId(scope.getScopeId());
    dataStoreVO.setStatus(StoragePoolStatus.Attaching);
    dataStoreVO.setScope(scope.getScopeType());
    dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);

    attachCluster(dataStore);

    dataStoreVO = dataStoreDao.findById(dataStore.getId());
    dataStoreVO.setStatus(StoragePoolStatus.Up);
    dataStoreDao.update(dataStoreVO.getId(), dataStoreVO);

    return true;
}
@Override
public boolean isManaged() {
    return pdsv.isManaged();
}
List<HostVO> listHost = hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), storagePool.getDataCenterId(), null);
if (listHost == null || listHost.size() == 0) {
    throw new InvalidParameterValueException("No host in the Up state was found");
}