// Delegates to the backing StoragePoolVO row (pdsv) this data-store object wraps.
@Override public long getId() { return pdsv.getId(); }
// Body of the asynchronous preload task: seeds one template onto one primary storage pool.
// NOTE(review): the trailing "} });" closes the enclosing anonymous class and the call that
// submits it, both of which begin before this chunk — it must be preserved.
private void reallyRun() {
    s_logger.info("Start to preload template " + template.getId() + " into primary storage " + pool.getId());
    // Resolve the live primary data store for this pool id; the cast narrows to the StoragePool view.
    StoragePool pol = (StoragePool)_dataStoreMgr.getPrimaryDataStore(pool.getId());
    prepareTemplateForCreate(template, pol);
    s_logger.info("End of preloading template " + template.getId() + " into primary storage " + pool.getId());
} });
/**
 * Resolves the storage pool with the given UUID and returns its database id.
 *
 * @param storagePoolUuid UUID of the storage pool to look up
 * @return the numeric id of the matching storage pool
 * @throws CloudRuntimeException if no storage pool exists for the UUID
 */
public long getStoragePoolIdForStoragePoolUuid(String storagePoolUuid) {
    StoragePoolVO storagePool = storagePoolDao.findByUuid(storagePoolUuid);
    if (storagePool == null) {
        // Fixed copy-paste defect: the old message said "Unable to find Volume" even though
        // this method resolves a storage pool, which made failures misleading to diagnose.
        throw new CloudRuntimeException("Unable to find storage pool for UUID: " + storagePoolUuid);
    }
    return storagePool.getId();
}
/**
 * Resolves a primary data store by the UUID of its backing storage pool.
 *
 * @param uuid UUID of the primary storage pool
 * @return the primary data store wrapper for the pool
 * @throws CloudRuntimeException if no storage pool exists for the UUID
 */
@Override
public PrimaryDataStore getPrimaryDataStore(String uuid) {
    StoragePoolVO dataStoreVO = dataStoreDao.findByUuid(uuid);
    // Guard against an unknown UUID: previously this dereferenced null and surfaced
    // a bare NullPointerException instead of a descriptive error.
    if (dataStoreVO == null) {
        throw new CloudRuntimeException("Unable to find storage pool for UUID: " + uuid);
    }
    return getPrimaryDataStore(dataStoreVO.getId());
}
/**
 * Validates a user-supplied mapping of <volume, target storage pool> against managed-storage
 * rules. A volume that currently lives on managed storage may only be "migrated" onto the
 * very same pool; any other target pool is rejected. Volumes on non-managed storage pass
 * without further checks.
 */
protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StoragePoolVO currentPool, VolumeVO volume, StoragePoolVO targetPool) {
    // Only a managed source pool with a *different* target pool is illegal.
    boolean leavingManagedPool = currentPool.isManaged() && currentPool.getId() != targetPool.getId();
    if (leavingManagedPool) {
        throw new CloudRuntimeException(String.format("Currently, a volume on managed storage can only be 'migrated' to itself " + "[volumeId=%s, currentStoragePoolId=%s, targetStoragePoolId=%s].", volume.getUuid(), currentPool.getUuid(), targetPool.getUuid()));
    }
}
/**
 * Persists a storage pool together with its optional detail key/value pairs and tags,
 * all within a single legacy transaction so the rows commit atomically.
 *
 * @param pool    the pool entity to store
 * @param details optional pool details; may be null
 * @param tags    optional tag names; may be null or empty
 * @return the persisted pool entity (with generated id)
 */
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags) {
    TransactionLegacy txn = TransactionLegacy.currentTxn();
    txn.start();
    StoragePoolVO savedPool = super.persist(pool);
    if (details != null) {
        for (Map.Entry<String, String> entry : details.entrySet()) {
            // Details are always persisted with the display flag set to true.
            _detailsDao.persist(new StoragePoolDetailVO(savedPool.getId(), entry.getKey(), entry.getValue(), true));
        }
    }
    if (CollectionUtils.isNotEmpty(tags)) {
        _tagsDao.persist(savedPool.getId(), tags);
    }
    txn.commit();
    return savedPool;
}
/**
 * Preloads the given template into every primary storage pool that is currently Up
 * and belongs to the requested zone; pools in other zones are logged and skipped.
 */
public void prepareTemplateInAllStoragePools(final VMTemplateVO template, long zoneId) {
    for (final StoragePoolVO pool : _poolDao.listByStatus(StoragePoolStatus.Up)) {
        if (pool.getDataCenterId() != zoneId) {
            s_logger.info("Skip loading template " + template.getId() + " into primary storage " + pool.getId() + " as pool zone " + pool.getDataCenterId() + " is different from the requested zone " + zoneId);
            continue;
        }
        prepareTemplateInOneStoragePool(template, pool);
    }
}
/** Marks the pool backing {@code store} as being in Maintenance and persists the status change. */
public boolean maintain(DataStore store) {
    final StoragePoolVO poolRow = dataStoreDao.findById(store.getId());
    poolRow.setStatus(StoragePoolStatus.Maintenance);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return true;
}
/** Transitions the pool backing {@code store} to the Up status and persists the change. */
public boolean enable(DataStore store) {
    final StoragePoolVO poolRow = this.dataStoreDao.findById(store.getId());
    poolRow.setStatus(StoragePoolStatus.Up);
    this.dataStoreDao.update(poolRow.getId(), poolRow);
    return true;
}
/**
 * Finalizes attaching a pool at cluster scope: creates its capacity entry, stamps the
 * CLUSTER scope and Up status on the row, and returns the refreshed primary data store.
 */
public DataStore attachCluster(DataStore store) {
    final long poolId = store.getId();
    final StoragePoolVO poolRow = dataStoreDao.findById(poolId);
    storageMgr.createCapacityEntry(poolRow.getId());
    poolRow.setScope(ScopeType.CLUSTER);
    poolRow.setStatus(StoragePoolStatus.Up);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
}
/** Transitions the pool backing {@code store} to the Disabled status and persists the change. */
public boolean disable(DataStore store) {
    final StoragePoolVO poolRow = dataStoreDao.findById(store.getId());
    poolRow.setStatus(StoragePoolStatus.Disabled);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return true;
}
/**
 * Builds the scope object (cluster, zone, or host) for this data store based on the
 * ScopeType recorded on its pool row. Returns null when the scope type is unrecognized
 * or a HOST-scoped pool has no entry in the pool-host table.
 */
@Override
public Scope getScope() {
    StoragePoolVO poolRow = dataStoreDao.findById(pdsv.getId());
    ScopeType scopeType = poolRow.getScope();
    if (scopeType == ScopeType.CLUSTER) {
        return new ClusterScope(poolRow.getClusterId(), poolRow.getPodId(), poolRow.getDataCenterId());
    }
    if (scopeType == ScopeType.ZONE) {
        return new ZoneScope(poolRow.getDataCenterId());
    }
    if (scopeType == ScopeType.HOST) {
        // Local storage: the owning host comes from the pool-host mapping table.
        List<StoragePoolHostVO> hostEntries = poolHostDao.listByPoolId(poolRow.getId());
        if (!hostEntries.isEmpty()) {
            return new HostScope(hostEntries.get(0).getHostId(), poolRow.getClusterId(), poolRow.getDataCenterId());
        }
        s_logger.debug("can't find a local storage in pool host table: " + poolRow.getId());
    }
    return null;
}
/**
 * Enables a primary storage pool via its provider's lifecycle. Only pools currently in
 * the Disabled state may be enabled; any other state is rejected.
 */
@ActionEvent(eventType = EventTypes.EVENT_ENABLE_PRIMARY_STORAGE, eventDescription = "enable storage pool")
private void enablePrimaryStoragePool(StoragePoolVO primaryStorage) {
    if (!primaryStorage.getStatus().equals(StoragePoolStatus.Disabled)) {
        throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be enabled. Storage pool state : " + primaryStorage.getStatus().toString());
    }
    // The provider named on the pool row supplies the lifecycle that performs the transition.
    DataStoreLifeCycle lifeCycle = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()).getDataStoreLifeCycle();
    DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
    ((PrimaryDataStoreLifeCycle)lifeCycle).enableStoragePool(store);
}
/**
 * Disables a primary storage pool via its provider's lifecycle. Only pools currently in
 * the Up state may be disabled; any other state is rejected.
 */
@ActionEvent(eventType = EventTypes.EVENT_DISABLE_PRIMARY_STORAGE, eventDescription = "disable storage pool")
private void disablePrimaryStoragePool(StoragePoolVO primaryStorage) {
    if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)) {
        throw new InvalidParameterValueException("Primary storage with id " + primaryStorage.getId() + " cannot be disabled. Storage pool state : " + primaryStorage.getStatus().toString());
    }
    // The provider named on the pool row supplies the lifecycle that performs the transition.
    DataStoreLifeCycle lifeCycle = _dataStoreProviderMgr.getDataStoreProvider(primaryStorage.getStorageProviderName()).getDataStoreLifeCycle();
    DataStore store = _dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary);
    ((PrimaryDataStoreLifeCycle)lifeCycle).disableStoragePool(store);
}
/**
 * Validates a volume with no user-supplied target pool against managed-storage rules.
 * Non-managed pools need no check; for a managed pool, the target host must already have
 * access to the pool (a pool-host mapping must exist), otherwise an exception is thrown.
 */
protected void executeManagedStorageChecksWhenTargetStoragePoolNotProvided(Host targetHost, StoragePoolVO currentPool, Volume volume) {
    if (!currentPool.isManaged()) {
        return;
    }
    // Access is proven by the existence of a <pool, host> row.
    StoragePoolHostVO accessEntry = _poolHostDao.findByPoolHost(currentPool.getId(), targetHost.getId());
    if (accessEntry != null) {
        return;
    }
    throw new CloudRuntimeException(String.format("The target host does not have access to the volume's managed storage pool. [volumeId=%s, storageId=%s, targetHostId=%s].", volume.getUuid(), currentPool.getUuid(), targetHost.getUuid()));
}
/** Stamps ZONE scope and Up status on the pool backing {@code store} and returns the refreshed primary data store. */
public DataStore attachZone(DataStore store) {
    final StoragePoolVO poolRow = dataStoreDao.findById(store.getId());
    poolRow.setStatus(StoragePoolStatus.Up);
    poolRow.setScope(ScopeType.ZONE);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
}
/**
 * Stamps ZONE scope, the given hypervisor type, and Up status on the pool backing
 * {@code store}, then returns the refreshed primary data store.
 */
public DataStore attachZone(DataStore store, HypervisorType hypervisor) {
    final StoragePoolVO poolRow = dataStoreDao.findById(store.getId());
    poolRow.setHypervisor(hypervisor);
    poolRow.setStatus(StoragePoolStatus.Up);
    poolRow.setScope(ScopeType.ZONE);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Primary);
}
/**
 * Attaches the data store at cluster scope: records the zone/pod/cluster placement while
 * in the Attaching status, runs the per-implementation attach step, then re-reads the row
 * and flips it to Up.
 */
@Override
public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
    StoragePoolVO poolRow = dataStoreDao.findById(dataStore.getId());
    poolRow.setDataCenterId(scope.getZoneId());
    poolRow.setPodId(scope.getPodId());
    poolRow.setClusterId(scope.getScopeId());
    poolRow.setScope(scope.getScopeType());
    poolRow.setStatus(StoragePoolStatus.Attaching);
    dataStoreDao.update(poolRow.getId(), poolRow);
    attachCluster(dataStore);
    // Re-read: the attach step may have modified the row.
    poolRow = dataStoreDao.findById(dataStore.getId());
    poolRow.setStatus(StoragePoolStatus.Up);
    dataStoreDao.update(poolRow.getId(), poolRow);
    return true;
}
/**
 * Creates and persists a copy of {@code volume} placed on the given storage pool.
 * The copy is detached (no instance), has its path/folder/chain info cleared, and
 * remembers the source pool id as its last pool id.
 */
private VolumeVO duplicateVolumeOnAnotherStorage(Volume volume, StoragePoolVO storagePoolVO) {
    VolumeVO copy = new VolumeVO(volume);
    copy.setLastPoolId(volume.getPoolId());
    copy.setPoolId(storagePoolVO.getId());
    copy.setPodId(storagePoolVO.getPodId());
    copy.setInstanceId(null);
    copy.setChainInfo(null);
    copy.setPath(null);
    copy.setFolder(null);
    return _volumeDao.persist(copy);
}
/**
 * Attaches a (local) pool at host scope: ensures a pool-host mapping exists, stamps the
 * host scope, capacity/usage figures from {@code existingInfo}, and Up status on the row,
 * creates the local-storage capacity entry, and returns the refreshed primary data store.
 */
public DataStore attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
    final long poolId = store.getId();
    if (storagePoolHostDao.findByPoolHost(poolId, scope.getScopeId()) == null) {
        storagePoolHostDao.persist(new StoragePoolHostVO(poolId, scope.getScopeId(), existingInfo.getLocalPath()));
    }
    final StoragePoolVO poolRow = dataStoreDao.findById(poolId);
    poolRow.setScope(scope.getScopeType());
    // Used = capacity minus what the host reports as still available.
    poolRow.setUsedBytes(existingInfo.getCapacityBytes() - existingInfo.getAvailableBytes());
    poolRow.setCapacityBytes(existingInfo.getCapacityBytes());
    poolRow.setStatus(StoragePoolStatus.Up);
    dataStoreDao.update(poolRow.getId(), poolRow);
    storageMgr.createCapacityEntry(poolRow, Capacity.CAPACITY_TYPE_LOCAL_STORAGE, poolRow.getUsedBytes());
    return dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
}