/** Returns the volume this snapshot was taken from, resolved via the volume factory. */
@Override public VolumeInfo getBaseVolume() { return volFactory.getVolume(snapshot.getVolumeId()); }
/**
 * Deletes any copies of the given volume held on image cache stores.
 * Does nothing when the volume has no cache entries.
 */
protected void cleanVolumesCache(VolumeVO volume) {
    final List<VolumeInfo> cachedCopies = volFactory.listVolumeOnCache(volume.getId());
    if (CollectionUtils.isEmpty(cachedCopies)) {
        return;
    }
    for (final VolumeInfo cachedCopy : cachedCopies) {
        s_logger.info("Delete volume from image cache store: " + cachedCopy.getDataStore().getName());
        cachedCopy.delete();
    }
}
/**
 * Builds the list of volume transfer objects for all volumes attached to the given VM.
 *
 * @param vmId id of the VM instance whose volumes are collected
 * @return one {@link VolumeObjectTO} per volume found for the VM (empty list when none)
 */
@Override
public List<VolumeObjectTO> getVolumeTOList(Long vmId) {
    List<VolumeObjectTO> volumeTOs = new ArrayList<VolumeObjectTO>();
    List<VolumeVO> volumeVos = volumeDao.findByInstance(vmId);
    for (VolumeVO volume : volumeVos) {
        // Scope the VolumeInfo to the loop iteration; it is not needed afterwards.
        VolumeInfo volumeInfo = volumeDataFactory.getVolume(volume.getId());
        volumeTOs.add((VolumeObjectTO)volumeInfo.getTO());
    }
    return volumeTOs;
}
/**
 * Builds the list of volume transfer objects for all volumes attached to the given VM.
 *
 * @param vmId id of the VM instance whose volumes are collected
 * @return one {@link VolumeObjectTO} per volume found for the VM (empty list when none)
 */
private List<VolumeObjectTO> getVolumeTOList(Long vmId) {
    List<VolumeObjectTO> volumeTOs = new ArrayList<VolumeObjectTO>();
    List<VolumeVO> volumeVos = _volumeDao.findByInstance(vmId);
    for (VolumeVO volume : volumeVos) {
        // Scope the VolumeInfo to the loop iteration; it is not needed afterwards.
        VolumeInfo volumeInfo = volumeDataFactory.getVolume(volume.getId());
        volumeTOs.add((VolumeObjectTO)volumeInfo.getTO());
    }
    return volumeTOs;
}
/**
 * Expunges the given volume from primary storage when a copy exists there;
 * otherwise this is a no-op. The asynchronous expunge is awaited before returning.
 *
 * @throws InterruptedException if the wait on the expunge future is interrupted
 * @throws ExecutionException if the expunge operation itself fails
 */
protected void expungeVolumesInPrimaryStorageIfNeeded(VolumeVO volume) throws InterruptedException, ExecutionException {
    final VolumeInfo primaryCopy = volFactory.getVolume(volume.getId(), DataStoreRole.Primary);
    if (primaryCopy == null) {
        return;
    }
    s_logger.info("Expunging volume " + volume.getId() + " from primary data store");
    final AsyncCallFuture<VolumeApiResult> expungeFuture = volService.expungeVolumeAsync(primaryCopy);
    expungeFuture.get();
}
@DB @Override public void destroyVolume(long volumeId) { // mark volume entry in volumes table as destroy state VolumeInfo vol = volFactory.getVolume(volumeId); vol.stateTransit(Volume.Event.DestroyRequested); snapshotMgr.deletePoliciesForVolume(volumeId); vol.stateTransit(Volume.Event.OperationSucceeded); }
/**
 * Live-migrates the given volume to the destination storage pool.
 *
 * @param volume the volume to migrate
 * @param destPool the destination primary storage pool
 * @return the migrated volume, or {@code null} when the migration fails
 */
@DB
protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) {
    VolumeInfo vol = volFactory.getVolume(volume.getId());
    AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, (DataStore)destPool);
    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.debug("migrate volume failed:" + result.getResult());
            return null;
        }
        return result.getVolume();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        s_logger.debug("migrate volume failed", e);
        return null;
    } catch (ExecutionException e) {
        s_logger.debug("migrate volume failed", e);
        return null;
    }
}
/**
 * Expunges the given volume from secondary (image) storage when a copy exists there;
 * otherwise this is a no-op. After a successful expunge the account's
 * {@link ResourceType#secondary_storage} usage is decremented by the volume size.
 *
 * @throws InterruptedException if the wait on the expunge future is interrupted
 * @throws ExecutionException if the expunge operation itself fails
 */
protected void expungeVolumesInSecondaryStorageIfNeeded(VolumeVO volume) throws InterruptedException, ExecutionException {
    final VolumeInfo secondaryCopy = volFactory.getVolume(volume.getId(), DataStoreRole.Image);
    if (secondaryCopy == null) {
        return;
    }
    s_logger.info("Expunging volume " + volume.getId() + " from secondary data store");
    final AsyncCallFuture<VolumeApiResult> expungeFuture = volService.expungeVolumeAsync(secondaryCopy);
    expungeFuture.get();
    _resourceLimitMgr.decrementResourceCount(secondaryCopy.getAccountId(), ResourceType.secondary_storage, secondaryCopy.getSize());
}
// NOTE(review): fragment of a Transaction.execute(...) callback — the enclosing call
// begins above this view; the trailing "});" closes it. Do not treat as a standalone method.
@Override public VolumeVO doInTransaction(TransactionStatus status) {
    // Allocate the replacement volume record first, then mark the old one destroyed.
    VolumeVO newVolume = allocateDuplicateVolumeVO(existingVolume, templateIdToUseFinal);
    try {
        stateTransitTo(existingVolume, Volume.Event.DestroyRequested);
    } catch (NoTransitionException e) {
        // Best-effort: a failed transition is logged but does not abort the reallocation.
        s_logger.debug("Unable to destroy existing volume: " + e.toString());
    }
    // In case of VMware VM will continue to use the old root disk until expunged, so force expunge old root disk
    if (vm.getHypervisorType() == HypervisorType.VMware) {
        s_logger.info("Expunging volume " + existingVolume.getId() + " from primary data store");
        AsyncCallFuture<VolumeApiResult> future = volService.expungeVolumeAsync(volFactory.getVolume(existingVolume.getId()));
        try {
            future.get();
        } catch (Exception e) {
            // Best-effort expunge; failure is logged and the new volume is still returned.
            s_logger.debug("Failed to expunge volume:" + existingVolume.getId(), e);
        }
    }
    return newVolume;
} });
// NOTE(review): fragment — assigns an outer-scope 'future' and opens a try block whose
// remainder (catch/close) lies outside this view.
future = volService.expungeVolumeAsync(volFactory.getVolume(expunge.getId()));
try {
    future.get();
// NOTE(review): fragment — records the resolved VolumeInfo against its destination
// data store; 'volumeMap', 'volume' and 'destPool' are declared outside this view.
volumeMap.put(volFactory.getVolume(volume.getId()), (DataStore)destPool);
/**
 * Verifies the storage pool can accommodate resizing the volume to {@code newSize},
 * accounting for the hypervisor snapshot reserve (HSR) percentage.
 *
 * @param newHypervisorSnapshotReserve requested HSR percent; {@code null} or values below
 *        {@code LOWEST_HYPERVISOR_SNAPSHOT_RESERVE} are clamped up to that floor
 * @throws CloudRuntimeException if the new size would shrink the volume or exceed free capacity
 */
private void verifySufficientBytesForStoragePool(long storagePoolId, long volumeId, long newSize, Integer newHypervisorSnapshotReserve) {
    DataStore primaryDataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary);
    VolumeInfo volumeInfo = volumeFactory.getVolume(volumeId, primaryDataStore);
    StoragePoolVO storagePool = storagePoolDao.findById(storagePoolId);
    long currentSizeWithHsr = getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, storagePool);
    // Clamp the requested reserve up to the minimum supported percentage.
    int effectiveHsrPercent;
    if (newHypervisorSnapshotReserve == null) {
        effectiveHsrPercent = LOWEST_HYPERVISOR_SNAPSHOT_RESERVE;
    } else {
        effectiveHsrPercent = Math.max(newHypervisorSnapshotReserve, LOWEST_HYPERVISOR_SNAPSHOT_RESERVE);
    }
    long newSizeWithHsr = (long)(newSize + newSize * (effectiveHsrPercent / 100f));
    if (newSizeWithHsr < currentSizeWithHsr) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not support shrinking a volume.");
    }
    long availableBytes = storagePool.getCapacityBytes() - getUsedBytes(storagePool);
    if ((newSizeWithHsr - currentSizeWithHsr) > availableBytes) {
        throw new CloudRuntimeException("Storage pool " + storagePoolId + " does not have enough space to expand the volume.");
    }
}
/**
 * Updates the persisted hypervisor snapshot reserve (HSR) on the volume from the disk
 * offering: cleared for KVM, floored at 0 otherwise. When either argument is null the
 * volume is left untouched.
 *
 * @return a freshly resolved {@link VolumeInfo} for the volume
 */
@Override
public VolumeInfo updateHypervisorSnapshotReserveForVolume(DiskOffering diskOffering, long volumeId, HypervisorType hyperType) {
    if (diskOffering == null || hyperType == null) {
        return volFactory.getVolume(volumeId);
    }
    Integer hypervisorSnapshotReserve = diskOffering.getHypervisorSnapshotReserve();
    if (hyperType == HypervisorType.KVM) {
        // KVM: the reserve is cleared on the volume record.
        hypervisorSnapshotReserve = null;
    } else if (hypervisorSnapshotReserve == null || hypervisorSnapshotReserve < 0) {
        hypervisorSnapshotReserve = 0;
    }
    VolumeVO volume = volDao.findById(volumeId);
    volume.setHypervisorSnapshotReserve(hypervisorSnapshotReserve);
    volDao.update(volume.getId(), volume);
    return volFactory.getVolume(volumeId);
}
}
/**
 * Resolves the given data object against the supplied store, dispatching to the
 * factory matching the object's type.
 *
 * @throws CloudRuntimeException when the object type is not template/volume/snapshot
 */
@Override
public DataObject get(DataObject dataObj, DataStore store) {
    switch (dataObj.getType()) {
    case TEMPLATE:
        return imageFactory.getTemplate(dataObj, store);
    case VOLUME:
        return volumeFactory.getVolume(dataObj, store);
    case SNAPSHOT:
        return snapshotFactory.getSnapshot(dataObj, store);
    default:
        throw new CloudRuntimeException("unknown type");
    }
}
/**
 * Returns the effective size of the volume on the given pool, letting the primary
 * data store driver include its hypervisor snapshot reserve when it supports one;
 * other drivers simply yield the raw volume size.
 */
private long getDataObjectSizeIncludingHypervisorSnapshotReserve(Volume volume, StoragePool pool) {
    DataStoreProvider storeProvider = _dataStoreProviderMgr.getDataStoreProvider(pool.getStorageProviderName());
    DataStoreDriver storeDriver = storeProvider.getDataStoreDriver();
    if (!(storeDriver instanceof PrimaryDataStoreDriver)) {
        return volume.getSize();
    }
    PrimaryDataStoreDriver primaryStoreDriver = (PrimaryDataStoreDriver)storeDriver;
    VolumeInfo volumeInfo = volFactory.getVolume(volume.getId());
    return primaryStoreDriver.getDataObjectSizeIncludingHypervisorSnapshotReserve(volumeInfo, pool);
}
/**
 * Live-migrates the given volume to the destination storage pool.
 *
 * @param volume the volume to migrate
 * @param destPool the destination primary storage pool
 * @return the migrated volume
 * @throws StorageUnavailableException when the migration operation reports failure
 * @throws CloudRuntimeException when the wait on the migration is interrupted or errors
 */
@DB
protected Volume liveMigrateVolume(Volume volume, StoragePool destPool) throws StorageUnavailableException {
    VolumeInfo vol = volFactory.getVolume(volume.getId());
    DataStore dataStoreTarget = dataStoreMgr.getDataStore(destPool.getId(), DataStoreRole.Primary);
    AsyncCallFuture<VolumeApiResult> future = volService.migrateVolume(vol, dataStoreTarget);
    try {
        VolumeApiResult result = future.get();
        if (result.isFailed()) {
            s_logger.debug("migrate volume failed:" + result.getResult());
            throw new StorageUnavailableException("Migrate volume failed: " + result.getResult(), destPool.getId());
        }
        return result.getVolume();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe the interruption.
        Thread.currentThread().interrupt();
        s_logger.debug("migrate volume failed", e);
        // Preserve the cause instead of flattening it to a message string.
        throw new CloudRuntimeException(e.getMessage(), e);
    } catch (ExecutionException e) {
        s_logger.debug("migrate volume failed", e);
        throw new CloudRuntimeException(e.getMessage(), e);
    }
}
@Override public void revokeAccess(long vmId, long hostId) { HostVO host = _hostDao.findById(hostId); List<VolumeVO> volumesForVm = _volsDao.findByInstance(vmId); if (volumesForVm != null) { for (VolumeVO volumeForVm : volumesForVm) { VolumeInfo volumeInfo = volFactory.getVolume(volumeForVm.getId()); // pool id can be null for the VM's volumes in Allocated state if (volumeForVm.getPoolId() != null) { DataStore dataStore = dataStoreMgr.getDataStore(volumeForVm.getPoolId(), DataStoreRole.Primary); volService.revokeAccess(volumeInfo, host, dataStore); } } } }
/**
 * Validates that the volume can be attached and, if so, transitions it to the
 * Attaching state. Synchronized so concurrent attach attempts serialize on the check.
 *
 * @throws CloudRuntimeException if the volume is already attached or not in Ready state
 */
private synchronized void checkAndSetAttaching(Long volumeId, Long hostId) {
    final VolumeInfo volumeToAttach = volFactory.getVolume(volumeId);
    if (volumeToAttach.isAttachedVM()) {
        throw new CloudRuntimeException("volume: " + volumeToAttach.getName() + " is already attached to a VM: " + volumeToAttach.getAttachedVmName());
    }
    if (!Volume.State.Ready.equals(volumeToAttach.getState())) {
        final String error;
        if (hostId == null) {
            error = "Please try attach operation after starting VM once";
        } else {
            error = "Volume: " + volumeToAttach.getName() + " is in " + volumeToAttach.getState() + ". It should be in Ready state";
        }
        s_logger.error(error);
        throw new CloudRuntimeException(error);
    }
    volumeToAttach.stateTransit(Volume.Event.AttachRequested);
}
// NOTE(review): fragment — re-resolves the volume scoped to 'primaryDataStore'
// (declared outside this view) and rebinds the outer 'volumeInfo' reference.
volumeInfo = volFactory.getVolume(volumeInfo.getId(), primaryDataStore);
// NOTE(review): fragment — catch handler belonging to a try block outside this view.
} catch (Exception e) {
    created = false;
    // Flag the partially-created volume for destruction before propagating the failure.
    VolumeInfo vol = volFactory.getVolume(cmd.getEntityId());
    vol.stateTransit(Volume.Event.DestroyRequested);
    throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e);