From 1524363781fdae2166fe55f4ea12605f4cc077db Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 29 Mar 2024 18:20:34 +0000 Subject: [PATCH 01/47] Updates to change Pure and Primera to host-centric vlun assignments; various small bug fixes --- .../orchestration/VolumeOrchestrator.java | 9 +- .../StorageSystemDataMotionStrategy.java | 275 ++++++++++++------ .../impl/DefaultModuleDefinitionSet.java | 5 +- .../acl/DynamicRoleBasedAPIAccessChecker.java | 4 +- .../acl/ProjectRoleBasedApiAccessChecker.java | 8 +- .../kvm/storage/FiberChannelAdapter.java | 31 ++ .../kvm/storage/KVMStorageProcessor.java | 29 +- .../kvm/storage/MultipathSCSIAdapterBase.java | 226 ++++---------- .../datastore/adapter/ProviderAdapter.java | 22 +- .../adapter/ProviderAdapterFactory.java | 4 + .../adapter/ProviderVolumeNamer.java | 1 - .../driver/AdaptiveDataStoreDriverImpl.java | 218 ++++++++++---- .../AdaptiveDataStoreLifeCycleImpl.java | 61 +++- ...tivePrimaryDatastoreAdapterFactoryMap.java | 4 + .../provider/AdaptivePrimaryHostListener.java | 2 + .../adapter/flasharray/FlashArrayAdapter.java | 258 ++++++++-------- .../flasharray/FlashArrayAdapterFactory.java | 5 + .../adapter/flasharray/FlashArrayHost.java | 29 ++ .../adapter/flasharray/FlashArrayVolume.java | 4 +- .../flasharray/FlashArrayVolumePod.java | 17 +- .../adapter/primera/PrimeraAdapter.java | 153 +++++----- .../primera/PrimeraAdapterFactory.java | 5 + .../adapter/primera/PrimeraHost.java | 56 ++++ .../primera/PrimeraHostDescriptor.java | 23 ++ .../adapter/primera/PrimeraHostset.java | 44 +-- .../adapter/primera/PrimeraPort.java | 23 ++ .../adapter/primera/PrimeraPortPos.java | 30 ++ .../PrimeraVolumeCopyRequestParameters.java | 2 +- .../primera/PrimeraVolumePromoteRequest.java | 5 +- .../oauth2/OAuth2UserAuthenticator.java | 7 + .../java/com/cloud/vm/UserVmManagerImpl.java | 22 +- .../cloudstack/snapshot/SnapshotHelper.java | 4 +- .../vm/UnmanagedVMsManagerImpl.java | 1 + 33 files changed, 1003 insertions(+), 584 
deletions(-) create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 409b5388d72f..eaf91a0cd246 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1500,18 +1500,17 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest for (VolumeVO vol : vols) { VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); - DataTO volTO = volumeInfo.getTO(); - DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId()); DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); - disk.setDetails(getDetails(volumeInfo, dataStore)); - PrimaryDataStore primaryDataStore = (PrimaryDataStore)dataStore; // This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only) if (primaryDataStore.isManaged() && 
volService.requiresAccessForMigration(volumeInfo, dataStore)) { volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore); } - + // make sure this is done AFTER grantAccess, as grantAccess may change the volume's state + DataTO volTO = volumeInfo.getTO(); + DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId()); + disk.setDetails(getDetails(volumeInfo, dataStore)); vm.addDisk(disk); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index a93f624aa53c..81540bec3bdc 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; @@ -146,6 +147,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private static final int LOCK_TIME_IN_SECONDS = 300; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; + @Inject protected AgentManager agentManager; @Inject @@ -844,12 +846,25 @@ private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, 
VolumeInfo de checkAvailableForMigration(vm); String errMsg = null; + HostVO hostVO = null; try { destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); updatePathFromScsiName(volumeVO); destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // if managed we need to grant access + PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid()); + if (pds == null) { + throw new CloudRuntimeException("Unable to find primary data store driver for this volume"); + } + + // grant access (for managed volumes) + _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); // migrate the volume via the hypervisor String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); @@ -870,6 +885,19 @@ private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo de throw new CloudRuntimeException(errMsg, ex); } } finally { + // revoke access (for managed volumes) + if (hostVO != null) { + try { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } catch (Exception e) { + LOGGER.warn(String.format("Failed to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e); + } + } + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = 
_volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + + CopyCmdAnswer copyCmdAnswer; if (errMsg != null) { copyCmdAnswer = new CopyCmdAnswer(errMsg); @@ -920,6 +948,125 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, return hostVO; } + private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) { + VolumeInfo tempVolumeInfo = null; + VolumeVO volumeVO = null; + try { + volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(volumeVO); + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + return tempVolumeInfo; + } catch (Throwable e) { + try { + if (tempVolumeInfo != null) { + tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null); + } + + // cleanup temporary volume + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + } catch (Throwable e2) { + LOGGER.warn("Failed to delete temporary volume created for copy", e2); + } + + throw e; + } + } + + /** + * Simpler logic for copy from snapshot for adaptive driver only. 
+ * @param snapshotInfo + * @param destData + * @param callback + */ + private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + CopyCmdAnswer copyCmdAnswer = null; + DataObject srcFinal = null; + HostVO hostVO = null; + DataStore srcDataStore = null; + boolean tempRequired = false; + + try { + snapshotInfo.processEvent(Event.CopyingRequested); + hostVO = getHost(snapshotInfo); + DataObject destOnStore = destData; + srcDataStore = snapshotInfo.getDataStore(); + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + CopyCommand copyCommand = null; + if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) { + srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo); + tempRequired = true; + } else { + srcFinal = snapshotInfo; + } + + _volumeService.grantAccess(srcFinal, hostVO, srcDataStore); + + DataTO srcTo = srcFinal.getTO(); + + // have to set PATH as extraOptions due to logic in KVM hypervisor processor + HashMap extraDetails = new HashMap<>(); + extraDetails.put(DiskTO.PATH, srcTo.getPath()); + + copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); + copyCommand.setOptions(extraDetails); + copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand); + } catch (Exception ex) { + String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; + LOGGER.warn(msg, ex); + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + // remove access to the volume that was used + if (srcFinal != null && hostVO != null && srcDataStore != null) { + _volumeService.revokeAccess(srcFinal, hostVO, srcDataStore); + } + + // delete the temporary volume if it was needed + if (srcFinal != null && tempRequired) { + try { + 
srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null); + } catch (Throwable e) { + LOGGER.warn("Failed to delete temporary volume created for copy", e); + } + } + + // check we have a reasonable result + String errMsg = null; + if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) { + errMsg = "Unable to create template from snapshot"; + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = "Unable to create template from snapshot"; + } else if (!copyCmdAnswer.getResult()) { + errMsg = copyCmdAnswer.getDetails(); + } + + //submit processEvent + if (StringUtils.isEmpty(errMsg)) { + snapshotInfo.processEvent(Event.OperationSuccessed); + } else { + snapshotInfo.processEvent(Event.OperationFailed); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(copyCmdAnswer.getDetails()); + callback.complete(result); + } + } + /** * This function is responsible for copying a snapshot from managed storage to secondary storage. 
This is used in the following two cases: * 1) When creating a template from a snapshot @@ -930,6 +1077,13 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, * @param callback callback for async */ private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + + // if this flag is set (true or false), we will fall out to use simplier logic for the Adaptive handler + if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) { + handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback); + return; + } + String errMsg = null; CopyCmdAnswer copyCmdAnswer = null; boolean usingBackendSnapshot = false; @@ -1696,14 +1850,15 @@ private void handleCreateVolumeFromVolumeOnSecondaryStorage(VolumeInfo srcVolume private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, - VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; CopyCmdAnswer copyCmdAnswer; try { _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); Map destDetails = getVolumeDetails(destVolumeInfo); copyCommand.setOptions2(destDetails); @@ -1728,42 +1883,6 @@ private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo des return copyCmdAnswer; } - /** - * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot) - - * @param volumeVO - * @param snapshotInfo - */ - public void 
prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - try { - volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", - snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); - volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); - _volumeDao.persist(volumeVO); - VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - - if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { - snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); - // refresh volume info as data could have changed - tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - // save the "temp" volume info into the snapshot details (we need this to clean up at the end) - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); - // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() - // whenever the TemporaryVolumeCopyPath is set. - } else { - throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); - } - } catch (Throwable e) { - // cleanup temporary volume - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - throw e; - } - } - /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. 
@@ -1775,13 +1894,8 @@ public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - prepTempVolumeForCopyFromSnapshot(snapshotInfo); - return; - - } - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1796,31 +1910,20 @@ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - // cleanup any temporary volume previously created for copy from a snapshot - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - SnapshotDetailsVO tempUuid = null; - tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - if (tempUuid == null || tempUuid.getValue() == null) { - return; - } + try { + LOGGER.debug("Cleaning up temporary volume created for copy from a snapshot"); - volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - _snapshotDetailsDao.remove(tempUuid.getId()); - _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - return; - } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), 
snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); + } catch (Throwable e) { + LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); } } @@ -2496,15 +2599,17 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; try { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); } + copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + Map srcDetails = getVolumeDetails(volumeInfo); copyCommand.setOptions(srcDetails); @@ -2533,7 +2638,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { try { 
_volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); } @@ -2628,13 +2733,7 @@ private Map getSnapshotDetails(SnapshotInfo snapshotInfo) { long snapshotId = snapshotInfo.getId(); - // if the snapshot required a temporary volume be created check if the UUID is set so we can - // retrieve the temporary volume's path to use during remote copy - List storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); - if (storedDetails != null && storedDetails.size() > 0) { - String value = storedDetails.get(0).getValue(); - snapshotDetails.put(DiskTO.PATH, value); - } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { + if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2850,6 +2949,8 @@ private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolu Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); @@ -2892,18 +2993,18 @@ private String copyManagedVolumeToSecondaryStorage(VolumeInfo srcVolumeInfo, Vol StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); Map srcDetails = getVolumeDetails(srcVolumeInfo); - CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, - destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); - - 
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); - copyVolumeCommand.setSrcDetails(srcDetails); - handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); if (srcVolumeDetached) { _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); } + CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, + destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); + + copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); + copyVolumeCommand.setSrcDetails(srcDetails); + CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand); if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) { @@ -2975,7 +3076,7 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps srcData = cacheData; } - CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; try { if (Snapshot.LocationType.PRIMARY.equals(locationType)) { @@ -2983,11 +3084,13 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps Map srcDetails = getSnapshotDetails(snapshotInfo); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); copyCommand.setOptions(srcDetails); + } else { + _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); } - _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); - Map destDetails = getVolumeDetails(volumeInfo); copyCommand.setOptions2(destDetails); diff --git 
a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index 6c03c3ce9e16..7c73a27f71f8 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -101,7 +101,10 @@ public void with(ModuleDefinition def, Stack parents) { log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext context = getApplicationContext(moduleDefinitionName); try { - if (context.containsBean("moduleStartup")) { + if (context == null) { + log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); + + } else if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); log.info(String.format("Starting module [%s].", moduleDefinitionName)); runnable.run(); diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index cca9e3388687..1dfe20a10be2 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -122,7 +122,9 @@ public boolean checkAccess(Account account, String commandName) { } if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { - LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + if (LOGGER.isTraceEnabled()) { + 
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + } return true; } diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 0306a062df98..cffda4681c66 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -76,7 +76,9 @@ public List getApisAllowedToUser(Role role, User user, List apiN Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + } return apiNames; } @@ -114,8 +116,10 @@ public boolean checkAccess(User user, String apiCommandName) throws PermissionDe Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, user)); + } return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java index 
be7cb727ad77..1bc96dd396e3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java @@ -16,13 +16,39 @@ // under the License. package com.cloud.hypervisor.kvm.storage; +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.log4j.Logger; + import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; @StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel) public class FiberChannelAdapter extends MultipathSCSIAdapterBase { + + private Logger LOGGER = Logger.getLogger(getClass()); + + private String hostname = null; + private String hostnameFq = null; + public FiberChannelAdapter() { LOGGER.info("Loaded FiberChannelAdapter for StorageLayer"); + // get the hostname - we need this to compare to connid values + try { + InetAddress inetAddress = InetAddress.getLocalHost(); + hostname = inetAddress.getHostName(); // basic hostname + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname + LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]"); + } catch (UnknownHostException e) { + LOGGER.error("Error getting hostname", e); + } } @Override @@ -72,6 +98,11 @@ public AddressInfo parseAndValidatePath(String inPath) { address = value; } else if (key.equals("connid")) { connectionId = value; + } else if (key.startsWith("connid.")) { + String inHostname = key.substring(7); + if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) { + connectionId = value; + } } } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 1be4a8b61851..8eaf0a6b0e5e 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -266,10 +266,16 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { Map details = primaryStore.getDetails(); - String path = details != null ? details.get("managedStoreTarget") : null; + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? details.get("managedStoreTarget") : null; + } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); @@ -405,7 +411,12 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); } if (primaryPool.getType() == StoragePoolType.PowerFlex) { Map details = primaryStore.getDetails(); - String path = details != null ? details.get("managedStoreTarget") : null; + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? 
details.get("managedStoreTarget") : null; + } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); @@ -1021,7 +1032,9 @@ public Answer backupSnapshot(final CopyCommand cmd) { command.add(NAME_OPTION, snapshotName); command.add("-p", snapshotDestPath); - descName = UUID.randomUUID().toString(); + if (isCreatedFromVmSnapshot) { + descName = UUID.randomUUID().toString(); + } command.add("-t", descName); final String result = command.execute(); @@ -1046,7 +1059,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { srcVolume.clearPassphrase(); if (isCreatedFromVmSnapshot) { s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); - } else if (primaryPool.getType() != StoragePoolType.RBD) { + } else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) { deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); } @@ -2463,8 +2476,12 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } - String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; - destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; + if (destPrimaryStore.getPoolType() == StoragePoolType.FiberChannel) { + destVolumeName = destData.getPath(); + } else { + String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; + destVolumeName = managedStoreTarget != null ? 
managedStoreTarget : destVolumePath; + } } else { final String volumeName = UUID.randomUUID().toString(); destVolumeName = volumeName + "." + destFormat.getFileExtension(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 06dea46a98dd..df1d7035581d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -21,20 +21,17 @@ import java.io.File; import java.io.IOException; import java.io.InputStreamReader; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Timer; import java.util.TimerTask; -import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; +import org.joda.time.Duration; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; @@ -43,8 +40,7 @@ import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.libvirt.LibvirtException; -import org.joda.time.Duration; +import org.apache.log4j.Logger; public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); @@ -82,6 +78,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { * Initialize static program-wide configurations and background jobs */ static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; boolean 
cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); @@ -96,16 +93,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { throw new Error("Unable to find the disconnectVolume.sh script"); } - resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); - if (resizeScript == null) { - throw new Error("Unable to find the resizeVolume.sh script"); - } - copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); if (copyScript == null) { throw new Error("Unable to find the copyVolume.sh script"); } + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (cleanupEnabled) { cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); if (cleanupScript == null) { @@ -137,9 +131,6 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); - /** - * We expect WWN values in the volumePath so need to convert it to an actual physical path - */ public abstract AddressInfo parseAndValidatePath(String path); @Override @@ -151,6 +142,7 @@ public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { return null; } + // we expect WWN values in the volumePath so need to convert it to an actual physical path AddressInfo address = parseAndValidatePath(volumePath); return getPhysicalDisk(address, pool); } @@ -194,7 +186,14 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map volumeToDisconnect) { @Override public boolean disconnectPhysicalDiskByPath(String localPath) { LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); + if (localPath == null || (localPath != null && !localPath.startsWith("/dev/mapper/"))) { + LOGGER.debug(String.format("isconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath)); + 
return false; + } ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); + return true; } @Override public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { - LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); - return true; + return false; } @Override @@ -275,15 +283,9 @@ public boolean createFolder(String uuid, String path, String localPath) { return true; } - /** - * Validate inputs and return the source file for a template copy - * @param templateFilePath - * @param destTemplatePath - * @param destPool - * @param format - * @return - */ - File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { LOGGER.error("Unable to create template from direct download template file due to insufficient data"); throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); @@ -296,57 +298,18 @@ File createTemplateFromDirectDownloadFileValidate(String templateFilePath, Strin throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not 
exist on this host"); } - if (destTemplatePath == null || destTemplatePath.isEmpty()) { - LOGGER.error("Failed to create template, target template disk path not provided"); - throw new CloudRuntimeException("Target template disk path not provided"); - } - - if (this.isStoragePoolTypeSupported(destPool.getType())) { - throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); - } - - if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { - LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); - throw new CloudRuntimeException("Unsupported template format: " + format.toString()); - } - return sourceFile; - } - - String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { - String srcTemplateFilePath = templateFilePath; - if (isTemplateExtractable(templateFilePath)) { - srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); - LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); - String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); - Script.runSimpleBashScript(extractCommand); - Script.runSimpleBashScript("rm -f " + templateFilePath); - } - return srcTemplateFilePath; - } - - QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { - if (format == Storage.ImageFormat.RAW) { - return QemuImg.PhysicalDiskFormat.RAW; - } else if (format == Storage.ImageFormat.QCOW2) { - return QemuImg.PhysicalDiskFormat.QCOW2; - } else { - return QemuImg.PhysicalDiskFormat.RAW; - } - } - - @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { - File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); - 
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); - KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath); return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); } @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } - validateForDiskCopy(disk, name, destPool); LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); @@ -366,8 +329,18 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); + /**Script script = new Script( + String.format("%s %s %s %s", copyScript, 
destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()), + Duration.millis(timeout), + LOGGER); + + script.execute(); + int rc = script.getExitValue(); + */ int rc = result.getExitCode(); if (rc != 0) { throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); @@ -377,49 +350,21 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt return destDisk; } - void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { - if (StringUtils.isEmpty(name) || disk == null || destPool == null) { - LOGGER.error("Unable to copy physical disk due to insufficient data"); - throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); - } - } - - /** - * Copy a disk path to another disk path using QemuImg command - * @param disk - * @param destDisk - * @param name - * @param timeout - */ - void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { - QemuImg qemu; - try { - qemu = new QemuImg(timeout); - } catch (LibvirtException | QemuImgException e) { - throw new CloudRuntimeException (e); - } - QemuImgFile srcFile = null; - QemuImgFile destFile = null; - - try { - srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); - destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); - - LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); - qemu.convert(srcFile, destFile, true); - LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); - } catch (QemuImgException | LibvirtException e) { - try { - Map srcInfo = qemu.info(srcFile); - LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); - } catch (Exception ignored) { - LOGGER.warn("Unable to get info from source disk: " + 
disk.getName()); - } - - String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg, e); + private static final ScriptResult runScript(String script, long timeout, String...args) { + ScriptResult result = new ScriptResult(); + Script cmd = new Script(script, Duration.millis(timeout), LOGGER); + cmd.add(args); + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String output = cmd.execute(parser); + // its possible the process never launches which causes an NPE on getExitValue below + if (output != null && output.contains("Unable to execute the command")) { + result.setResult(output); + result.setExitCode(-1); + return result; } + result.setResult(output); + result.setExitCode(cmd.getExitValue()); + return result; } @Override @@ -460,25 +405,9 @@ String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String } } - private static final ScriptResult runScript(String script, long timeout, String...args) { - ScriptResult result = new ScriptResult(); - Script cmd = new Script(script, Duration.millis(timeout), LOGGER); - cmd.add(args); - OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - String output = cmd.execute(parser); - // its possible the process never launches which causes an NPE on getExitValue below - if (output != null && output.contains("Unable to execute the command")) { - result.setResult(output); - result.setExitCode(-1); - return result; - } - result.setResult(output); - result.setExitCode(cmd.getExitValue()); - return result; - } - boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " 
secs"); + long scriptTimeoutSecs = 30; // how long to wait for each script execution to run long maxTries = 10; // how many max retries to attempt the script long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait @@ -553,41 +482,8 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l } LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); - return false; - } - - void runConnectScript(String lun, AddressInfo address) { - try { - ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); - Process p = builder.start(); - int rc = p.waitFor(); - StringBuffer output = new StringBuffer(); - if (rc == 0) { - BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); - String line = null; - while ((line = input.readLine()) != null) { - output.append(line); - output.append(" "); - } - } else { - LOGGER.warn("Failure discovering LUN via " + connectScript); - BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); - String line = null; - while ((line = error.readLine()) != null) { - LOGGER.warn("error --> " + line); - } - } - } catch (IOException | InterruptedException e) { - throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e); - } - } - - void sleep(long sleepTimeMs) { - try { - Thread.sleep(sleepTimeMs); - } catch (Exception ex) { - // don't do anything - } + throw new CloudRuntimeException("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid()); + //return false; } long getPhysicalDiskSize(String diskPath) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java index 0cd44cd04c2a..9c0db25d52eb 100644 --- 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java @@ -69,14 +69,14 @@ public interface ProviderAdapter { * @param request * @return */ - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request); + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname); /** * Detach the host from the storage context * @param context * @param request */ - public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request); + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname); /** * Delete the provided volume/object @@ -154,4 +154,22 @@ public interface ProviderAdapter { * @return */ public boolean canAccessHost(ProviderAdapterContext context, String hostname); + + /** + * Returns true if the provider allows direct attach/connection of snapshots to a host + * @return + */ + public boolean canDirectAttachSnapshot(); + + + /** + * Given a ProviderAdapterDataObject, return a map of connection IDs to connection values. Generally + * this would be used to return a map of hostnames and the VLUN ID for the attachment associated with + * that hostname. If the provider is using a hostgroup/hostset model where the ID is assigned in common + * across all hosts in the group, then the map MUST contain a single entry with host key set as a wildcard + * character (exactly '*'). 
+ * @param dataIn + * @return + */ + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java index 13a843d47635..ceedc6530513 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java @@ -19,6 +19,10 @@ import java.util.Map; public interface ProviderAdapterFactory { + /** Name of the provider */ public String getProviderName(); + /** create a new instance of a provider adapter */ public ProviderAdapter create(String url, Map details); + /** returns true if this type of adapter can directly attach snapshots to hosts */ + public Object canDirectAttachSnapshot(); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java index 5a72871e9c0d..f578b1cc460d 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java @@ -21,7 +21,6 @@ public class ProviderVolumeNamer { private static final String SNAPSHOT_PREFIX = "snap"; private static final String VOLUME_PREFIX = "vol"; private static final String TEMPLATE_PREFIX = "tpl"; - /** Simple method to allow sharing storage setup, primarily in lab/testing environment */ private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier"); 
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index d908d48c7dad..32342b951542 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -43,6 +44,7 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; @@ -53,10 +55,12 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import 
org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; +import org.apache.cloudstack.storage.image.store.TemplateObject; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.storage.snapshot.SnapshotObject; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -73,7 +77,6 @@ import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; - import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -133,6 +136,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive DomainDao _domainDao; @Inject VolumeService _volumeService; + @Inject + VolumeDataFactory volumeDataFactory; private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null; @@ -142,7 +147,52 @@ public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap fac @Override public DataTO getTO(DataObject data) { - return null; + // we need to get connectionId and and the VLUN ID for currently attached hosts to add to the DataTO object + DataTO to = null; + if (data.getType() == DataObjectType.VOLUME) { + VolumeObjectTO vto = new VolumeObjectTO((VolumeObject)data); + vto.setPath(getPath(data)); + to = vto; + } else if (data.getType() == DataObjectType.TEMPLATE) { + TemplateObjectTO tto = new TemplateObjectTO((TemplateObject)data); + tto.setPath(getPath(data)); + to = tto; + } else if (data.getType() == DataObjectType.SNAPSHOT) { + SnapshotObjectTO sto = new SnapshotObjectTO((SnapshotObject)data); + sto.setPath(getPath(data)); + to = sto; + } else { + to = super.getTO(data); + } + return 
to; + } + + /* + * For the given data object, return the path with current connection info. If a snapshot + * object is passed, we will determine if a temporary volume is avialable for that + * snapshot object and return that conneciton info instead. + */ + String getPath(DataObject data) { + StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool); + + /** This means the object is not yet associated with the external provider so path is null */ + if (dataIn.getExternalName() == null) { + return null; + } + + ProviderAdapterContext context = newManagedVolumeContext(data); + Map connIdMap = api.getConnectionIdMap(dataIn); + ProviderVolume volume = api.getVolume(context, dataIn); + // if this is an existing object, generate the path for it. + String finalPath = null; + if (volume != null) { + finalPath = generatePathInfo(volume, connIdMap); + } + return finalPath; } @Override @@ -217,11 +267,8 @@ public void createAsync(DataStore dataStore, DataObject dataObject, dataIn.setExternalName(volume.getExternalName()); dataIn.setExternalUuid(volume.getExternalUuid()); - // add the volume to the host set - String connectionId = api.attach(context, dataIn); - // update the cloudstack metadata about the volume - persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId); + persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null); result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result.setSuccess(true); @@ -288,6 +335,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, ProviderAdapterContext context = newManagedVolumeContext(destdata); ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool); ProviderAdapterDataObject destIn = 
newManagedDataObject(destdata, storagePool); + outVolume = api.copy(context, sourceIn, destIn); // populate this data - it may be needed later @@ -302,17 +350,9 @@ public void copyAsync(DataObject srcdata, DataObject destdata, api.resize(context, destIn, destdata.getSize()); } - String connectionId = api.attach(context, destIn); - - String finalPath; - // format: type=fiberwwn; address=
; connid= - if (connectionId != null) { - finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId); - } else { - finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase()); - } - - persistVolumeData(storagePool, details, destdata, outVolume, connectionId); + // initial volume info does not have connection map yet. That is added when grantAccess is called later. + String finalPath = generatePathInfo(outVolume, null); + persistVolumeData(storagePool, details, destdata, outVolume, null); s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); VolumeObjectTO voto = new VolumeObjectTO(); @@ -442,6 +482,66 @@ public void resize(DataObject data, AsyncCompletionCallback cal } + public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { + s_logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + api.attach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes - unless it was turned off above + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + + + s_logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); + 
return true; + } catch (Throwable e) { + String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage(); + s_logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + + public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + // nothing to do if the host is null + if (dataObject == null || host == null || dataStore == null) { + return; + } + + s_logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + + api.detach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + + s_logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid()); + } catch (Throwable e) { + String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage(); + s_logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { @@ -492,15 +592,7 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback getCapabilities() { 
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); - // indicates the datastore can create temporary volumes for use when copying - // data from a snapshot - mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString()); - + ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName()); + if (factory != null) { + mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString()); + } else { + mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", Boolean.FALSE.toString()); + } return mapCapabilities; } @@ -667,6 +761,11 @@ public boolean canProvideVolumeStats() { return true; } + @Override + public boolean requiresAccessForMigration(DataObject dataObject) { + return true; + } + public String getProviderName() { return providerName; } @@ -715,8 +814,13 @@ public Pair getVolumeStats(StoragePool storagePool, String volumePat object.setType(ProviderAdapterDataObject.Type.VOLUME); ProviderVolumeStats stats = api.getVolumeStats(context, object); - Long provisionedSizeInBytes = stats.getActualUsedInBytes(); - Long allocatedSizeInBytes = stats.getAllocatedInBytes(); + Long provisionedSizeInBytes = null; + Long allocatedSizeInBytes = null; + if (stats != null) { + provisionedSizeInBytes = stats.getActualUsedInBytes(); + allocatedSizeInBytes = stats.getAllocatedInBytes(); + } + if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) { return null; } @@ -734,31 +838,19 @@ public boolean canHostAccessStoragePool(Host host, StoragePool pool) { } void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map storagePoolDetails, - 
DataObject dataObject, ProviderVolume volume, String connectionId) { + DataObject dataObject, ProviderVolume volume, Map connIdMap) { if (dataObject.getType() == DataObjectType.VOLUME) { - persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap); } } void persistVolumeData(StoragePoolVO storagePool, Map details, DataObject dataObject, - ProviderVolume managedVolume, String connectionId) { + ProviderVolume managedVolume, Map connIdMap) { VolumeVO volumeVO = _volumeDao.findById(dataObject.getId()); - // if its null check if the storage provider returned one that is already set - if (connectionId == null) { - connectionId = managedVolume.getExternalConnectionId(); - } - - String finalPath; - // format: type=fiberwwn; address=
; connid= - if (connectionId != null) { - finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId); - } else { - finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase()); - } - + String finalPath = generatePathInfo(managedVolume, connIdMap); volumeVO.setPath(finalPath); volumeVO.setFormat(ImageFormat.RAW); volumeVO.setPoolId(storagePool.getId()); @@ -783,25 +875,31 @@ void persistVolumeData(StoragePoolVO storagePool, Map details, D } void persistTemplateData(StoragePoolVO storagePool, Map details, DataObject dataObject, - ProviderVolume volume, String connectionId) { + ProviderVolume volume, Map connIdMap) { TemplateInfo templateInfo = (TemplateInfo) dataObject; VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), templateInfo.getId(), null); - // template pool ref doesn't have a details object so we'll save: - // 1. external name ==> installPath - // 2. 
address ==> local download path - if (connectionId == null) { - templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(), - volume.getAddress().toLowerCase())); - } else { - templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(), - volume.getAddress().toLowerCase(), connectionId)); - } + + templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap)); templatePoolRef.setLocalDownloadPath(volume.getExternalName()); templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes()); _vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); } + String generatePathInfo(ProviderVolume volume, Map connIdMap) { + String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;", + volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid()); + + // if a map was provided, add the connection IDs to the path info. 
the map is all the possible vlun id's used + // across each host or the hostset (represented with host name key as "*"); + if (connIdMap != null && connIdMap.size() > 0) { + for (String key: connIdMap.keySet()) { + finalPath += String.format(" connid.%s=%s;", key, connIdMap.get(key)); + } + } + return finalPath; + } + ProviderAdapterContext newManagedVolumeContext(DataObject obj) { ProviderAdapterContext ctx = new ProviderAdapterContext(); if (obj instanceof VolumeInfo) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index 56d9a25f34f8..26d2494eacf8 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -189,7 +189,6 @@ public DataStore initialize(Map dsInfos) { parameters.setName(dsName); parameters.setProviderName(providerName); parameters.setManaged(true); - parameters.setCapacityBytes(capacityBytes); parameters.setUsedBytes(0); parameters.setCapacityIops(capacityIops); parameters.setHypervisorType(HypervisorType.KVM); @@ -223,7 +222,7 @@ public DataStore initialize(Map dsInfos) { // if we have user-provided capacity bytes, validate they do not exceed the manaaged storage capacity bytes ProviderVolumeStorageStats stats = api.getManagedStorageStats(); - if (capacityBytes != null && capacityBytes != 0) { + if (capacityBytes != null && capacityBytes != 0 && stats != null) { if (stats.getCapacityInBytes() > 0) { if (stats.getCapacityInBytes() < capacityBytes) { throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", 
storage capacity from storage provider: " + stats.getCapacityInBytes()); @@ -233,8 +232,8 @@ public DataStore initialize(Map dsInfos) { } // if we have no user-provided capacity bytes, use the ones provided by storage else { - if (stats.getCapacityInBytes() <= 0) { - throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified"); + if (stats == null || stats.getCapacityInBytes() <= 0) { + throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified"); } parameters.setCapacityBytes(stats.getCapacityInBytes()); } @@ -383,8 +382,58 @@ public boolean migrateToObjectStore(DataStore store) { * Update the storage pool configuration */ @Override - public void updateStoragePool(StoragePool storagePool, Map details) { - _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details); + public void updateStoragePool(StoragePool storagePool, Map newDetails) { + /**String newAuthnType = newDetails.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY); + String newUser = newDetails.get(ProviderAdapter.API_USERNAME_KEY); + String newToken = newDetails.get(ProviderAdapter.API_TOKEN_KEY); + String newPassword = fetchMightBeEncryptedProperty(ProviderAdapter.API_PASSWORD_KEY, newDetails); + String newSecret = fetchMightBeEncryptedProperty(ProviderAdapter.API_TOKEN_KEY, newDetails); + String newUrl = newDetails.get(ProviderAdapter.API_URL_KEY); + String skipTlsValidationStr = newDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + Boolean newSkipTlsValidation = null; + if (skipTlsValidationStr != null) { + newSkipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); + } + + String capacityInBytesStr = newDetails.get("capacityBytes"); + Long newCapacityInBytes = null; + if (capacityInBytesStr != null) { + newCapacityInBytes = Long.parseLong(capacityInBytesStr); + } + + String 
capacityIopsStr = newDetails.get("capacityIops"); + Long newCapacityIops = null; + if (capacityIopsStr != null) { + newCapacityIops = Long.parseLong(capacityIopsStr); + } + + + Map existingDetails = _primaryDataStoreDao.getDetails(storagePool.getId()); + if (newAuthnType != null) { + existingDetails.put(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY, newAuthnType); + } + + if (newUser != null) existingDetails.put(ProviderAdapter.API_USERNAME_KEY, newUser); + if (newToken != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newToken); + if (newPassword != null) existingDetails.put(ProviderAdapter.API_PASSWORD_KEY, newPassword); + if (newSecret != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newSecret); + if (newUrl != null) existingDetails.put(ProviderAdapter.API_URL_KEY, newUrl); + if (newSkipTlsValidation != null) existingDetails.put(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY, newSkipTlsValidation.toString()); + if (newCapacityInBytes != null) existingDetails.put("capacityBytes", capacityInBytesStr); + if (newCapacityIops != null) existingDetails.put("capacityIops", capacityIopsStr); + + _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), existingDetails);*/ + _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails); + } + + private String fetchMightBeEncryptedProperty(String key, Map details) { + String value; + try { + value = DBEncryptionUtil.decrypt(details.get(key)); + } catch (Exception e) { + value = details.get(key); + } + return value; } /** diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java index ee5caa7178ef..e68153512d3d 100644 --- 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java @@ -131,4 +131,8 @@ protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url); return api; } + + public ProviderAdapterFactory getFactory(String providerName) { + return this.factoryMap.get(providerName); + } } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java index 68dd4a15c62a..2a58c8f86f26 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -54,6 +54,8 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep if (storagePoolHost == null) { storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); storagePoolHostDao.persist(storagePoolHost); + } else { + return false; } return true; } diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 3082a19c7324..e26f593189d8 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ 
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -25,7 +25,6 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import javax.net.ssl.HostnameVerifier; @@ -109,7 +108,8 @@ protected FlashArrayAdapter(String url, Map details) { } @Override - public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) { + public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, + ProviderAdapterDiskOffering offering, long size) { FlashArrayVolume request = new FlashArrayVolume(); request.setExternalName( pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject)); @@ -128,30 +128,50 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData * cluster (depending on Cloudstack Storage Pool configuration) */ @Override - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) { + + // should not happen but double check for sanity + if (dataObject.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) { + throw new RuntimeException("This storage provider does not support direct attachments of snapshots to hosts"); + } + String volumeName = normalizeName(pod, dataObject.getExternalName()); try { - FlashArrayList list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference> () { }); + FlashArrayList list = null; + FlashArrayHost host = getHost(hostname); + if (host != null) { + list = POST("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName, null, + new TypeReference>() { + }); + } if (list == null || list.getItems() == null 
|| list.getItems().size() == 0) { throw new RuntimeException("Volume attach did not return lun information"); } - FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list); + FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list); if (connection.getLun() == null) { throw new RuntimeException("Volume attach missing lun field"); } - return ""+connection.getLun(); + return "" + connection.getLun(); } catch (Throwable e) { - // the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it + // the volume is already attached. happens in some scenarios where orchestration + // creates the volume before copying to it if (e.toString().contains("Connection already exists")) { FlashArrayList list = GET("/connections?volume_names=" + volumeName, - new TypeReference>() { - }); + new TypeReference>() { + }); if (list != null && list.getItems() != null) { - return ""+list.getItems().get(0).getLun(); + for (FlashArrayConnection conn : list.getItems()) { + if (conn.getHost() != null && conn.getHost().getName() != null && + (conn.getHost().getName().equals(hostname) || conn.getHost().getName().equals(hostname.substring(0, hostname.indexOf('.')))) && + conn.getLun() != null) { + return "" + conn.getLun(); + } + } + throw new RuntimeException("Volume lun is not found in existing connection"); } else { throw new RuntimeException("Volume lun is not found in existing connection"); } @@ -162,9 +182,18 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d } @Override - public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) { String volumeName = normalizeName(pod, dataObject.getExternalName()); - DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); + // hostname is always 
provided by cloudstack, but we will detach from hostgroup + // if this pool is configured to use hostgroup for attachments + if (hostgroup != null) { + DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); + } + + FlashArrayHost host = getHost(hostname); + if (host != null) { + DELETE("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName); + } } @Override @@ -205,8 +234,6 @@ public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterD return null; } - populateConnectionId(volume); - return volume; } catch (Exception e) { // assume any exception is a not found. Flash returns 400's for most errors @@ -217,7 +244,7 @@ public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterD @Override public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { // public FlashArrayVolume getVolumeByWwn(String wwn) { - if (address == null ||addressType == null) { + if (address == null || addressType == null) { throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress"); } @@ -234,21 +261,19 @@ public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, Address FlashArrayVolume volume = null; try { FlashArrayList list = GET("/volumes?filter=" + query, - new TypeReference>() { - }); + new TypeReference>() { + }); // if we didn't get an address back its likely an empty object if (list == null || list.getItems() == null || list.getItems().size() == 0) { return null; } - volume = (FlashArrayVolume)this.getFlashArrayItem(list); + volume = (FlashArrayVolume) this.getFlashArrayItem(list); if (volume != null && volume.getAddress() == null) { return null; } - populateConnectionId(volume); - return volume; } catch (Exception e) { // assume any exception is a not found. 
Flash returns 400's for most errors @@ -256,32 +281,6 @@ public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, Address } } - private void populateConnectionId(FlashArrayVolume volume) { - // we need to see if there is a connection (lun) associated with this volume. - // note we assume 1 lun for the hostgroup associated with this object - FlashArrayList list = null; - try { - list = GET("/connections?volume_names=" + volume.getExternalName(), - new TypeReference>() { - }); - } catch (CloudRuntimeException e) { - // this means there is no attachment associated with this volume on the array - if (e.toString().contains("Bad Request")) { - return; - } - } - - if (list != null && list.getItems() != null) { - for (FlashArrayConnection conn: list.getItems()) { - if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) { - volume.setExternalConnectionId(""+conn.getLun()); - break; - } - } - - } - } - @Override public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) { // public void resizeVolume(String volumeNamespace, String volumeName, long @@ -299,7 +298,8 @@ public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dat * @return */ @Override - public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) { + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, + ProviderAdapterDataObject targetDataObject) { // public FlashArrayVolume snapshotVolume(String volumeNamespace, String // volumeName, String snapshotName) { FlashArrayList list = POST( @@ -354,11 +354,12 @@ public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdap } @Override - public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) { + 
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, + ProviderAdapterDataObject destDataObject) { // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, // String destName) { if (sourceDataObject == null || sourceDataObject.getExternalName() == null - ||sourceDataObject.getType() == null) { + || sourceDataObject.getType() == null) { throw new RuntimeException("Provided volume has no external source information"); } @@ -424,12 +425,6 @@ public void refresh(Map details) { @Override public void validate() { login(); - // check if hostgroup and pod from details really exist - we will - // require a distinct configuration object/connection object for each type - if (this.getHostgroup(hostgroup) == null) { - throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url - + "], please validate configuration"); - } if (this.getVolumeNamespace(pod) == null) { throw new RuntimeException( @@ -477,36 +472,33 @@ public boolean canAccessHost(ProviderAdapterContext context, String hostname) { throw new RuntimeException("Unable to validate host access because a hostname was not provided"); } - List members = getHostgroupMembers(hostgroup); - - // check for fqdn and shortname combinations. 
this assumes there is at least a shortname match in both the storage array and cloudstack - // hostname configuration - String shortname; - if (hostname.indexOf('.') > 0) { - shortname = hostname.substring(0, (hostname.indexOf('.'))); - } else { - shortname = hostname; + FlashArrayHost host = getHost(hostname); + if (host != null) { + return true; } - for (String member : members) { - // exact match (short or long names) - if (member.equals(hostname)) { - return true; - } + return false; + } - // primera has short name and cloudstack had long name - if (member.equals(shortname)) { - return true; - } + private FlashArrayHost getHost(String hostname) { + FlashArrayList list = null; - // member has long name but cloudstack had shortname - if (member.indexOf('.') > 0) { - if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { - return true; - } + try { + list = GET("/hosts?names=" + hostname, + new TypeReference>() { + }); + } catch (Exception e) { + + } + + if (list == null) { + if (hostname.indexOf('.') > 0) { + list = GET("/hosts?names=" + hostname.substring(0, (hostname.indexOf('.'))), + new TypeReference>() { + }); } } - return false; + return (FlashArrayHost) getFlashArrayItem(list); } private String getAccessToken() { @@ -527,13 +519,21 @@ private synchronized void refreshSession(boolean force) { } } catch (Exception e) { // retry frequently but not every request to avoid DDOS on storage API - logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", + logger.warn( + "Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", e); keyExpiration = System.currentTimeMillis() + (5 * 1000); } } - private void validateLoginInfo(String urlStr) { + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + 
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); + URL urlFull; try { urlFull = new URL(urlStr); @@ -571,15 +571,6 @@ private void validateLoginInfo(String urlStr) { } } - hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP); - if (hostgroup == null) { - hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP); - if (hostgroup == null) { - throw new RuntimeException( - FlashArrayAdapter.STORAGE_POD + " paramater/option required to configure this storage pool"); - } - } - apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION); if (apiLoginVersion == null) { apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION); @@ -596,6 +587,12 @@ private void validateLoginInfo(String urlStr) { } } + // retrieve for legacy purposes. if set, we'll remove any connections to hostgroup we find and use the host + hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP); + if (hostgroup == null) { + hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP); + } + String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); if (connTimeoutStr == null) { connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); @@ -651,16 +648,7 @@ private void validateLoginInfo(String urlStr) { } else { skipTlsValidation = true; } - } - /** - * Login to the array and get an access token - */ - private void login() { - username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); - password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); - String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - validateLoginInfo(urlStr); CloseableHttpResponse response = null; try { HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken"); @@ -749,7 +737,13 @@ private void removeVlunsAll(ProviderAdapterContext context, String volumeNamespa if (list != null && list.getItems() != null) { for (FlashArrayConnection conn : list.getItems()) { - 
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName); + if (hostgroup != null && conn.getHostGroup() != null && conn.getHostGroup().getName() != null) { + DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + + volumeName); + break; + } else if (conn.getHost() != null && conn.getHost().getName() != null) { + DELETE("/connections?host_names=" + conn.getHost().getName() + "&volume_names=" + volumeName); + } } } } @@ -762,30 +756,10 @@ private FlashArrayVolume getVolume(String volumeName) { } private FlashArrayPod getVolumeNamespace(String name) { - FlashArrayList list = GET("/pods?names=" + name, new TypeReference>() { - }); - return (FlashArrayPod) getFlashArrayItem(list); - } - - private FlashArrayHostgroup getHostgroup(String name) { - FlashArrayList list = GET("/host-groups?name=" + name, - new TypeReference>() { - }); - return (FlashArrayHostgroup) getFlashArrayItem(list); - } - - private List getHostgroupMembers(String groupname) { - FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname, - new TypeReference() { + FlashArrayList list = GET("/pods?names=" + name, + new TypeReference>() { }); - if (list == null || list.getItems().size() == 0) { - return null; - } - List hostnames = new ArrayList(); - for (FlashArrayGroupMemberReference ref : list.getItems()) { - hostnames.add(ref.getMember().getName()); - } - return hostnames; + return (FlashArrayPod) getFlashArrayItem(list); } private FlashArrayVolume getSnapshot(String snapshotName) { @@ -856,7 +830,8 @@ private T POST(String path, Object input, final TypeReference type) { } return null; } catch (UnsupportedOperationException | IOException e) { - throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e); + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", + e); } } else if (statusCode == 400) { 
try { @@ -1083,4 +1058,39 @@ private Long roundUp512Boundary(Long sizeInBytes) { } return sizeInBytes; } + + @Override + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn) { + Map map = new HashMap(); + + // flasharray doesn't let you directly map a snapshot to a host, so we'll just return an empty map + if (dataIn.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) { + return map; + } + + try { + FlashArrayList list = GET("/connections?volume_names=" + dataIn.getExternalName(), + new TypeReference>() { + }); + + if (list != null && list.getItems() != null) { + for (FlashArrayConnection conn : list.getItems()) { + if (conn.getHost() != null) { + map.put(conn.getHost().getName(), "" + conn.getLun()); + } + } + } + } catch (Exception e) { + // flasharray returns a 400 if the volume doesn't exist, so we'll just return an empty object. + if (logger.isTraceEnabled()) { + logger.trace("Error getting connection map for volume [" + dataIn.getExternalName() + "]: " + e.toString(), e); + } + } + return map; + } + + @Override + public boolean canDirectAttachSnapshot() { + return false; + } } diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java index d1c3cee8fa8b..de55b27884eb 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java @@ -33,4 +33,9 @@ public ProviderAdapter create(String url, Map details) { return new FlashArrayAdapter(url, details); } + @Override + public Object canDirectAttachSnapshot() { + return false; + } + } diff --git 
a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java new file mode 100644 index 000000000000..0c3a1e7179d5 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java @@ -0,0 +1,29 @@ +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayHost { + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public List getWwns() { + return wwns; + } + public void setWwns(List wwns) { + this.wwns = wwns; + } + @JsonProperty("name") + private String name; + @JsonProperty("wwns") + private List wwns; + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java index f939d70a77f4..a3201a753a75 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java @@ -83,7 +83,7 @@ public String getName() { @JsonIgnore public String getPodName() { if (pod != null) { - return pod.getName(); + return pod.name; } else { return null; } @@ -129,7 +129,7 @@ public void setName(String name) { } public void 
setPodName(String podname) { FlashArrayVolumePod pod = new FlashArrayVolumePod(); - pod.setName(podname); + pod.name = podname; this.pod = pod; } @Override diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java index 1e46441e7d1e..e9c10f84a750 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java @@ -24,20 +24,7 @@ @JsonInclude(JsonInclude.Include.NON_NULL) public class FlashArrayVolumePod { @JsonProperty("id") - private String id; + public String id; @JsonProperty("name") - private String name; - - public String getId() { - return id; - } - public void setId(String id) { - this.id = id; - } - public String getName() { - return name; - } - public void setName(String name) { - this.name = name; - } + public String name; } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 69f98567f728..8e1375a77027 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -24,7 +24,6 @@ import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.util.HashMap; -import java.util.List; import java.util.Map; import javax.net.ssl.HostnameVerifier; @@ -106,18 +105,11 @@ public void refresh(Map 
details) { this.refreshSession(true); } - /** - * Validate that the hostgroup and pod from the details data exists. Each - * configuration object/connection needs a distinct set of these 2 things. - */ @Override public void validate() { login(); - if (this.getHostset(hostset) == null) { - throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url - + "], please validate configuration"); - } - + // check if hostgroup and pod from details really exist - we will + // require a distinct configuration object/connection object for each type if (this.getCpg(cpg) == null) { throw new RuntimeException( "Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration"); @@ -176,10 +168,15 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData } @Override - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, String hostname) { assert dataIn.getExternalName() != null : "External name not provided internally on volume attach"; PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest(); - request.setHostname("set:" + hostset); + PrimeraHost host = getHost(hostname); + if (host == null) { + throw new RuntimeException("Unable to find host " + hostname + " on storage provider"); + } + request.setHostname(host.getName()); + request.setVolumeName(dataIn.getExternalName()); request.setAutoLun(true); // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4 @@ -194,12 +191,36 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d return toks[1]; } - @Override + /** + * This detaches ALL vlun's for the provided volume name IF they are associated to this hostset + * @param context + * @param request + */ public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) 
{ + detach(context, request, null); + } + + @Override + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname) { // we expect to only be attaching one hostset to the vluns, so on detach we'll // remove ALL vluns we find. assert request.getExternalName() != null : "External name not provided internally on volume detach"; - removeAllVluns(request.getExternalName()); + + PrimeraVlunList list = getVluns(request.getExternalName()); + if (list != null && list.getMembers().size() > 0) { + list.getMembers().forEach(vlun -> { + // remove any hostset from old code if configured + if (hostset != null && vlun.getHostname() != null && vlun.getHostname().equals("set:" + hostset)) { + removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname()); + } + + if (hostname != null) { + if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) { + removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname()); + } + } + }); + } } public void removeVlun(String name, Integer lunid, String hostString) { @@ -208,20 +229,7 @@ public void removeVlun(String name, Integer lunid, String hostString) { DELETE("/vluns/" + name + "," + lunid + "," + hostString); } - /** - * Removes all vluns - this should only be done when you are sure the volume is no longer in use - * @param name - */ - public void removeAllVluns(String name) { - PrimeraVlunList list = getVolumeHostsets(name); - if (list != null && list.getMembers() != null) { - for (PrimeraVlun vlun: list.getMembers()) { - removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname()); - } - } - } - - public PrimeraVlunList getVolumeHostsets(String name) { + public PrimeraVlunList getVluns(String name) { String query = "%22volumeName%20EQ%20" + name + "%22"; return GET("/vluns?query=" + query, new TypeReference() {}); } @@ -231,7 +239,7 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject req assert 
request.getExternalName() != null : "External name not provided internally on volume delete"; // first remove vluns (take volumes from vluns) from hostset - removeAllVluns(request.getExternalName()); + detach(context, request); DELETE("/volumes/" + request.getExternalName()); } @@ -420,6 +428,7 @@ public ProviderVolumeStorageStats getManagedStorageStats() { if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) { return null; } + Long capacityBytes = 0L; if (cpgobj.getsDGrowth() != null) { capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB; @@ -453,39 +462,25 @@ public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, Provid @Override public boolean canAccessHost(ProviderAdapterContext context, String hostname) { - PrimeraHostset hostset = getHostset(this.hostset); - - List members = hostset.getSetmembers(); - - // check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack - // hostname configuration - String shortname; - if (hostname.indexOf('.') > 0) { - shortname = hostname.substring(0, (hostname.indexOf('.'))); - } else { - shortname = hostname; + // check that the array has the host configured + PrimeraHost host = this.getHost(hostname); + if (host != null) { + // if hostset is configured we'll additionally check if the host is in it (legacy/original behavior) + return true; } - for (String member: members) { - // exact match (short or long names) - if (member.equals(hostname)) { - return true; - } - // primera has short name and cloudstack had long name - if (member.equals(shortname)) { - return true; - } + return false; + } - // member has long name but cloudstack had shortname - int index = member.indexOf("."); - if (index > 0) { - if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { - return true; - } + private PrimeraHost getHost(String name) { + PrimeraHost host = GET("/hosts/" + name, new TypeReference() { }); + if (host 
== null) { + if (name.indexOf('.') > 0) { + host = this.getHost(name.substring(0, (name.indexOf('.')))); } } + return host; - return false; } private PrimeraCpg getCpg(String name) { @@ -493,11 +488,6 @@ private PrimeraCpg getCpg(String name) { }); } - private PrimeraHostset getHostset(String name) { - return GET("/hostsets/" + name, new TypeReference() { - }); - } - private String getSessionKey() { refreshSession(false); return key; @@ -518,8 +508,14 @@ private synchronized void refreshSession(boolean force) { keyExpiration = System.currentTimeMillis() + (5*1000); } } + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - private void validateLoginInfo(String urlStr) { URL urlFull; try { urlFull = new URL(urlStr); @@ -566,13 +562,10 @@ private void validateLoginInfo(String urlStr) { } } + // if this is null, we will use direct-to-host vlunids (preferred) hostset = connectionDetails.get(PrimeraAdapter.HOSTSET); if (hostset == null) { hostset = queryParms.get(PrimeraAdapter.HOSTSET); - if (hostset == null) { - throw new RuntimeException( - PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool"); - } } String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS); @@ -629,16 +622,7 @@ private void validateLoginInfo(String urlStr) { } else { skipTlsValidation = true; } - } - /** - * Login to the array and get an access token - */ - private void login() { - username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); - password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); - String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - validateLoginInfo(urlStr); CloseableHttpResponse response = null; try { HttpPost request = new HttpPost(url + 
"/credentials"); @@ -720,7 +704,7 @@ private T POST(String path, Object input, final TypeReference type) { try { String data = mapper.writeValueAsString(input); request.setEntity(new StringEntity(data)); - logger.debug("POST data: " + request.getEntity()); + if (logger.isTraceEnabled()) logger.trace("POST data: " + request.getEntity()); } catch (UnsupportedEncodingException | JsonProcessingException e) { throw new RuntimeException( "Error processing request payload to [" + url + "] for path [" + path + "]", e); @@ -926,5 +910,22 @@ private void DELETE(String path) { } } + @Override + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn) { + Map connIdMap = new HashMap(); + PrimeraVlunList list = this.getVluns(dataIn.getExternalName()); + + if (list != null && list.getMembers() != null && list.getMembers().size() > 0) { + for (PrimeraVlun vlun: list.getMembers()) { + connIdMap.put(vlun.getHostname(), ""+vlun.getLun()); + } + } + + return connIdMap; + } + @Override + public boolean canDirectAttachSnapshot() { + return true; + } } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java index 81ae442b38df..43a0245431e8 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java @@ -33,4 +33,9 @@ public ProviderAdapter create(String url, Map details) { return new PrimeraAdapter(url, details); } + @Override + public Object canDirectAttachSnapshot() { + return true; + } + } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java 
b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java new file mode 100644 index 000000000000..e7371c329c49 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.List; + + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraHost { + private Integer id; + private String name; + private List fcPaths; + private PrimeraHostDescriptor descriptors; + public Integer getId() { + return id; + } + public void setId(Integer id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public List getFcPaths() { + return fcPaths; + } + public void setFcPaths(List fcPaths) { + this.fcPaths = fcPaths; + } + public PrimeraHostDescriptor getDescriptors() { + return descriptors; + } + public void setDescriptors(PrimeraHostDescriptor descriptors) { + this.descriptors = descriptors; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java new file mode 100644 index 000000000000..29ba90ffe7c7 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java @@ -0,0 +1,23 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraHostDescriptor { + private String IPAddr = null; + private String os = null; + public String getIPAddr() { + return IPAddr; + } + public void setIPAddr(String iPAddr) { + IPAddr = iPAddr; + } + public String getOs() { + return os; + } + public void 
setOs(String os) { + this.os = os; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java index e062f0782af5..0d3c6146a79a 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java @@ -34,105 +34,115 @@ public class PrimeraHostset { private String uuid; private Map additionalProperties = new LinkedHashMap(); + + public String getComment() { return comment; } + + public void setComment(String comment) { this.comment = comment; } + + public Integer getId() { return id; } + + public void setId(Integer id) { this.id = id; } + + public String getName() { return name; } + + public void setName(String name) { this.name = name; } + + public List getSetmembers() { return setmembers; } + + public void setSetmembers(List setmembers) { this.setmembers = setmembers; } + + public String getUuid() { return uuid; } + + public void setUuid(String uuid) { this.uuid = uuid; } + + public Map getAdditionalProperties() { return additionalProperties; } + + public void setAdditionalProperties(Map additionalProperties) { this.additionalProperties = additionalProperties; } + + // adds members to a hostset public static class PrimeraHostsetVLUNRequest { private String volumeName; private Boolean autoLun = true; private Integer lun = 0; private Integer maxAutoLun = 0; - /** - * This can be a single hostname OR the set of hosts in the format - * "set:". - * For the purposes of this driver, its expected that the predominate usecase is - * to use - * a hostset that is aligned with a CloudStack Cluster. 
- */ + // hostset format: "set:" private String hostname; - public String getVolumeName() { return volumeName; } - public void setVolumeName(String volumeName) { this.volumeName = volumeName; } - public Boolean getAutoLun() { return autoLun; } - public void setAutoLun(Boolean autoLun) { this.autoLun = autoLun; } - public Integer getLun() { return lun; } - public void setLun(Integer lun) { this.lun = lun; } - public Integer getMaxAutoLun() { return maxAutoLun; } - public void setMaxAutoLun(Integer maxAutoLun) { this.maxAutoLun = maxAutoLun; } - public String getHostname() { return hostname; } - public void setHostname(String hostname) { this.hostname = hostname; } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java new file mode 100644 index 000000000000..e6e84faeb292 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java @@ -0,0 +1,23 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraPort { + private String wwn; + private PrimeraPortPos portPos; + public String getWwn() { + return wwn; + } + public void setWwn(String wwn) { + this.wwn = wwn; + } + public PrimeraPortPos getPortPos() { + return portPos; + } + public void setPortPos(PrimeraPortPos portPos) { + this.portPos = portPos; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java new 
file mode 100644 index 000000000000..e05de43542fd --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java @@ -0,0 +1,30 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraPortPos { + private Integer cardPort; + private Integer node; + private Integer slot; + public Integer getCardPort() { + return cardPort; + } + public void setCardPort(Integer cardPort) { + this.cardPort = cardPort; + } + public Integer getNode() { + return node; + } + public void setNode(Integer node) { + this.node = node; + } + public Integer getSlot() { + return slot; + } + public void setSlot(Integer slot) { + this.slot = slot; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java index 33ad0d445f85..6bfa9f5920dd 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java @@ -35,7 +35,7 @@ public class PrimeraVolumeCopyRequestParameters { private String snapCPG = null; private Boolean skipZero = null; private Boolean saveSnapshot = null; - /** 1=HIGH, 2=MED, 3=LOW */ + // 1=HIGH, 2=MED, 3=LOW private Integer priority = null; public String getDestVolume() { return destVolume; diff --git 
a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java index 48898c272771..10ba364a5c5e 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java @@ -22,10 +22,7 @@ @JsonIgnoreProperties(ignoreUnknown = true) @JsonInclude(JsonInclude.Include.NON_NULL) public class PrimeraVolumePromoteRequest { - /** - * Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html - */ - private Integer action = 4; + private Integer action = 4; // PROMOTE_VIRTUAL_COPY, https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html private Boolean online = true; private Integer priority = 2; // MEDIUM private Boolean allowRemoteCopyParent = true; diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java index 8484a5ef798d..b479436dab2c 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java @@ -58,10 +58,17 @@ public Pair authenticate(String username, final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER); final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL); final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE); + + 
if (provider == null) { + return new Pair(false, null); + } + String oauthProvider = ((provider == null) ? null : provider[0]); String email = ((emailArray == null) ? null : emailArray[0]); String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]); + + UserOAuth2Authenticator authenticator = _userOAuth2mgr.getUserOAuth2AuthenticationProvider(oauthProvider); if (user != null && authenticator.verifyUser(email, secretCode)) { return new Pair(true, null); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 0d3f047809a2..d0904b4c7fc9 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm; +import static com.cloud.configuration.ConfigurationManager.VM_USERDATA_MAX_LENGTH; import static com.cloud.configuration.ConfigurationManagerImpl.VM_USERDATA_MAX_LENGTH; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -4356,14 +4357,7 @@ public boolean checkIfDynamicScalingCanBeEnabled(VirtualMachine vm, ServiceOffer */ protected long configureCustomRootDiskSize(Map customParameters, VMTemplateVO template, HypervisorType hypervisorType, DiskOfferingVO rootDiskOffering) { verifyIfHypervisorSupportsRootdiskSizeOverride(hypervisorType); - Long rootDiskSizeCustomParam = null; - if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - rootDiskSizeCustomParam = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); - if (rootDiskSizeCustomParam <= 0) { - throw new InvalidParameterValueException("Root disk size should be a positive number."); - } - } - long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, rootDiskSizeCustomParam); + long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1)); if 
(rootDiskSizeInBytes > 0) { //if the size at DiskOffering is not zero then the Service Offering had it configured, it holds priority over the User custom size _volumeService.validateVolumeSizeInBytes(rootDiskSizeInBytes); long rootDiskSizeInGiB = rootDiskSizeInBytes / GiB_TO_BYTES; @@ -4372,7 +4366,11 @@ protected long configureCustomRootDiskSize(Map customParameters, } if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - Long rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES; + Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); + if (rootDiskSize <= 0) { + throw new InvalidParameterValueException("Root disk size should be a positive number."); + } + rootDiskSize *= GiB_TO_BYTES; _volumeService.validateVolumeSizeInBytes(rootDiskSize); return rootDiskSize; } else { @@ -6399,6 +6397,12 @@ private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { + " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS)); } + List vols = _volsDao.findByInstance(vm.getId()); + if (vols.size() > 1 && + !(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) { + throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. 
Need to detach data disks first"); + } + // Check that Vm does not have VM Snapshots if (_vmSnapshotDao.findByVm(vmId).size() > 0) { throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index a11593a86080..7f0ee77deaf3 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -92,7 +92,9 @@ public class SnapshotHelper { */ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { if (!kvmSnapshotOnlyInPrimaryStorage) { - logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); + if (snapInfo != null) { + logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. 
Not expunging it.", snapInfo.getId())); + } return; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 1ed5a8f1648a..bca52c6e54b9 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -558,6 +558,7 @@ private StoragePool getStoragePool(final UnmanagedInstanceTO.Disk disk, final Da } } } + if (storagePool == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Storage pool for disk %s(%s) with datastore: %s not found in zone ID: %s", disk.getLabel(), disk.getDiskId(), disk.getDatastoreName(), zone.getUuid())); } From 41ef2ee1fad1121c1fa368d590b8a89413f2eedc Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 1 Apr 2024 18:32:55 +0000 Subject: [PATCH 02/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index df1d7035581d..8bd6d4f6fcac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -482,8 +482,7 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l } LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); - throw new CloudRuntimeException("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid()); - 
//return false; + return false; } long getPhysicalDiskSize(String diskPath) { From cb1f0fa4642147b45186fab1a5decea923e34d5a Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 1 Apr 2024 21:19:59 +0000 Subject: [PATCH 03/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../hypervisor/kvm/storage/MultipathSCSIAdapterBase.java | 7 ++++--- .../datastore/adapter/flasharray/FlashArrayAdapter.java | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 8bd6d4f6fcac..eff6767a06b6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -178,12 +178,12 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map Date: Tue, 2 Apr 2024 01:06:11 +0000 Subject: [PATCH 04/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../storage/datastore/driver/AdaptiveDataStoreDriverImpl.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index 32342b951542..549604b35777 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -996,4 +996,8 @@ ProviderAdapterDataObject newManagedDataObject(DataObject 
data, StoragePool stor dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString())); return dataIn; } + + public boolean volumesRequireGrantAccessWhenUsed() { + return true; + } } From 7c12b06ff740990209d06c42f976b0c20e58d6a5 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 2 Apr 2024 16:38:43 +0000 Subject: [PATCH 05/47] update to add timestamp when deleting pure volumes to avoid future conflicts --- .../datastore/adapter/flasharray/FlashArrayAdapter.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 66ddac4e855a..a1b277c7e54c 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -23,6 +23,7 @@ import java.security.KeyManagementException; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; @@ -203,6 +204,10 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dat String fullName = normalizeName(pod, dataObject.getExternalName()); FlashArrayVolume volume = new FlashArrayVolume(); + // rename as we delete so it doesn't conflict if the template or volume is ever recreated + // pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete + String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date()); + volume.setName(fullName + "-" + timestamp); volume.setDestroyed(true); try { PATCH("/volumes?names=" + fullName, 
volume, new TypeReference>() { From 5bd6909c23f151e2f1e36f99bd536f9a669658e0 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 15:27:26 +0000 Subject: [PATCH 06/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../cloud/storage/VolumeApiServiceImpl.java | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 2e8b36da446a..04def1b6048b 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3399,12 +3399,15 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) Account caller = CallContext.current().getCallingAccount(); DataCenter zone = null; Volume volume = _volsDao.findById(cmd.getId()); - if (volume != null) { - zone = _dcDao.findById(volume.getDataCenterId()); + if (volume == null) { + throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId())); } + zone = _dcDao.findById(volume.getDataCenterId()); + _accountMgr.checkAccess(caller, newDiskOffering, zone); - DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) { + StoragePool destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); + + if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingMatchTargetStoragePool(destStoragePool, newDiskOffering)) { throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), 
newDiskOffering.getUuid())); } return newDiskOffering; @@ -3520,16 +3523,24 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String return result; } - public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { - String[] oldDOStorageTags = oldDO.getTagsArray(); + public boolean doesNewDiskOfferingMatchTargetStoragePool(StoragePool destPool, DiskOfferingVO newDO) { String[] newDOStorageTags = newDO.getTagsArray(); - if (oldDOStorageTags.length == 0) { - return true; + List destPoolTags = storagePoolTagsDao.findStoragePoolTags(destPool.getId()); + if (newDOStorageTags == null || newDOStorageTags.length == 0) { + if (destPoolTags == null || destPoolTags.isEmpty()) { + return true; + } else { + return false; + } } - if (newDOStorageTags.length == 0) { - return false; + for (StoragePoolTagVO spt: destPoolTags) { + for (String doTag: newDOStorageTags) { + if (doTag.equals(spt.getTag())) { + return true; + } + } } - return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags)); + return false; } /** From 7d041c4f17f7562f2bb139c28bf1975535f40d9a Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 15:36:04 +0000 Subject: [PATCH 07/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../cloud/storage/VolumeApiServiceImpl.java | 36 ++++++++----------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 04def1b6048b..b7f1732e7b94 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3407,8 +3407,8 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) _accountMgr.checkAccess(caller, newDiskOffering, zone); StoragePool 
destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingMatchTargetStoragePool(destStoragePool, newDiskOffering)) { - throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid())); + if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesTargetStorageSupportDiskOffering(destStoragePool, newDiskOffering)) { + throw new InvalidParameterValueException(String.format("New disk offering is not valid for the provided storage pool: volume [%s], disk offering [%s]", volume.getUuid(), newDiskOffering.getUuid())); } return newDiskOffering; } @@ -3494,6 +3494,18 @@ protected boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, Dis return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags); } + public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { + String[] oldDOStorageTags = oldDO.getTagsArray(); + String[] newDOStorageTags = newDO.getTagsArray(); + if (oldDOStorageTags.length == 0) { + return true; + } + if (newDOStorageTags.length == 0) { + return false; + } + return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags)); + } + @Override public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) { Pair, Boolean> storagePoolTags = getStoragePoolTags(destPool); @@ -3523,26 +3535,6 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String return result; } - public boolean doesNewDiskOfferingMatchTargetStoragePool(StoragePool destPool, DiskOfferingVO newDO) { - String[] newDOStorageTags = newDO.getTagsArray(); - List destPoolTags = storagePoolTagsDao.findStoragePoolTags(destPool.getId()); - if 
(newDOStorageTags == null || newDOStorageTags.length == 0) { - if (destPoolTags == null || destPoolTags.isEmpty()) { - return true; - } else { - return false; - } - } - for (StoragePoolTagVO spt: destPoolTags) { - for (String doTag: newDOStorageTags) { - if (doTag.equals(spt.getTag())) { - return true; - } - } - } - return false; - } - /** * Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule, * or a normal list of tags. From c761869c3447c7dd417872ce47e4e212a5216ea9 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 16:16:01 +0000 Subject: [PATCH 08/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../main/java/com/cloud/storage/VolumeApiServiceImpl.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index b7f1732e7b94..f8bc8172b834 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3405,11 +3405,6 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) zone = _dcDao.findById(volume.getDataCenterId()); _accountMgr.checkAccess(caller, newDiskOffering, zone); - StoragePool destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); - - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesTargetStorageSupportDiskOffering(destStoragePool, newDiskOffering)) { - throw new InvalidParameterValueException(String.format("New disk offering is not valid for the provided storage pool: volume [%s], disk offering [%s]", volume.getUuid(), newDiskOffering.getUuid())); - } return newDiskOffering; } From 3e8cb9648d7051643cf9d8741121fa7551e780cb Mon Sep 17 00:00:00 2001 From: "Glover, Rene 
(rg9975)" Date: Thu, 4 Apr 2024 15:15:11 +0000 Subject: [PATCH 09/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../main/java/com/cloud/storage/VolumeApiServiceImpl.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index f8bc8172b834..066cd3fca8a2 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3314,6 +3314,13 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { } DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd); + // if no new disk offering was provided, and match is required, default to the offering of the + // original volume. otherwise it falls through with no check and the target volume may + // not work correctly in some scenarios with the target provider. Adminstrator + // can disable this flag dynamically for certain bulk migration scenarios if required. 
+ if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) { + newDiskOffering = diskOffering; + } validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool); if (vm != null) { From 73a6fd931e57b0e9728d98510cc2096b73e8c5c4 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 29 Mar 2024 18:20:34 +0000 Subject: [PATCH 10/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../orchestration/VolumeOrchestrator.java | 9 +- .../StorageSystemDataMotionStrategy.java | 275 ++++++++++++------ .../impl/DefaultModuleDefinitionSet.java | 5 +- .../acl/DynamicRoleBasedAPIAccessChecker.java | 4 +- .../acl/ProjectRoleBasedApiAccessChecker.java | 8 +- .../kvm/storage/FiberChannelAdapter.java | 31 ++ .../kvm/storage/KVMStorageProcessor.java | 29 +- .../kvm/storage/MultipathSCSIAdapterBase.java | 226 ++++---------- .../datastore/adapter/ProviderAdapter.java | 22 +- .../adapter/ProviderAdapterFactory.java | 4 + .../adapter/ProviderVolumeNamer.java | 1 - .../driver/AdaptiveDataStoreDriverImpl.java | 218 ++++++++++---- .../AdaptiveDataStoreLifeCycleImpl.java | 61 +++- ...tivePrimaryDatastoreAdapterFactoryMap.java | 4 + .../provider/AdaptivePrimaryHostListener.java | 2 + .../adapter/flasharray/FlashArrayAdapter.java | 258 ++++++++-------- .../flasharray/FlashArrayAdapterFactory.java | 5 + .../adapter/flasharray/FlashArrayHost.java | 29 ++ .../adapter/flasharray/FlashArrayVolume.java | 4 +- .../flasharray/FlashArrayVolumePod.java | 17 +- .../adapter/primera/PrimeraAdapter.java | 153 +++++----- .../primera/PrimeraAdapterFactory.java | 5 + .../adapter/primera/PrimeraHost.java | 56 ++++ .../primera/PrimeraHostDescriptor.java | 23 ++ .../adapter/primera/PrimeraHostset.java | 44 +-- .../adapter/primera/PrimeraPort.java | 23 ++ .../adapter/primera/PrimeraPortPos.java | 30 ++ .../PrimeraVolumeCopyRequestParameters.java | 2 +- .../primera/PrimeraVolumePromoteRequest.java | 5 
+- .../oauth2/OAuth2UserAuthenticator.java | 7 + .../java/com/cloud/vm/UserVmManagerImpl.java | 22 +- .../cloudstack/snapshot/SnapshotHelper.java | 4 +- .../vm/UnmanagedVMsManagerImpl.java | 1 + 33 files changed, 1003 insertions(+), 584 deletions(-) create mode 100644 plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java create mode 100644 plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 409b5388d72f..eaf91a0cd246 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1500,18 +1500,17 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest for (VolumeVO vol : vols) { VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); - DataTO volTO = volumeInfo.getTO(); - DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId()); DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); - disk.setDetails(getDetails(volumeInfo, dataStore)); - PrimaryDataStore 
primaryDataStore = (PrimaryDataStore)dataStore; // This might impact other managed storages, enable requires access for migration in relevant datastore driver (currently enabled for PowerFlex storage pool only) if (primaryDataStore.isManaged() && volService.requiresAccessForMigration(volumeInfo, dataStore)) { volService.grantAccess(volFactory.getVolume(vol.getId()), dest.getHost(), dataStore); } - + // make sure this is done AFTER grantAccess, as grantAccess may change the volume's state + DataTO volTO = volumeInfo.getTO(); + DiskTO disk = storageMgr.getDiskWithThrottling(volTO, vol.getVolumeType(), vol.getDeviceId(), vol.getPath(), vm.getServiceOfferingId(), vol.getDiskOfferingId()); + disk.setDetails(getDetails(volumeInfo, dataStore)); vm.addDisk(disk); } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index a93f624aa53c..81540bec3bdc 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -44,6 +44,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; @@ -146,6 +147,7 @@ public class StorageSystemDataMotionStrategy implements DataMotionStrategy { private static final int 
LOCK_TIME_IN_SECONDS = 300; private static final String OPERATION_NOT_SUPPORTED = "This operation is not supported."; + @Inject protected AgentManager agentManager; @Inject @@ -844,12 +846,25 @@ private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo de checkAvailableForMigration(vm); String errMsg = null; + HostVO hostVO = null; try { destVolumeInfo.getDataStore().getDriver().createAsync(destVolumeInfo.getDataStore(), destVolumeInfo, null); VolumeVO volumeVO = _volumeDao.findById(destVolumeInfo.getId()); updatePathFromScsiName(volumeVO); destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - HostVO hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + hostVO = getHostOnWhichToExecuteMigrationCommand(srcVolumeInfo, destVolumeInfo); + + // if managed we need to grant access + PrimaryDataStore pds = (PrimaryDataStore)this.dataStoreMgr.getPrimaryDataStore(destVolumeInfo.getDataStore().getUuid()); + if (pds == null) { + throw new CloudRuntimeException("Unable to find primary data store driver for this volume"); + } + + // grant access (for managed volumes) + _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); // migrate the volume via the hypervisor String path = migrateVolumeForKVM(srcVolumeInfo, destVolumeInfo, hostVO, "Unable to migrate the volume from non-managed storage to managed storage"); @@ -870,6 +885,19 @@ private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo de throw new CloudRuntimeException(errMsg, ex); } } finally { + // revoke access (for managed volumes) + if (hostVO != null) { + try { + _volumeService.revokeAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + } catch (Exception e) { + LOGGER.warn(String.format("Failed 
to revoke access for volume 'name=%s,uuid=%s' after a migration attempt", destVolumeInfo.getVolume(), destVolumeInfo.getUuid()), e); + } + } + + // re-retrieve volume to get any updated information from grant + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + + CopyCmdAnswer copyCmdAnswer; if (errMsg != null) { copyCmdAnswer = new CopyCmdAnswer(errMsg); @@ -920,6 +948,125 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, return hostVO; } + private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) { + VolumeInfo tempVolumeInfo = null; + VolumeVO volumeVO = null; + try { + volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(volumeVO); + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + + if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { + snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); + // refresh volume info as data could have changed + tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + } else { + throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); + } + return tempVolumeInfo; + } catch (Throwable e) { + try { + if (tempVolumeInfo != null) { + tempVolumeInfo.getDataStore().getDriver().deleteAsync(tempVolumeInfo.getDataStore(), tempVolumeInfo, null); + } + + // cleanup temporary volume + if (volumeVO != null) { + _volumeDao.remove(volumeVO.getId()); + } + } catch (Throwable e2) { + LOGGER.warn("Failed to delete temporary volume created for copy", e2); + } 
+ + throw e; + } + } + + /** + * Simplier logic for copy from snapshot for adaptive driver only. + * @param snapshotInfo + * @param destData + * @param callback + */ + private void handleCopyAsyncToSecondaryStorageAdaptive(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + CopyCmdAnswer copyCmdAnswer = null; + DataObject srcFinal = null; + HostVO hostVO = null; + DataStore srcDataStore = null; + boolean tempRequired = false; + + try { + snapshotInfo.processEvent(Event.CopyingRequested); + hostVO = getHost(snapshotInfo); + DataObject destOnStore = destData; + srcDataStore = snapshotInfo.getDataStore(); + int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); + CopyCommand copyCommand = null; + if (!Boolean.parseBoolean(srcDataStore.getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT"))) { + srcFinal = createTemporaryVolumeCopyOfSnapshotAdaptive(snapshotInfo); + tempRequired = true; + } else { + srcFinal = snapshotInfo; + } + + _volumeService.grantAccess(srcFinal, hostVO, srcDataStore); + + DataTO srcTo = srcFinal.getTO(); + + // have to set PATH as extraOptions due to logic in KVM hypervisor processor + HashMap extraDetails = new HashMap<>(); + extraDetails.put(DiskTO.PATH, srcTo.getPath()); + + copyCommand = new CopyCommand(srcFinal.getTO(), destOnStore.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); + copyCommand.setOptions(extraDetails); + copyCmdAnswer = (CopyCmdAnswer)agentManager.send(hostVO.getId(), copyCommand); + } catch (Exception ex) { + String msg = "Failed to create template from snapshot (Snapshot ID = " + snapshotInfo.getId() + ") : "; + LOGGER.warn(msg, ex); + throw new CloudRuntimeException(msg + ex.getMessage(), ex); + } + finally { + // remove access tot he volume that was used + if (srcFinal != null && hostVO != null && srcDataStore != null) { + _volumeService.revokeAccess(srcFinal, hostVO, srcDataStore); + } + + // delete the 
temporary volume if it was needed + if (srcFinal != null && tempRequired) { + try { + srcFinal.getDataStore().getDriver().deleteAsync(srcFinal.getDataStore(), srcFinal, null); + } catch (Throwable e) { + LOGGER.warn("Failed to delete temporary volume created for copy", e); + } + } + + // check we have a reasonable result + String errMsg = null; + if (copyCmdAnswer == null || (!copyCmdAnswer.getResult() && copyCmdAnswer.getDetails() == null)) { + errMsg = "Unable to create template from snapshot"; + copyCmdAnswer = new CopyCmdAnswer(errMsg); + } else if (!copyCmdAnswer.getResult() && StringUtils.isEmpty(copyCmdAnswer.getDetails())) { + errMsg = "Unable to create template from snapshot"; + } else if (!copyCmdAnswer.getResult()) { + errMsg = copyCmdAnswer.getDetails(); + } + + //submit processEvent + if (StringUtils.isEmpty(errMsg)) { + snapshotInfo.processEvent(Event.OperationSuccessed); + } else { + snapshotInfo.processEvent(Event.OperationFailed); + } + + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(copyCmdAnswer.getDetails()); + callback.complete(result); + } + } + /** * This function is responsible for copying a snapshot from managed storage to secondary storage. 
This is used in the following two cases: * 1) When creating a template from a snapshot @@ -930,6 +1077,13 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, * @param callback callback for async */ private void handleCopyAsyncToSecondaryStorage(SnapshotInfo snapshotInfo, DataObject destData, AsyncCompletionCallback callback) { + + // if this flag is set (true or false), we will fall out to use simplier logic for the Adaptive handler + if (snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_DIRECT_ATTACH_SNAPSHOT") != null) { + handleCopyAsyncToSecondaryStorageAdaptive(snapshotInfo, destData, callback); + return; + } + String errMsg = null; CopyCmdAnswer copyCmdAnswer = null; boolean usingBackendSnapshot = false; @@ -1696,14 +1850,15 @@ private void handleCreateVolumeFromVolumeOnSecondaryStorage(VolumeInfo srcVolume private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, - VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; CopyCmdAnswer copyCmdAnswer; try { _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); + copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, + VirtualMachineManager.ExecuteInSequence.value()); Map destDetails = getVolumeDetails(destVolumeInfo); copyCommand.setOptions2(destDetails); @@ -1728,42 +1883,6 @@ private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo des return copyCmdAnswer; } - /** - * Use normal volume semantics (create a volume known to cloudstack, ask the storage driver to create it as a copy of the snapshot) - - * @param volumeVO - * @param snapshotInfo - */ - public void 
prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - try { - volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", - snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); - volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); - _volumeDao.persist(volumeVO); - VolumeInfo tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - - if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { - snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); - // refresh volume info as data could have changed - tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); - // save the "temp" volume info into the snapshot details (we need this to clean up at the end) - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID", tempVolumeInfo.getUuid(), true); - _snapshotDetailsDao.addDetail(snapshotInfo.getId(), "TemporaryVolumeCopyPath", tempVolumeInfo.getPath(), true); - // NOTE: for this to work, the Driver must return a custom SnapshotObjectTO object from getTO() - // whenever the TemporaryVolumeCopyPath is set. - } else { - throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); - } - } catch (Throwable e) { - // cleanup temporary volume - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - throw e; - } - } - /** * If the underlying storage system is making use of read-only snapshots, this gives the storage system the opportunity to * create a volume from the snapshot so that we can copy the VHD file that should be inside of the snapshot to secondary storage. 
@@ -1775,13 +1894,8 @@ public void prepTempVolumeForCopyFromSnapshot(SnapshotInfo snapshotInfo) { * resign the SR and the VDI that should be inside of the snapshot before copying the VHD file to secondary storage. */ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - prepTempVolumeForCopyFromSnapshot(snapshotInfo); - return; - - } - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "create"); + try { snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); } @@ -1796,31 +1910,20 @@ private void createVolumeFromSnapshot(SnapshotInfo snapshotInfo) { * invocation of createVolumeFromSnapshot(SnapshotInfo). */ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { - VolumeVO volumeVO = null; - // cleanup any temporary volume previously created for copy from a snapshot - if ("true".equalsIgnoreCase(snapshotInfo.getDataStore().getDriver().getCapabilities().get("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT"))) { - SnapshotDetailsVO tempUuid = null; - tempUuid = _snapshotDetailsDao.findDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - if (tempUuid == null || tempUuid.getValue() == null) { - return; - } + try { + LOGGER.debug("Cleaning up temporary volume created for copy from a snapshot"); - volumeVO = _volumeDao.findByUuid(tempUuid.getValue()); - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); - } - _snapshotDetailsDao.remove(tempUuid.getId()); - _snapshotDetailsDao.removeDetail(snapshotInfo.getId(), "TemporaryVolumeCopyUUID"); - return; - } + SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); - SnapshotDetailsVO snapshotDetails = handleSnapshotDetails(snapshotInfo.getId(), "delete"); + try { + snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), 
snapshotInfo, null); + } + finally { + _snapshotDetailsDao.remove(snapshotDetails.getId()); + } - try { - snapshotInfo.getDataStore().getDriver().createAsync(snapshotInfo.getDataStore(), snapshotInfo, null); - } - finally { - _snapshotDetailsDao.remove(snapshotDetails.getId()); + } catch (Throwable e) { + LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); } } @@ -2496,15 +2599,17 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; try { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); } + copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + Map srcDetails = getVolumeDetails(volumeInfo); copyCommand.setOptions(srcDetails); @@ -2533,7 +2638,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa throw new CloudRuntimeException(msg + ex.getMessage(), ex); } finally { - if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType()) { + if (srcVolumeDetached || StoragePoolType.PowerFlex == storagePoolVO.getPoolType() || StoragePoolType.FiberChannel == storagePoolVO.getPoolType()) { try { 
_volumeService.revokeAccess(volumeInfo, hostVO, srcDataStore); } @@ -2628,13 +2733,7 @@ private Map getSnapshotDetails(SnapshotInfo snapshotInfo) { long snapshotId = snapshotInfo.getId(); - // if the snapshot required a temporary volume be created check if the UUID is set so we can - // retrieve the temporary volume's path to use during remote copy - List storedDetails = _snapshotDetailsDao.findDetails(snapshotInfo.getId(), "TemporaryVolumeCopyPath"); - if (storedDetails != null && storedDetails.size() > 0) { - String value = storedDetails.get(0).getValue(); - snapshotDetails.put(DiskTO.PATH, value); - } else if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { + if (storagePoolVO.getPoolType() == StoragePoolType.PowerFlex || storagePoolVO.getPoolType() == StoragePoolType.FiberChannel) { snapshotDetails.put(DiskTO.IQN, snapshotInfo.getPath()); } else { snapshotDetails.put(DiskTO.IQN, getSnapshotProperty(snapshotId, DiskTO.IQN)); @@ -2850,6 +2949,8 @@ private String migrateVolumeForKVM(VolumeInfo srcVolumeInfo, VolumeInfo destVolu Map srcDetails = getVolumeDetails(srcVolumeInfo); Map destDetails = getVolumeDetails(destVolumeInfo); + _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); + MigrateVolumeCommand migrateVolumeCommand = new MigrateVolumeCommand(srcVolumeInfo.getTO(), destVolumeInfo.getTO(), srcDetails, destDetails, StorageManager.KvmStorageOfflineMigrationWait.value()); @@ -2892,18 +2993,18 @@ private String copyManagedVolumeToSecondaryStorage(VolumeInfo srcVolumeInfo, Vol StoragePoolVO storagePoolVO = _storagePoolDao.findById(srcVolumeInfo.getPoolId()); Map srcDetails = getVolumeDetails(srcVolumeInfo); - CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, - destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); - - 
copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); - copyVolumeCommand.setSrcDetails(srcDetails); - handleQualityOfServiceForVolumeMigration(srcVolumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); if (srcVolumeDetached) { _volumeService.grantAccess(srcVolumeInfo, hostVO, srcVolumeInfo.getDataStore()); } + CopyVolumeCommand copyVolumeCommand = new CopyVolumeCommand(srcVolumeInfo.getId(), destVolumeInfo.getPath(), storagePoolVO, + destVolumeInfo.getDataStore().getUri(), true, StorageManager.KvmStorageOfflineMigrationWait.value(), true); + + copyVolumeCommand.setSrcData(srcVolumeInfo.getTO()); + copyVolumeCommand.setSrcDetails(srcDetails); + CopyVolumeAnswer copyVolumeAnswer = (CopyVolumeAnswer)agentManager.send(hostVO.getId(), copyVolumeCommand); if (copyVolumeAnswer == null || !copyVolumeAnswer.getResult()) { @@ -2975,7 +3076,7 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps srcData = cacheData; } - CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = null; try { if (Snapshot.LocationType.PRIMARY.equals(locationType)) { @@ -2983,11 +3084,13 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps Map srcDetails = getSnapshotDetails(snapshotInfo); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); copyCommand.setOptions(srcDetails); + } else { + _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); } - _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); - Map destDetails = getVolumeDetails(volumeInfo); copyCommand.setOptions2(destDetails); diff --git 
a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index 6c03c3ce9e16..7c73a27f71f8 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -101,7 +101,10 @@ public void with(ModuleDefinition def, Stack parents) { log.debug(String.format("Trying to obtain module [%s] context.", moduleDefinitionName)); ApplicationContext context = getApplicationContext(moduleDefinitionName); try { - if (context.containsBean("moduleStartup")) { + if (context == null) { + log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); + + } else if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); log.info(String.format("Starting module [%s].", moduleDefinitionName)); runnable.run(); diff --git a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java index cca9e3388687..1dfe20a10be2 100644 --- a/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java +++ b/plugins/acl/dynamic-role-based/src/main/java/org/apache/cloudstack/acl/DynamicRoleBasedAPIAccessChecker.java @@ -122,7 +122,9 @@ public boolean checkAccess(Account account, String commandName) { } if (accountRole.getRoleType() == RoleType.Admin && accountRole.getId() == RoleType.Admin.getId()) { - LOGGER.info(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + if (LOGGER.isTraceEnabled()) { + 
LOGGER.trace(String.format("Account [%s] is Root Admin or Domain Admin, all APIs are allowed.", account)); + } return true; } diff --git a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java index 0306a062df98..cffda4681c66 100644 --- a/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java +++ b/plugins/acl/project-role-based/src/main/java/org/apache/cloudstack/acl/ProjectRoleBasedApiAccessChecker.java @@ -76,7 +76,9 @@ public List getApisAllowedToUser(Role role, User user, List apiN Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning APIs [%s] for user [%s] as allowed.", apiNames, user)); + } return apiNames; } @@ -114,8 +116,10 @@ public boolean checkAccess(User user, String apiCommandName) throws PermissionDe Project project = CallContext.current().getProject(); if (project == null) { - LOGGER.warn(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(String.format("Project is null, ProjectRoleBasedApiAccessChecker only applies to projects, returning API [%s] for user [%s] as allowed.", apiCommandName, user)); + } return true; } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java index 
be7cb727ad77..1bc96dd396e3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java @@ -16,13 +16,39 @@ // under the License. package com.cloud.hypervisor.kvm.storage; +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.log4j.Logger; + import com.cloud.storage.Storage; import com.cloud.utils.exception.CloudRuntimeException; @StorageAdaptorInfo(storagePoolType=Storage.StoragePoolType.FiberChannel) public class FiberChannelAdapter extends MultipathSCSIAdapterBase { + + private Logger LOGGER = Logger.getLogger(getClass()); + + private String hostname = null; + private String hostnameFq = null; + public FiberChannelAdapter() { LOGGER.info("Loaded FiberChannelAdapter for StorageLayer"); + // get the hostname - we need this to compare to connid values + try { + InetAddress inetAddress = InetAddress.getLocalHost(); + hostname = inetAddress.getHostName(); // basic hostname + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + if (hostname.indexOf(".") > 0) { + hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain + } + hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname + LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]"); + } catch (UnknownHostException e) { + LOGGER.error("Error getting hostname", e); + } } @Override @@ -72,6 +98,11 @@ public AddressInfo parseAndValidatePath(String inPath) { address = value; } else if (key.equals("connid")) { connectionId = value; + } else if (key.startsWith("connid.")) { + String inHostname = key.substring(7); + if (inHostname != null && (inHostname.equals(this.hostname) || inHostname.equals(this.hostnameFq))) { + connectionId = value; + } } } } diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index f0ce56e0f3b8..7d4962e57295 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -266,10 +266,16 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { Map details = primaryStore.getDetails(); - String path = details != null ? details.get("managedStoreTarget") : null; + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? details.get("managedStoreTarget") : null; + } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); @@ -405,7 +411,12 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); } if (primaryPool.getType() == StoragePoolType.PowerFlex) { Map details = primaryStore.getDetails(); - String path = details != null ? details.get("managedStoreTarget") : null; + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? 
details.get("managedStoreTarget") : null; + } if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); @@ -1021,7 +1032,9 @@ public Answer backupSnapshot(final CopyCommand cmd) { command.add(NAME_OPTION, snapshotName); command.add("-p", snapshotDestPath); - descName = UUID.randomUUID().toString(); + if (isCreatedFromVmSnapshot) { + descName = UUID.randomUUID().toString(); + } command.add("-t", descName); final String result = command.execute(); @@ -1046,7 +1059,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { srcVolume.clearPassphrase(); if (isCreatedFromVmSnapshot) { s_logger.debug("Ignoring removal of vm snapshot on primary as this snapshot is created from vm snapshot"); - } else if (primaryPool.getType() != StoragePoolType.RBD) { + } else if (primaryPool != null && primaryPool.getType() != StoragePoolType.RBD) { deleteSnapshotOnPrimary(cmd, snapshot, primaryPool); } @@ -2463,8 +2476,12 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } - String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; - destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; + if (destPrimaryStore.getPoolType() == StoragePoolType.FiberChannel) { + destVolumeName = destData.getPath(); + } else { + String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; + destVolumeName = managedStoreTarget != null ? 
managedStoreTarget : destVolumePath; + } } else { final String volumeName = UUID.randomUUID().toString(); destVolumeName = volumeName + "." + destFormat.getFileExtension(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 06dea46a98dd..df1d7035581d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -21,20 +21,17 @@ import java.io.File; import java.io.IOException; import java.io.InputStreamReader; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Timer; import java.util.TimerTask; -import java.util.UUID; import java.util.concurrent.TimeUnit; import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; -import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; +import org.joda.time.Duration; import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; @@ -43,8 +40,7 @@ import com.cloud.utils.script.OutputInterpreter; import com.cloud.utils.script.Script; import org.apache.commons.lang3.StringUtils; -import org.libvirt.LibvirtException; -import org.joda.time.Duration; +import org.apache.log4j.Logger; public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { static final Logger LOGGER = Logger.getLogger(MultipathSCSIAdapterBase.class); @@ -82,6 +78,7 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { * Initialize static program-wide configurations and background jobs */ static { + long cleanupFrequency = CLEANUP_FREQUENCY_SECS.getFinalValue() * 1000; boolean 
cleanupEnabled = CLEANUP_ENABLED.getFinalValue(); @@ -96,16 +93,13 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { throw new Error("Unable to find the disconnectVolume.sh script"); } - resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); - if (resizeScript == null) { - throw new Error("Unable to find the resizeVolume.sh script"); - } - copyScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), copyScript); if (copyScript == null) { throw new Error("Unable to find the copyVolume.sh script"); } + resizeScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), resizeScript); + if (cleanupEnabled) { cleanupScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), cleanupScript); if (cleanupScript == null) { @@ -137,9 +131,6 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) { public abstract boolean isStoragePoolTypeSupported(Storage.StoragePoolType type); - /** - * We expect WWN values in the volumePath so need to convert it to an actual physical path - */ public abstract AddressInfo parseAndValidatePath(String path); @Override @@ -151,6 +142,7 @@ public KVMPhysicalDisk getPhysicalDisk(String volumePath, KVMStoragePool pool) { return null; } + // we expect WWN values in the volumePath so need to convert it to an actual physical path AddressInfo address = parseAndValidatePath(volumePath); return getPhysicalDisk(address, pool); } @@ -194,7 +186,14 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map volumeToDisconnect) { @Override public boolean disconnectPhysicalDiskByPath(String localPath) { LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); + if (localPath == null || (localPath != null && !localPath.startsWith("/dev/mapper/"))) { + LOGGER.debug(String.format("isconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath)); + 
return false; + } ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); return true; + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); + return true; } @Override public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.ImageFormat format) { - LOGGER.info(String.format("deletePhysicalDisk(uuid,pool,format) called with args (%s, %s, %s) [not implemented]", uuid, pool.getUuid(), format.toString())); - return true; + return false; } @Override @@ -275,15 +283,9 @@ public boolean createFolder(String uuid, String path, String localPath) { return true; } - /** - * Validate inputs and return the source file for a template copy - * @param templateFilePath - * @param destTemplatePath - * @param destPool - * @param format - * @return - */ - File createTemplateFromDirectDownloadFileValidate(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format) { + + @Override + public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { if (StringUtils.isAnyEmpty(templateFilePath, destTemplatePath) || destPool == null) { LOGGER.error("Unable to create template from direct download template file due to insufficient data"); throw new CloudRuntimeException("Unable to create template from direct download template file due to insufficient data"); @@ -296,57 +298,18 @@ File createTemplateFromDirectDownloadFileValidate(String templateFilePath, Strin throw new CloudRuntimeException("Direct download template file " + templateFilePath + " does not 
exist on this host"); } - if (destTemplatePath == null || destTemplatePath.isEmpty()) { - LOGGER.error("Failed to create template, target template disk path not provided"); - throw new CloudRuntimeException("Target template disk path not provided"); - } - - if (this.isStoragePoolTypeSupported(destPool.getType())) { - throw new CloudRuntimeException("Unsupported storage pool type: " + destPool.getType().toString()); - } - - if (Storage.ImageFormat.RAW.equals(format) && Storage.ImageFormat.QCOW2.equals(format)) { - LOGGER.error("Failed to create template, unsupported template format: " + format.toString()); - throw new CloudRuntimeException("Unsupported template format: " + format.toString()); - } - return sourceFile; - } - - String extractSourceTemplateIfNeeded(File sourceFile, String templateFilePath) { - String srcTemplateFilePath = templateFilePath; - if (isTemplateExtractable(templateFilePath)) { - srcTemplateFilePath = sourceFile.getParent() + "/" + UUID.randomUUID().toString(); - LOGGER.debug("Extract the downloaded template " + templateFilePath + " to " + srcTemplateFilePath); - String extractCommand = getExtractCommandForDownloadedFile(templateFilePath, srcTemplateFilePath); - Script.runSimpleBashScript(extractCommand); - Script.runSimpleBashScript("rm -f " + templateFilePath); - } - return srcTemplateFilePath; - } - - QemuImg.PhysicalDiskFormat deriveImgFileFormat(Storage.ImageFormat format) { - if (format == Storage.ImageFormat.RAW) { - return QemuImg.PhysicalDiskFormat.RAW; - } else if (format == Storage.ImageFormat.QCOW2) { - return QemuImg.PhysicalDiskFormat.QCOW2; - } else { - return QemuImg.PhysicalDiskFormat.RAW; - } - } - - @Override - public KVMPhysicalDisk createTemplateFromDirectDownloadFile(String templateFilePath, String destTemplatePath, KVMStoragePool destPool, Storage.ImageFormat format, int timeout) { - File sourceFile = createTemplateFromDirectDownloadFileValidate(templateFilePath, destTemplatePath, destPool, format); - 
LOGGER.debug("Create template from direct download template - file path: " + templateFilePath + ", dest path: " + destTemplatePath + ", format: " + format.toString()); - KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(sourceFile.getAbsolutePath()); + KVMPhysicalDisk sourceDisk = destPool.getPhysicalDisk(templateFilePath); return copyPhysicalDisk(sourceDisk, destTemplatePath, destPool, timeout, null, null, Storage.ProvisioningType.THIN); } @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout, byte[] srcPassphrase, byte[] dstPassphrase, Storage.ProvisioningType provisioningType) { + if (StringUtils.isEmpty(name) || disk == null || destPool == null) { + LOGGER.error("Unable to copy physical disk due to insufficient data"); + throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); + } - validateForDiskCopy(disk, name, destPool); LOGGER.info("Copying FROM source physical disk " + disk.getPath() + ", size: " + disk.getSize() + ", virtualsize: " + disk.getVirtualSize()+ ", format: " + disk.getFormat()); KVMPhysicalDisk destDisk = destPool.getPhysicalDisk(name); @@ -366,8 +329,18 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt LOGGER.info("Copying TO destination physical disk " + destDisk.getPath() + ", size: " + destDisk.getSize() + ", virtualsize: " + destDisk.getVirtualSize()+ ", format: " + destDisk.getFormat()); QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); + LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); + /**Script script = new Script( + String.format("%s %s %s %s", copyScript, 
destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()), + Duration.millis(timeout), + LOGGER); + + script.execute(); + int rc = script.getExitValue(); + */ int rc = result.getExitCode(); if (rc != 0) { throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); @@ -377,49 +350,21 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt return destDisk; } - void validateForDiskCopy(KVMPhysicalDisk disk, String name, KVMStoragePool destPool) { - if (StringUtils.isEmpty(name) || disk == null || destPool == null) { - LOGGER.error("Unable to copy physical disk due to insufficient data"); - throw new CloudRuntimeException("Unable to copy physical disk due to insufficient data"); - } - } - - /** - * Copy a disk path to another disk path using QemuImg command - * @param disk - * @param destDisk - * @param name - * @param timeout - */ - void qemuCopy(KVMPhysicalDisk disk, KVMPhysicalDisk destDisk, String name, int timeout) { - QemuImg qemu; - try { - qemu = new QemuImg(timeout); - } catch (LibvirtException | QemuImgException e) { - throw new CloudRuntimeException (e); - } - QemuImgFile srcFile = null; - QemuImgFile destFile = null; - - try { - srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); - destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); - - LOGGER.debug("Starting copy from source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); - qemu.convert(srcFile, destFile, true); - LOGGER.debug("Successfully converted source disk image " + srcFile.getFileName() + " to volume: " + destDisk.getPath()); - } catch (QemuImgException | LibvirtException e) { - try { - Map srcInfo = qemu.info(srcFile); - LOGGER.debug("Source disk info: " + Arrays.asList(srcInfo)); - } catch (Exception ignored) { - LOGGER.warn("Unable to get info from source disk: " + 
disk.getName()); - } - - String errMsg = String.format("Unable to convert/copy from %s to %s, due to: %s", disk.getName(), name, ((StringUtils.isEmpty(e.getMessage())) ? "an unknown error" : e.getMessage())); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg, e); + private static final ScriptResult runScript(String script, long timeout, String...args) { + ScriptResult result = new ScriptResult(); + Script cmd = new Script(script, Duration.millis(timeout), LOGGER); + cmd.add(args); + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + String output = cmd.execute(parser); + // its possible the process never launches which causes an NPE on getExitValue below + if (output != null && output.contains("Unable to execute the command")) { + result.setResult(output); + result.setExitCode(-1); + return result; } + result.setResult(output); + result.setExitCode(cmd.getExitValue()); + return result; } @Override @@ -460,25 +405,9 @@ String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String } } - private static final ScriptResult runScript(String script, long timeout, String...args) { - ScriptResult result = new ScriptResult(); - Script cmd = new Script(script, Duration.millis(timeout), LOGGER); - cmd.add(args); - OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); - String output = cmd.execute(parser); - // its possible the process never launches which causes an NPE on getExitValue below - if (output != null && output.contains("Unable to execute the command")) { - result.setResult(output); - result.setExitCode(-1); - return result; - } - result.setResult(output); - result.setExitCode(cmd.getExitValue()); - return result; - } - boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) { LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " 
secs"); + long scriptTimeoutSecs = 30; // how long to wait for each script execution to run long maxTries = 10; // how many max retries to attempt the script long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait @@ -553,41 +482,8 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l } LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); - return false; - } - - void runConnectScript(String lun, AddressInfo address) { - try { - ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress()); - Process p = builder.start(); - int rc = p.waitFor(); - StringBuffer output = new StringBuffer(); - if (rc == 0) { - BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream())); - String line = null; - while ((line = input.readLine()) != null) { - output.append(line); - output.append(" "); - } - } else { - LOGGER.warn("Failure discovering LUN via " + connectScript); - BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream())); - String line = null; - while ((line = error.readLine()) != null) { - LOGGER.warn("error --> " + line); - } - } - } catch (IOException | InterruptedException e) { - throw new CloudRuntimeException("Problem performing scan on SCSI hosts", e); - } - } - - void sleep(long sleepTimeMs) { - try { - Thread.sleep(sleepTimeMs); - } catch (Exception ex) { - // don't do anything - } + throw new CloudRuntimeException("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid()); + //return false; } long getPhysicalDiskSize(String diskPath) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java index 0cd44cd04c2a..9c0db25d52eb 100644 --- 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapter.java @@ -69,14 +69,14 @@ public interface ProviderAdapter { * @param request * @return */ - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request); + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname); /** * Detach the host from the storage context * @param context * @param request */ - public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request); + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname); /** * Delete the provided volume/object @@ -154,4 +154,22 @@ public interface ProviderAdapter { * @return */ public boolean canAccessHost(ProviderAdapterContext context, String hostname); + + /** + * Returns true if the provider allows direct attach/connection of snapshots to a host + * @return + */ + public boolean canDirectAttachSnapshot(); + + + /** + * Given a ProviderAdapterDataObject, return a map of connection IDs to connection values. Generally + * this would be used to return a map of hostnames and the VLUN ID for the attachment associated with + * that hostname. If the provider is using a hostgroup/hostset model where the ID is assigned in common + * across all hosts in the group, then the map MUST contain a single entry with host key set as a wildcard + * character (exactly '*'). 
+ * @param dataIn + * @return + */ + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java index 13a843d47635..ceedc6530513 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderAdapterFactory.java @@ -19,6 +19,10 @@ import java.util.Map; public interface ProviderAdapterFactory { + /** Name of the provider */ public String getProviderName(); + /** create a new instance of a provider adapter */ public ProviderAdapter create(String url, Map details); + /** returns true if this type of adapter can directly attach snapshots to hosts */ + public Object canDirectAttachSnapshot(); } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java index 5a72871e9c0d..f578b1cc460d 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/adapter/ProviderVolumeNamer.java @@ -21,7 +21,6 @@ public class ProviderVolumeNamer { private static final String SNAPSHOT_PREFIX = "snap"; private static final String VOLUME_PREFIX = "vol"; private static final String TEMPLATE_PREFIX = "tpl"; - /** Simple method to allow sharing storage setup, primarily in lab/testing environment */ private static final String ENV_PREFIX = System.getProperty("adaptive.storage.provider.envIdentifier"); 
public static String generateObjectName(ProviderAdapterContext context, ProviderAdapterDataObject obj) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index d908d48c7dad..32342b951542 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -32,6 +32,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -43,6 +44,7 @@ import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterContext; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDataObject; import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterDiskOffering; +import org.apache.cloudstack.storage.datastore.adapter.ProviderAdapterFactory; import org.apache.cloudstack.storage.datastore.adapter.ProviderSnapshot; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolume; import org.apache.cloudstack.storage.datastore.adapter.ProviderVolumeStats; @@ -53,10 +55,12 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import 
org.apache.cloudstack.storage.datastore.provider.AdaptivePrimaryDatastoreAdapterFactoryMap; +import org.apache.cloudstack.storage.image.store.TemplateObject; +import org.apache.cloudstack.storage.snapshot.SnapshotObject; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.cloudstack.storage.snapshot.SnapshotObject; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -73,7 +77,6 @@ import com.cloud.storage.ResizeVolumePayload; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage.ImageFormat; - import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; @@ -133,6 +136,8 @@ public class AdaptiveDataStoreDriverImpl extends CloudStackPrimaryDataStoreDrive DomainDao _domainDao; @Inject VolumeService _volumeService; + @Inject + VolumeDataFactory volumeDataFactory; private AdaptivePrimaryDatastoreAdapterFactoryMap _adapterFactoryMap = null; @@ -142,7 +147,52 @@ public AdaptiveDataStoreDriverImpl(AdaptivePrimaryDatastoreAdapterFactoryMap fac @Override public DataTO getTO(DataObject data) { - return null; + // we need to get connectionId and and the VLUN ID for currently attached hosts to add to the DataTO object + DataTO to = null; + if (data.getType() == DataObjectType.VOLUME) { + VolumeObjectTO vto = new VolumeObjectTO((VolumeObject)data); + vto.setPath(getPath(data)); + to = vto; + } else if (data.getType() == DataObjectType.TEMPLATE) { + TemplateObjectTO tto = new TemplateObjectTO((TemplateObject)data); + tto.setPath(getPath(data)); + to = tto; + } else if (data.getType() == DataObjectType.SNAPSHOT) { + SnapshotObjectTO sto = new SnapshotObjectTO((SnapshotObject)data); + sto.setPath(getPath(data)); + to = sto; + } else { + to = super.getTO(data); + } + return 
to; + } + + /* + * For the given data object, return the path with current connection info. If a snapshot + * object is passed, we will determine if a temporary volume is available for that + * snapshot object and return that connection info instead. + */ + String getPath(DataObject data) { + StoragePoolVO storagePool = _storagePoolDao.findById(data.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterDataObject dataIn = newManagedDataObject(data, storagePool); + + /** This means the object is not yet associated with the external provider so path is null */ + if (dataIn.getExternalName() == null) { + return null; + } + + ProviderAdapterContext context = newManagedVolumeContext(data); + Map connIdMap = api.getConnectionIdMap(dataIn); + ProviderVolume volume = api.getVolume(context, dataIn); + // if this is an existing object, generate the path for it. + String finalPath = null; + if (volume != null) { + finalPath = generatePathInfo(volume, connIdMap); + } + return finalPath; } @Override @@ -217,11 +267,8 @@ public void createAsync(DataStore dataStore, DataObject dataObject, dataIn.setExternalName(volume.getExternalName()); dataIn.setExternalUuid(volume.getExternalUuid()); - // add the volume to the host set - String connectionId = api.attach(context, dataIn); - // update the cloudstack metadata about the volume - persistVolumeOrTemplateData(storagePool, details, dataObject, volume, connectionId); + persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null); result = new CreateCmdResult(dataObject.getUuid(), new Answer(null)); result.setSuccess(true); @@ -288,6 +335,7 @@ public void copyAsync(DataObject srcdata, DataObject destdata, ProviderAdapterContext context = newManagedVolumeContext(destdata); ProviderAdapterDataObject sourceIn = newManagedDataObject(srcdata, storagePool); ProviderAdapterDataObject destIn =
newManagedDataObject(destdata, storagePool); + outVolume = api.copy(context, sourceIn, destIn); // populate this data - it may be needed later @@ -302,17 +350,9 @@ public void copyAsync(DataObject srcdata, DataObject destdata, api.resize(context, destIn, destdata.getSize()); } - String connectionId = api.attach(context, destIn); - - String finalPath; - // format: type=fiberwwn; address=
; connid= - if (connectionId != null) { - finalPath = String.format("type=%s; address=%s; connid=%s", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase(), connectionId); - } else { - finalPath = String.format("type=%s; address=%s;", outVolume.getAddressType().toString(), outVolume.getAddress().toLowerCase()); - } - - persistVolumeData(storagePool, details, destdata, outVolume, connectionId); + // initial volume info does not have connection map yet. That is added when grantAccess is called later. + String finalPath = generatePathInfo(outVolume, null); + persistVolumeData(storagePool, details, destdata, outVolume, null); s_logger.info("Copy completed from [" + srcdata.getUuid() + "] to [" + destdata.getUuid() + "]"); VolumeObjectTO voto = new VolumeObjectTO(); @@ -442,6 +482,66 @@ public void resize(DataObject data, AsyncCompletionCallback cal } + public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) { + s_logger.debug("Granting host " + host.getName() + " access to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + api.attach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes - unless it was turned off above + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + + + s_logger.info("Granted host " + host.getName() + " access to volume " + dataObject.getUuid()); + 
return true; + } catch (Throwable e) { + String msg = "Error granting host " + host.getName() + " access to volume " + dataObject.getUuid() + ":" + e.getMessage(); + s_logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + + public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) { + // nothing to do if the host is null + if (dataObject == null || host == null || dataStore == null) { + return; + } + + s_logger.debug("Revoking access for host " + host.getName() + " to volume " + dataObject.getUuid()); + + try { + StoragePoolVO storagePool = _storagePoolDao.findById(dataObject.getDataStore().getId()); + Map details = _storagePoolDao.getDetails(storagePool.getId()); + ProviderAdapter api = getAPI(storagePool, details); + + ProviderAdapterContext context = newManagedVolumeContext(dataObject); + ProviderAdapterDataObject sourceIn = newManagedDataObject(dataObject, storagePool); + + api.detach(context, sourceIn, host.getName()); + + // rewrite the volume data, especially the connection string for informational purposes + ProviderVolume vol = api.getVolume(context, sourceIn); + ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool); + Map connIdMap = api.getConnectionIdMap(dataIn); + persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap); + + s_logger.info("Revoked access for host " + host.getName() + " to volume " + dataObject.getUuid()); + } catch (Throwable e) { + String msg = "Error revoking access for host " + host.getName() + " to volume " + dataObject.getUuid() + ":" + e.getMessage(); + s_logger.error(msg); + throw new CloudRuntimeException(msg, e); + } + } + @Override public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) { @@ -492,15 +592,7 @@ public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback getCapabilities() { 
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString()); mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString()); - // indicates the datastore can create temporary volumes for use when copying - // data from a snapshot - mapCapabilities.put("CAN_CREATE_TEMP_VOLUME_FROM_SNAPSHOT", Boolean.TRUE.toString()); - + ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName()); + if (factory != null) { + mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString()); + } else { + mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", Boolean.FALSE.toString()); + } return mapCapabilities; } @@ -667,6 +761,11 @@ public boolean canProvideVolumeStats() { return true; } + @Override + public boolean requiresAccessForMigration(DataObject dataObject) { + return true; + } + public String getProviderName() { return providerName; } @@ -715,8 +814,13 @@ public Pair getVolumeStats(StoragePool storagePool, String volumePat object.setType(ProviderAdapterDataObject.Type.VOLUME); ProviderVolumeStats stats = api.getVolumeStats(context, object); - Long provisionedSizeInBytes = stats.getActualUsedInBytes(); - Long allocatedSizeInBytes = stats.getAllocatedInBytes(); + Long provisionedSizeInBytes = null; + Long allocatedSizeInBytes = null; + if (stats != null) { + provisionedSizeInBytes = stats.getActualUsedInBytes(); + allocatedSizeInBytes = stats.getAllocatedInBytes(); + } + if (provisionedSizeInBytes == null || allocatedSizeInBytes == null) { return null; } @@ -734,31 +838,19 @@ public boolean canHostAccessStoragePool(Host host, StoragePool pool) { } void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map storagePoolDetails, - 
DataObject dataObject, ProviderVolume volume, String connectionId) { + DataObject dataObject, ProviderVolume volume, Map connIdMap) { if (dataObject.getType() == DataObjectType.VOLUME) { - persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap); } else if (dataObject.getType() == DataObjectType.TEMPLATE) { - persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connectionId); + persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap); } } void persistVolumeData(StoragePoolVO storagePool, Map details, DataObject dataObject, - ProviderVolume managedVolume, String connectionId) { + ProviderVolume managedVolume, Map connIdMap) { VolumeVO volumeVO = _volumeDao.findById(dataObject.getId()); - // if its null check if the storage provider returned one that is already set - if (connectionId == null) { - connectionId = managedVolume.getExternalConnectionId(); - } - - String finalPath; - // format: type=fiberwwn; address=
; connid= - if (connectionId != null) { - finalPath = String.format("type=%s; address=%s; connid=%s", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase(), connectionId); - } else { - finalPath = String.format("type=%s; address=%s;", managedVolume.getAddressType().toString(), managedVolume.getAddress().toLowerCase()); - } - + String finalPath = generatePathInfo(managedVolume, connIdMap); volumeVO.setPath(finalPath); volumeVO.setFormat(ImageFormat.RAW); volumeVO.setPoolId(storagePool.getId()); @@ -783,25 +875,31 @@ void persistVolumeData(StoragePoolVO storagePool, Map details, D } void persistTemplateData(StoragePoolVO storagePool, Map details, DataObject dataObject, - ProviderVolume volume, String connectionId) { + ProviderVolume volume, Map connIdMap) { TemplateInfo templateInfo = (TemplateInfo) dataObject; VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(), templateInfo.getId(), null); - // template pool ref doesn't have a details object so we'll save: - // 1. external name ==> installPath - // 2. 
address ==> local download path - if (connectionId == null) { - templatePoolRef.setInstallPath(String.format("type=%s; address=%s", volume.getAddressType().toString(), - volume.getAddress().toLowerCase())); - } else { - templatePoolRef.setInstallPath(String.format("type=%s; address=%s; connid=%s", volume.getAddressType().toString(), - volume.getAddress().toLowerCase(), connectionId)); - } + + templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap)); templatePoolRef.setLocalDownloadPath(volume.getExternalName()); templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes()); _vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef); } + String generatePathInfo(ProviderVolume volume, Map connIdMap) { + String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;", + volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid()); + + // if a map was provided, add the connection IDs to the path info. 
the map is all the possible vlun id's used + // across each host or the hostset (represented with host name key as "*"); + if (connIdMap != null && connIdMap.size() > 0) { + for (String key: connIdMap.keySet()) { + finalPath += String.format(" connid.%s=%s;", key, connIdMap.get(key)); + } + } + return finalPath; + } + ProviderAdapterContext newManagedVolumeContext(DataObject obj) { ProviderAdapterContext ctx = new ProviderAdapterContext(); if (obj instanceof VolumeInfo) { diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index 56d9a25f34f8..26d2494eacf8 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -189,7 +189,6 @@ public DataStore initialize(Map dsInfos) { parameters.setName(dsName); parameters.setProviderName(providerName); parameters.setManaged(true); - parameters.setCapacityBytes(capacityBytes); parameters.setUsedBytes(0); parameters.setCapacityIops(capacityIops); parameters.setHypervisorType(HypervisorType.KVM); @@ -223,7 +222,7 @@ public DataStore initialize(Map dsInfos) { // if we have user-provided capacity bytes, validate they do not exceed the manaaged storage capacity bytes ProviderVolumeStorageStats stats = api.getManagedStorageStats(); - if (capacityBytes != null && capacityBytes != 0) { + if (capacityBytes != null && capacityBytes != 0 && stats != null) { if (stats.getCapacityInBytes() > 0) { if (stats.getCapacityInBytes() < capacityBytes) { throw new InvalidParameterValueException("Capacity bytes provided exceeds the capacity of the storage endpoint: provided by user: " + capacityBytes + ", 
storage capacity from storage provider: " + stats.getCapacityInBytes()); @@ -233,8 +232,8 @@ public DataStore initialize(Map dsInfos) { } // if we have no user-provided capacity bytes, use the ones provided by storage else { - if (stats.getCapacityInBytes() <= 0) { - throw new InvalidParameterValueException("Capacity bytes note available from the storage provider, user provided capacity bytes must be specified"); + if (stats == null || stats.getCapacityInBytes() <= 0) { + throw new InvalidParameterValueException("Capacity bytes not available from the storage provider, user provided capacity bytes must be specified"); } parameters.setCapacityBytes(stats.getCapacityInBytes()); } @@ -383,8 +382,58 @@ public boolean migrateToObjectStore(DataStore store) { * Update the storage pool configuration */ @Override - public void updateStoragePool(StoragePool storagePool, Map details) { - _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), details); + public void updateStoragePool(StoragePool storagePool, Map newDetails) { + /**String newAuthnType = newDetails.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY); + String newUser = newDetails.get(ProviderAdapter.API_USERNAME_KEY); + String newToken = newDetails.get(ProviderAdapter.API_TOKEN_KEY); + String newPassword = fetchMightBeEncryptedProperty(ProviderAdapter.API_PASSWORD_KEY, newDetails); + String newSecret = fetchMightBeEncryptedProperty(ProviderAdapter.API_TOKEN_KEY, newDetails); + String newUrl = newDetails.get(ProviderAdapter.API_URL_KEY); + String skipTlsValidationStr = newDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); + Boolean newSkipTlsValidation = null; + if (skipTlsValidationStr != null) { + newSkipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); + } + + String capacityInBytesStr = newDetails.get("capacityBytes"); + Long newCapacityInBytes = null; + if (capacityInBytesStr != null) { + newCapacityInBytes = Long.parseLong(capacityInBytesStr); + } + + String 
capacityIopsStr = newDetails.get("capacityIops"); + Long newCapacityIops = null; + if (capacityIopsStr != null) { + newCapacityIops = Long.parseLong(capacityIopsStr); + } + + + Map existingDetails = _primaryDataStoreDao.getDetails(storagePool.getId()); + if (newAuthnType != null) { + existingDetails.put(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY, newAuthnType); + } + + if (newUser != null) existingDetails.put(ProviderAdapter.API_USERNAME_KEY, newUser); + if (newToken != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newToken); + if (newPassword != null) existingDetails.put(ProviderAdapter.API_PASSWORD_KEY, newPassword); + if (newSecret != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newSecret); + if (newUrl != null) existingDetails.put(ProviderAdapter.API_URL_KEY, newUrl); + if (newSkipTlsValidation != null) existingDetails.put(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY, newSkipTlsValidation.toString()); + if (newCapacityInBytes != null) existingDetails.put("capacityBytes", capacityInBytesStr); + if (newCapacityIops != null) existingDetails.put("capacityIops", capacityIopsStr); + + _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), existingDetails);*/ + _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails); + } + + private String fetchMightBeEncryptedProperty(String key, Map details) { + String value; + try { + value = DBEncryptionUtil.decrypt(details.get(key)); + } catch (Exception e) { + value = details.get(key); + } + return value; } /** diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java index ee5caa7178ef..e68153512d3d 100644 --- 
a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryDatastoreAdapterFactoryMap.java @@ -131,4 +131,8 @@ protected ProviderAdapter createNewAdapter(String uuid, String providerName, Map logger.debug("Creating new ProviderAdapter object for endpoint: " + providerName + "@" + url); return api; } + + public ProviderAdapterFactory getFactory(String providerName) { + return this.factoryMap.get(providerName); + } } diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java index 68dd4a15c62a..2a58c8f86f26 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/provider/AdaptivePrimaryHostListener.java @@ -54,6 +54,8 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep if (storagePoolHost == null) { storagePoolHost = new StoragePoolHostVO(poolId, hostId, ""); storagePoolHostDao.persist(storagePoolHost); + } else { + return false; } return true; } diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 3082a19c7324..e26f593189d8 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ 
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -25,7 +25,6 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; import javax.net.ssl.HostnameVerifier; @@ -109,7 +108,8 @@ protected FlashArrayAdapter(String url, Map details) { } @Override - public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, ProviderAdapterDiskOffering offering, long size) { + public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, + ProviderAdapterDiskOffering offering, long size) { FlashArrayVolume request = new FlashArrayVolume(); request.setExternalName( pod + "::" + ProviderVolumeNamer.generateObjectName(context, dataObject)); @@ -128,30 +128,50 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData * cluster (depending on Cloudstack Storage Pool configuration) */ @Override - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) { + + // should not happen but double check for sanity + if (dataObject.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) { + throw new RuntimeException("This storage provider does not support direct attachments of snapshots to hosts"); + } + String volumeName = normalizeName(pod, dataObject.getExternalName()); try { - FlashArrayList list = POST("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName, null, new TypeReference> () { }); + FlashArrayList list = null; + FlashArrayHost host = getHost(hostname); + if (host != null) { + list = POST("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName, null, + new TypeReference>() { + }); + } if (list == null || list.getItems() == null 
|| list.getItems().size() == 0) { throw new RuntimeException("Volume attach did not return lun information"); } - FlashArrayConnection connection = (FlashArrayConnection)this.getFlashArrayItem(list); + FlashArrayConnection connection = (FlashArrayConnection) this.getFlashArrayItem(list); if (connection.getLun() == null) { throw new RuntimeException("Volume attach missing lun field"); } - return ""+connection.getLun(); + return "" + connection.getLun(); } catch (Throwable e) { - // the volume is already attached. happens in some scenarios where orchestration creates the volume before copying to it + // the volume is already attached. happens in some scenarios where orchestration + // creates the volume before copying to it if (e.toString().contains("Connection already exists")) { FlashArrayList list = GET("/connections?volume_names=" + volumeName, - new TypeReference>() { - }); + new TypeReference>() { + }); if (list != null && list.getItems() != null) { - return ""+list.getItems().get(0).getLun(); + for (FlashArrayConnection conn : list.getItems()) { + if (conn.getHost() != null && conn.getHost().getName() != null && + (conn.getHost().getName().equals(hostname) || conn.getHost().getName().equals(hostname.substring(0, hostname.indexOf('.')))) && + conn.getLun() != null) { + return "" + conn.getLun(); + } + } + throw new RuntimeException("Volume lun is not found in existing connection"); } else { throw new RuntimeException("Volume lun is not found in existing connection"); } @@ -162,9 +182,18 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d } @Override - public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject) { + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, String hostname) { String volumeName = normalizeName(pod, dataObject.getExternalName()); - DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); + // hostname is always 
provided by cloudstack, but we will detach from hostgroup + // if this pool is configured to use hostgroup for attachments + if (hostgroup != null) { + DELETE("/connections?host_group_names=" + hostgroup + "&volume_names=" + volumeName); + } + + FlashArrayHost host = getHost(hostname); + if (host != null) { + DELETE("/connections?host_names=" + host.getName() + "&volume_names=" + volumeName); + } } @Override @@ -205,8 +234,6 @@ public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterD return null; } - populateConnectionId(volume); - return volume; } catch (Exception e) { // assume any exception is a not found. Flash returns 400's for most errors @@ -217,7 +244,7 @@ public ProviderVolume getVolume(ProviderAdapterContext context, ProviderAdapterD @Override public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, AddressType addressType, String address) { // public FlashArrayVolume getVolumeByWwn(String wwn) { - if (address == null ||addressType == null) { + if (address == null || addressType == null) { throw new RuntimeException("Invalid search criteria provided for getVolumeByAddress"); } @@ -234,21 +261,19 @@ public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, Address FlashArrayVolume volume = null; try { FlashArrayList list = GET("/volumes?filter=" + query, - new TypeReference>() { - }); + new TypeReference>() { + }); // if we didn't get an address back its likely an empty object if (list == null || list.getItems() == null || list.getItems().size() == 0) { return null; } - volume = (FlashArrayVolume)this.getFlashArrayItem(list); + volume = (FlashArrayVolume) this.getFlashArrayItem(list); if (volume != null && volume.getAddress() == null) { return null; } - populateConnectionId(volume); - return volume; } catch (Exception e) { // assume any exception is a not found. 
Flash returns 400's for most errors @@ -256,32 +281,6 @@ public ProviderVolume getVolumeByAddress(ProviderAdapterContext context, Address } } - private void populateConnectionId(FlashArrayVolume volume) { - // we need to see if there is a connection (lun) associated with this volume. - // note we assume 1 lun for the hostgroup associated with this object - FlashArrayList list = null; - try { - list = GET("/connections?volume_names=" + volume.getExternalName(), - new TypeReference>() { - }); - } catch (CloudRuntimeException e) { - // this means there is no attachment associated with this volume on the array - if (e.toString().contains("Bad Request")) { - return; - } - } - - if (list != null && list.getItems() != null) { - for (FlashArrayConnection conn: list.getItems()) { - if (conn.getHostGroup() != null && conn.getHostGroup().getName().equals(this.hostgroup)) { - volume.setExternalConnectionId(""+conn.getLun()); - break; - } - } - - } - } - @Override public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dataObject, long newSizeInBytes) { // public void resizeVolume(String volumeNamespace, String volumeName, long @@ -299,7 +298,8 @@ public void resize(ProviderAdapterContext context, ProviderAdapterDataObject dat * @return */ @Override - public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject targetDataObject) { + public ProviderSnapshot snapshot(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, + ProviderAdapterDataObject targetDataObject) { // public FlashArrayVolume snapshotVolume(String volumeNamespace, String // volumeName, String snapshotName) { FlashArrayList list = POST( @@ -354,11 +354,12 @@ public ProviderSnapshot getSnapshot(ProviderAdapterContext context, ProviderAdap } @Override - public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, ProviderAdapterDataObject destDataObject) { + 
public ProviderVolume copy(ProviderAdapterContext context, ProviderAdapterDataObject sourceDataObject, + ProviderAdapterDataObject destDataObject) { // private ManagedVolume copy(ManagedVolume sourceVolume, String destNamespace, // String destName) { if (sourceDataObject == null || sourceDataObject.getExternalName() == null - ||sourceDataObject.getType() == null) { + || sourceDataObject.getType() == null) { throw new RuntimeException("Provided volume has no external source information"); } @@ -424,12 +425,6 @@ public void refresh(Map details) { @Override public void validate() { login(); - // check if hostgroup and pod from details really exist - we will - // require a distinct configuration object/connection object for each type - if (this.getHostgroup(hostgroup) == null) { - throw new RuntimeException("Hostgroup [" + hostgroup + "] not found in FlashArray at [" + url - + "], please validate configuration"); - } if (this.getVolumeNamespace(pod) == null) { throw new RuntimeException( @@ -477,36 +472,33 @@ public boolean canAccessHost(ProviderAdapterContext context, String hostname) { throw new RuntimeException("Unable to validate host access because a hostname was not provided"); } - List members = getHostgroupMembers(hostgroup); - - // check for fqdn and shortname combinations. 
this assumes there is at least a shortname match in both the storage array and cloudstack - // hostname configuration - String shortname; - if (hostname.indexOf('.') > 0) { - shortname = hostname.substring(0, (hostname.indexOf('.'))); - } else { - shortname = hostname; + FlashArrayHost host = getHost(hostname); + if (host != null) { + return true; } - for (String member : members) { - // exact match (short or long names) - if (member.equals(hostname)) { - return true; - } + return false; + } - // primera has short name and cloudstack had long name - if (member.equals(shortname)) { - return true; - } + private FlashArrayHost getHost(String hostname) { + FlashArrayList list = null; - // member has long name but cloudstack had shortname - if (member.indexOf('.') > 0) { - if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { - return true; - } + try { + list = GET("/hosts?names=" + hostname, + new TypeReference>() { + }); + } catch (Exception e) { + + } + + if (list == null) { + if (hostname.indexOf('.') > 0) { + list = GET("/hosts?names=" + hostname.substring(0, (hostname.indexOf('.'))), + new TypeReference>() { + }); } } - return false; + return (FlashArrayHost) getFlashArrayItem(list); } private String getAccessToken() { @@ -527,13 +519,21 @@ private synchronized void refreshSession(boolean force) { } } catch (Exception e) { // retry frequently but not every request to avoid DDOS on storage API - logger.warn("Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", + logger.warn( + "Failed to refresh FlashArray API key for " + username + "@" + url + ", will retry in 5 seconds", e); keyExpiration = System.currentTimeMillis() + (5 * 1000); } } - private void validateLoginInfo(String urlStr) { + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + 
String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); + URL urlFull; try { urlFull = new URL(urlStr); @@ -571,15 +571,6 @@ private void validateLoginInfo(String urlStr) { } } - hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP); - if (hostgroup == null) { - hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP); - if (hostgroup == null) { - throw new RuntimeException( - FlashArrayAdapter.STORAGE_POD + " paramater/option required to configure this storage pool"); - } - } - apiLoginVersion = connectionDetails.get(FlashArrayAdapter.API_LOGIN_VERSION); if (apiLoginVersion == null) { apiLoginVersion = queryParms.get(FlashArrayAdapter.API_LOGIN_VERSION); @@ -596,6 +587,12 @@ private void validateLoginInfo(String urlStr) { } } + // retrieve for legacy purposes. if set, we'll remove any connections to hostgroup we find and use the host + hostgroup = connectionDetails.get(FlashArrayAdapter.HOSTGROUP); + if (hostgroup == null) { + hostgroup = queryParms.get(FlashArrayAdapter.HOSTGROUP); + } + String connTimeoutStr = connectionDetails.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); if (connTimeoutStr == null) { connTimeoutStr = queryParms.get(FlashArrayAdapter.CONNECT_TIMEOUT_MS); @@ -651,16 +648,7 @@ private void validateLoginInfo(String urlStr) { } else { skipTlsValidation = true; } - } - /** - * Login to the array and get an access token - */ - private void login() { - username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); - password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); - String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - validateLoginInfo(urlStr); CloseableHttpResponse response = null; try { HttpPost request = new HttpPost(url + "/" + apiLoginVersion + "/auth/apitoken"); @@ -749,7 +737,13 @@ private void removeVlunsAll(ProviderAdapterContext context, String volumeNamespa if (list != null && list.getItems() != null) { for (FlashArrayConnection conn : list.getItems()) { - 
DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + volumeName); + if (hostgroup != null && conn.getHostGroup() != null && conn.getHostGroup().getName() != null) { + DELETE("/connections?host_group_names=" + conn.getHostGroup().getName() + "&volume_names=" + + volumeName); + break; + } else if (conn.getHost() != null && conn.getHost().getName() != null) { + DELETE("/connections?host_names=" + conn.getHost().getName() + "&volume_names=" + volumeName); + } } } } @@ -762,30 +756,10 @@ private FlashArrayVolume getVolume(String volumeName) { } private FlashArrayPod getVolumeNamespace(String name) { - FlashArrayList list = GET("/pods?names=" + name, new TypeReference>() { - }); - return (FlashArrayPod) getFlashArrayItem(list); - } - - private FlashArrayHostgroup getHostgroup(String name) { - FlashArrayList list = GET("/host-groups?name=" + name, - new TypeReference>() { - }); - return (FlashArrayHostgroup) getFlashArrayItem(list); - } - - private List getHostgroupMembers(String groupname) { - FlashArrayGroupMemberReferenceList list = GET("/hosts/host-groups?group_names=" + groupname, - new TypeReference() { + FlashArrayList list = GET("/pods?names=" + name, + new TypeReference>() { }); - if (list == null || list.getItems().size() == 0) { - return null; - } - List hostnames = new ArrayList(); - for (FlashArrayGroupMemberReference ref : list.getItems()) { - hostnames.add(ref.getMember().getName()); - } - return hostnames; + return (FlashArrayPod) getFlashArrayItem(list); } private FlashArrayVolume getSnapshot(String snapshotName) { @@ -856,7 +830,8 @@ private T POST(String path, Object input, final TypeReference type) { } return null; } catch (UnsupportedOperationException | IOException e) { - throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", e); + throw new CloudRuntimeException("Error processing response from FlashArray [" + url + path + "]", + e); } } else if (statusCode == 400) { 
try { @@ -1083,4 +1058,39 @@ private Long roundUp512Boundary(Long sizeInBytes) { } return sizeInBytes; } + + @Override + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn) { + Map map = new HashMap(); + + // flasharray doesn't let you directly map a snapshot to a host, so we'll just return an empty map + if (dataIn.getType() == ProviderAdapterDataObject.Type.SNAPSHOT) { + return map; + } + + try { + FlashArrayList list = GET("/connections?volume_names=" + dataIn.getExternalName(), + new TypeReference>() { + }); + + if (list != null && list.getItems() != null) { + for (FlashArrayConnection conn : list.getItems()) { + if (conn.getHost() != null) { + map.put(conn.getHost().getName(), "" + conn.getLun()); + } + } + } + } catch (Exception e) { + // flasharray returns a 400 if the volume doesn't exist, so we'll just return an empty object. + if (logger.isTraceEnabled()) { + logger.trace("Error getting connection map for volume [" + dataIn.getExternalName() + "]: " + e.toString(), e); + } + } + return map; + } + + @Override + public boolean canDirectAttachSnapshot() { + return false; + } } diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java index d1c3cee8fa8b..de55b27884eb 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapterFactory.java @@ -33,4 +33,9 @@ public ProviderAdapter create(String url, Map details) { return new FlashArrayAdapter(url, details); } + @Override + public Object canDirectAttachSnapshot() { + return false; + } + } diff --git 
a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java new file mode 100644 index 000000000000..0c3a1e7179d5 --- /dev/null +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java @@ -0,0 +1,29 @@ +package org.apache.cloudstack.storage.datastore.adapter.flasharray; + +import java.util.List; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlashArrayHost { + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public List getWwns() { + return wwns; + } + public void setWwns(List wwns) { + this.wwns = wwns; + } + @JsonProperty("name") + private String name; + @JsonProperty("wwns") + private List wwns; + +} diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java index f939d70a77f4..a3201a753a75 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolume.java @@ -83,7 +83,7 @@ public String getName() { @JsonIgnore public String getPodName() { if (pod != null) { - return pod.getName(); + return pod.name; } else { return null; } @@ -129,7 +129,7 @@ public void setName(String name) { } public void 
setPodName(String podname) { FlashArrayVolumePod pod = new FlashArrayVolumePod(); - pod.setName(podname); + pod.name = podname; this.pod = pod; } @Override diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java index 1e46441e7d1e..e9c10f84a750 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayVolumePod.java @@ -24,20 +24,7 @@ @JsonInclude(JsonInclude.Include.NON_NULL) public class FlashArrayVolumePod { @JsonProperty("id") - private String id; + public String id; @JsonProperty("name") - private String name; - - public String getId() { - return id; - } - public void setId(String id) { - this.id = id; - } - public String getName() { - return name; - } - public void setName(String name) { - this.name = name; - } + public String name; } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 69f98567f728..8e1375a77027 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -24,7 +24,6 @@ import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.util.HashMap; -import java.util.List; import java.util.Map; import javax.net.ssl.HostnameVerifier; @@ -106,18 +105,11 @@ public void refresh(Map 
details) { this.refreshSession(true); } - /** - * Validate that the hostgroup and pod from the details data exists. Each - * configuration object/connection needs a distinct set of these 2 things. - */ @Override public void validate() { login(); - if (this.getHostset(hostset) == null) { - throw new RuntimeException("Hostgroup [" + hostset + "] not found in FlashArray at [" + url - + "], please validate configuration"); - } - + // check if hostgroup and pod from details really exist - we will + // require a distinct configuration object/connection object for each type if (this.getCpg(cpg) == null) { throw new RuntimeException( "Pod [" + cpg + "] not found in FlashArray at [" + url + "], please validate configuration"); @@ -176,10 +168,15 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData } @Override - public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn) { + public String attach(ProviderAdapterContext context, ProviderAdapterDataObject dataIn, String hostname) { assert dataIn.getExternalName() != null : "External name not provided internally on volume attach"; PrimeraHostset.PrimeraHostsetVLUNRequest request = new PrimeraHostset.PrimeraHostsetVLUNRequest(); - request.setHostname("set:" + hostset); + PrimeraHost host = getHost(hostname); + if (host == null) { + throw new RuntimeException("Unable to find host " + hostname + " on storage provider"); + } + request.setHostname(host.getName()); + request.setVolumeName(dataIn.getExternalName()); request.setAutoLun(true); // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4 @@ -194,12 +191,36 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d return toks[1]; } - @Override + /** + * This detaches ALL vlun's for the provided volume name IF they are associated to this hostset + * @param context + * @param request + */ public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request) 
{ + detach(context, request, null); + } + + @Override + public void detach(ProviderAdapterContext context, ProviderAdapterDataObject request, String hostname) { // we expect to only be attaching one hostset to the vluns, so on detach we'll // remove ALL vluns we find. assert request.getExternalName() != null : "External name not provided internally on volume detach"; - removeAllVluns(request.getExternalName()); + + PrimeraVlunList list = getVluns(request.getExternalName()); + if (list != null && list.getMembers().size() > 0) { + list.getMembers().forEach(vlun -> { + // remove any hostset from old code if configured + if (hostset != null && vlun.getHostname() != null && vlun.getHostname().equals("set:" + hostset)) { + removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname()); + } + + if (hostname != null) { + if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) { + removeVlun(request.getExternalName(), vlun.getLun(), vlun.getHostname()); + } + } + }); + } } public void removeVlun(String name, Integer lunid, String hostString) { @@ -208,20 +229,7 @@ public void removeVlun(String name, Integer lunid, String hostString) { DELETE("/vluns/" + name + "," + lunid + "," + hostString); } - /** - * Removes all vluns - this should only be done when you are sure the volume is no longer in use - * @param name - */ - public void removeAllVluns(String name) { - PrimeraVlunList list = getVolumeHostsets(name); - if (list != null && list.getMembers() != null) { - for (PrimeraVlun vlun: list.getMembers()) { - removeVlun(vlun.getVolumeName(), vlun.getLun(), vlun.getHostname()); - } - } - } - - public PrimeraVlunList getVolumeHostsets(String name) { + public PrimeraVlunList getVluns(String name) { String query = "%22volumeName%20EQ%20" + name + "%22"; return GET("/vluns?query=" + query, new TypeReference() {}); } @@ -231,7 +239,7 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject req assert 
request.getExternalName() != null : "External name not provided internally on volume delete"; // first remove vluns (take volumes from vluns) from hostset - removeAllVluns(request.getExternalName()); + detach(context, request); DELETE("/volumes/" + request.getExternalName()); } @@ -420,6 +428,7 @@ public ProviderVolumeStorageStats getManagedStorageStats() { if (cpgobj == null || cpgobj.getTotalSpaceMiB() == 0) { return null; } + Long capacityBytes = 0L; if (cpgobj.getsDGrowth() != null) { capacityBytes = cpgobj.getsDGrowth().getLimitMiB() * PrimeraAdapter.BYTES_IN_MiB; @@ -453,39 +462,25 @@ public ProviderVolumeStats getVolumeStats(ProviderAdapterContext context, Provid @Override public boolean canAccessHost(ProviderAdapterContext context, String hostname) { - PrimeraHostset hostset = getHostset(this.hostset); - - List members = hostset.getSetmembers(); - - // check for fqdn and shortname combinations. this assumes there is at least a shortname match in both the storage array and cloudstack - // hostname configuration - String shortname; - if (hostname.indexOf('.') > 0) { - shortname = hostname.substring(0, (hostname.indexOf('.'))); - } else { - shortname = hostname; + // check that the array has the host configured + PrimeraHost host = this.getHost(hostname); + if (host != null) { + // if hostset is configured we'll additionally check if the host is in it (legacy/original behavior) + return true; } - for (String member: members) { - // exact match (short or long names) - if (member.equals(hostname)) { - return true; - } - // primera has short name and cloudstack had long name - if (member.equals(shortname)) { - return true; - } + return false; + } - // member has long name but cloudstack had shortname - int index = member.indexOf("."); - if (index > 0) { - if (member.substring(0, (member.indexOf('.'))).equals(shortname)) { - return true; - } + private PrimeraHost getHost(String name) { + PrimeraHost host = GET("/hosts/" + name, new TypeReference() { }); + if (host 
== null) { + if (name.indexOf('.') > 0) { + host = this.getHost(name.substring(0, (name.indexOf('.')))); } } + return host; - return false; } private PrimeraCpg getCpg(String name) { @@ -493,11 +488,6 @@ private PrimeraCpg getCpg(String name) { }); } - private PrimeraHostset getHostset(String name) { - return GET("/hostsets/" + name, new TypeReference() { - }); - } - private String getSessionKey() { refreshSession(false); return key; @@ -518,8 +508,14 @@ private synchronized void refreshSession(boolean force) { keyExpiration = System.currentTimeMillis() + (5*1000); } } + /** + * Login to the array and get an access token + */ + private void login() { + username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); + password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); + String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - private void validateLoginInfo(String urlStr) { URL urlFull; try { urlFull = new URL(urlStr); @@ -566,13 +562,10 @@ private void validateLoginInfo(String urlStr) { } } + // if this is null, we will use direct-to-host vlunids (preferred) hostset = connectionDetails.get(PrimeraAdapter.HOSTSET); if (hostset == null) { hostset = queryParms.get(PrimeraAdapter.HOSTSET); - if (hostset == null) { - throw new RuntimeException( - PrimeraAdapter.HOSTSET + " paramater/option required to configure this storage pool"); - } } String connTimeoutStr = connectionDetails.get(PrimeraAdapter.CONNECT_TIMEOUT_MS); @@ -629,16 +622,7 @@ private void validateLoginInfo(String urlStr) { } else { skipTlsValidation = true; } - } - /** - * Login to the array and get an access token - */ - private void login() { - username = connectionDetails.get(ProviderAdapter.API_USERNAME_KEY); - password = connectionDetails.get(ProviderAdapter.API_PASSWORD_KEY); - String urlStr = connectionDetails.get(ProviderAdapter.API_URL_KEY); - validateLoginInfo(urlStr); CloseableHttpResponse response = null; try { HttpPost request = new HttpPost(url + 
"/credentials"); @@ -720,7 +704,7 @@ private T POST(String path, Object input, final TypeReference type) { try { String data = mapper.writeValueAsString(input); request.setEntity(new StringEntity(data)); - logger.debug("POST data: " + request.getEntity()); + if (logger.isTraceEnabled()) logger.trace("POST data: " + request.getEntity()); } catch (UnsupportedEncodingException | JsonProcessingException e) { throw new RuntimeException( "Error processing request payload to [" + url + "] for path [" + path + "]", e); @@ -926,5 +910,22 @@ private void DELETE(String path) { } } + @Override + public Map getConnectionIdMap(ProviderAdapterDataObject dataIn) { + Map connIdMap = new HashMap(); + PrimeraVlunList list = this.getVluns(dataIn.getExternalName()); + + if (list != null && list.getMembers() != null && list.getMembers().size() > 0) { + for (PrimeraVlun vlun: list.getMembers()) { + connIdMap.put(vlun.getHostname(), ""+vlun.getLun()); + } + } + + return connIdMap; + } + @Override + public boolean canDirectAttachSnapshot() { + return true; + } } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java index 81ae442b38df..43a0245431e8 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapterFactory.java @@ -33,4 +33,9 @@ public ProviderAdapter create(String url, Map details) { return new PrimeraAdapter(url, details); } + @Override + public Object canDirectAttachSnapshot() { + return true; + } + } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java 
b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java new file mode 100644 index 000000000000..e7371c329c49 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHost.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.storage.datastore.adapter.primera; + +import java.util.List; + + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraHost { + private Integer id; + private String name; + private List fcPaths; + private PrimeraHostDescriptor descriptors; + public Integer getId() { + return id; + } + public void setId(Integer id) { + this.id = id; + } + public String getName() { + return name; + } + public void setName(String name) { + this.name = name; + } + public List getFcPaths() { + return fcPaths; + } + public void setFcPaths(List fcPaths) { + this.fcPaths = fcPaths; + } + public PrimeraHostDescriptor getDescriptors() { + return descriptors; + } + public void setDescriptors(PrimeraHostDescriptor descriptors) { + this.descriptors = descriptors; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java new file mode 100644 index 000000000000..29ba90ffe7c7 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java @@ -0,0 +1,23 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraHostDescriptor { + private String IPAddr = null; + private String os = null; + public String getIPAddr() { + return IPAddr; + } + public void setIPAddr(String iPAddr) { + IPAddr = iPAddr; + } + public String getOs() { + return os; + } + public void 
setOs(String os) { + this.os = os; + } + +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java index e062f0782af5..0d3c6146a79a 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostset.java @@ -34,105 +34,115 @@ public class PrimeraHostset { private String uuid; private Map additionalProperties = new LinkedHashMap(); + + public String getComment() { return comment; } + + public void setComment(String comment) { this.comment = comment; } + + public Integer getId() { return id; } + + public void setId(Integer id) { this.id = id; } + + public String getName() { return name; } + + public void setName(String name) { this.name = name; } + + public List getSetmembers() { return setmembers; } + + public void setSetmembers(List setmembers) { this.setmembers = setmembers; } + + public String getUuid() { return uuid; } + + public void setUuid(String uuid) { this.uuid = uuid; } + + public Map getAdditionalProperties() { return additionalProperties; } + + public void setAdditionalProperties(Map additionalProperties) { this.additionalProperties = additionalProperties; } + + // adds members to a hostset public static class PrimeraHostsetVLUNRequest { private String volumeName; private Boolean autoLun = true; private Integer lun = 0; private Integer maxAutoLun = 0; - /** - * This can be a single hostname OR the set of hosts in the format - * "set:". - * For the purposes of this driver, its expected that the predominate usecase is - * to use - * a hostset that is aligned with a CloudStack Cluster. 
- */ + // hostset format: "set:" private String hostname; - public String getVolumeName() { return volumeName; } - public void setVolumeName(String volumeName) { this.volumeName = volumeName; } - public Boolean getAutoLun() { return autoLun; } - public void setAutoLun(Boolean autoLun) { this.autoLun = autoLun; } - public Integer getLun() { return lun; } - public void setLun(Integer lun) { this.lun = lun; } - public Integer getMaxAutoLun() { return maxAutoLun; } - public void setMaxAutoLun(Integer maxAutoLun) { this.maxAutoLun = maxAutoLun; } - public String getHostname() { return hostname; } - public void setHostname(String hostname) { this.hostname = hostname; } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java new file mode 100644 index 000000000000..e6e84faeb292 --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java @@ -0,0 +1,23 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraPort { + private String wwn; + private PrimeraPortPos portPos; + public String getWwn() { + return wwn; + } + public void setWwn(String wwn) { + this.wwn = wwn; + } + public PrimeraPortPos getPortPos() { + return portPos; + } + public void setPortPos(PrimeraPortPos portPos) { + this.portPos = portPos; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java new 
file mode 100644 index 000000000000..e05de43542fd --- /dev/null +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java @@ -0,0 +1,30 @@ +package org.apache.cloudstack.storage.datastore.adapter.primera; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; + +@JsonIgnoreProperties(ignoreUnknown = true) +@JsonInclude(JsonInclude.Include.NON_NULL) +public class PrimeraPortPos { + private Integer cardPort; + private Integer node; + private Integer slot; + public Integer getCardPort() { + return cardPort; + } + public void setCardPort(Integer cardPort) { + this.cardPort = cardPort; + } + public Integer getNode() { + return node; + } + public void setNode(Integer node) { + this.node = node; + } + public Integer getSlot() { + return slot; + } + public void setSlot(Integer slot) { + this.slot = slot; + } +} diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java index 33ad0d445f85..6bfa9f5920dd 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumeCopyRequestParameters.java @@ -35,7 +35,7 @@ public class PrimeraVolumeCopyRequestParameters { private String snapCPG = null; private Boolean skipZero = null; private Boolean saveSnapshot = null; - /** 1=HIGH, 2=MED, 3=LOW */ + // 1=HIGH, 2=MED, 3=LOW private Integer priority = null; public String getDestVolume() { return destVolume; diff --git 
a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java index 48898c272771..10ba364a5c5e 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraVolumePromoteRequest.java @@ -22,10 +22,7 @@ @JsonIgnoreProperties(ignoreUnknown = true) @JsonInclude(JsonInclude.Include.NON_NULL) public class PrimeraVolumePromoteRequest { - /** - * Defines action for the request as described at https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html - */ - private Integer action = 4; + private Integer action = 4; // PROMOTE_VIRTUAL_COPY, https://support.hpe.com/hpesc/public/docDisplay?docId=a00114827en_us&page=v25706371.html private Boolean online = true; private Integer priority = 2; // MEDIUM private Boolean allowRemoteCopyParent = true; diff --git a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java index 8484a5ef798d..b479436dab2c 100644 --- a/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java +++ b/plugins/user-authenticators/oauth2/src/main/java/org/apache/cloudstack/oauth2/OAuth2UserAuthenticator.java @@ -58,10 +58,17 @@ public Pair authenticate(String username, final String[] provider = (String[])requestParameters.get(ApiConstants.PROVIDER); final String[] emailArray = (String[])requestParameters.get(ApiConstants.EMAIL); final String[] secretCodeArray = (String[])requestParameters.get(ApiConstants.SECRET_CODE); + + 
if (provider == null) { + return new Pair(false, null); + } + String oauthProvider = ((provider == null) ? null : provider[0]); String email = ((emailArray == null) ? null : emailArray[0]); String secretCode = ((secretCodeArray == null) ? null : secretCodeArray[0]); + + UserOAuth2Authenticator authenticator = _userOAuth2mgr.getUserOAuth2AuthenticationProvider(oauthProvider); if (user != null && authenticator.verifyUser(email, secretCode)) { return new Pair(true, null); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ae0d66ee482e..ebc120c980f7 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -16,6 +16,7 @@ // under the License. package com.cloud.vm; +import static com.cloud.configuration.ConfigurationManager.VM_USERDATA_MAX_LENGTH; import static com.cloud.configuration.ConfigurationManagerImpl.VM_USERDATA_MAX_LENGTH; import static com.cloud.utils.NumbersUtil.toHumanReadableSize; @@ -4363,14 +4364,7 @@ public boolean checkIfDynamicScalingCanBeEnabled(VirtualMachine vm, ServiceOffer */ protected long configureCustomRootDiskSize(Map customParameters, VMTemplateVO template, HypervisorType hypervisorType, DiskOfferingVO rootDiskOffering) { verifyIfHypervisorSupportsRootdiskSizeOverride(hypervisorType); - Long rootDiskSizeCustomParam = null; - if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - rootDiskSizeCustomParam = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); - if (rootDiskSizeCustomParam <= 0) { - throw new InvalidParameterValueException("Root disk size should be a positive number."); - } - } - long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, rootDiskSizeCustomParam); + long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1)); if 
(rootDiskSizeInBytes > 0) { //if the size at DiskOffering is not zero then the Service Offering had it configured, it holds priority over the User custom size _volumeService.validateVolumeSizeInBytes(rootDiskSizeInBytes); long rootDiskSizeInGiB = rootDiskSizeInBytes / GiB_TO_BYTES; @@ -4379,7 +4373,11 @@ protected long configureCustomRootDiskSize(Map customParameters, } if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - Long rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES; + Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); + if (rootDiskSize <= 0) { + throw new InvalidParameterValueException("Root disk size should be a positive number."); + } + rootDiskSize *= GiB_TO_BYTES; _volumeService.validateVolumeSizeInBytes(rootDiskSize); return rootDiskSize; } else { @@ -6424,6 +6422,12 @@ private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { + " hypervisors: [%s].", hypervisorType, HYPERVISORS_THAT_CAN_DO_STORAGE_MIGRATION_ON_NON_USER_VMS)); } + List vols = _volsDao.findByInstance(vm.getId()); + if (vols.size() > 1 && + !(HypervisorType.VMware.equals(hypervisorType) || HypervisorType.KVM.equals(hypervisorType))) { + throw new InvalidParameterValueException("Data disks attached to the vm, can not migrate. 
Need to detach data disks first"); + } + // Check that Vm does not have VM Snapshots if (_vmSnapshotDao.findByVm(vmId).size() > 0) { throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); diff --git a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java index a11593a86080..7f0ee77deaf3 100644 --- a/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java +++ b/server/src/main/java/org/apache/cloudstack/snapshot/SnapshotHelper.java @@ -92,7 +92,9 @@ public class SnapshotHelper { */ public void expungeTemporarySnapshot(boolean kvmSnapshotOnlyInPrimaryStorage, SnapshotInfo snapInfo) { if (!kvmSnapshotOnlyInPrimaryStorage) { - logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. Not expunging it.", snapInfo.getId())); + if (snapInfo != null) { + logger.trace(String.format("Snapshot [%s] is not a temporary backup to create a volume from snapshot. 
Not expunging it.", snapInfo.getId())); + } return; } diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 1ed5a8f1648a..bca52c6e54b9 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -558,6 +558,7 @@ private StoragePool getStoragePool(final UnmanagedInstanceTO.Disk disk, final Da } } } + if (storagePool == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Storage pool for disk %s(%s) with datastore: %s not found in zone ID: %s", disk.getLabel(), disk.getDiskId(), disk.getDatastoreName(), zone.getUuid())); } From c622afe3e2dd6a470037c88212b8d6aa71ec44c0 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 1 Apr 2024 18:32:55 +0000 Subject: [PATCH 11/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index df1d7035581d..8bd6d4f6fcac 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -482,8 +482,7 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l } LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid()); - throw new CloudRuntimeException("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid()); - 
//return false; + return false; } long getPhysicalDiskSize(String diskPath) { From d9f10167541e03b6e13e0c219e6f6ebeef512e7c Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 1 Apr 2024 21:19:59 +0000 Subject: [PATCH 12/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../hypervisor/kvm/storage/MultipathSCSIAdapterBase.java | 7 ++++--- .../datastore/adapter/flasharray/FlashArrayAdapter.java | 1 - 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 8bd6d4f6fcac..eff6767a06b6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -178,12 +178,12 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map Date: Tue, 2 Apr 2024 01:06:11 +0000 Subject: [PATCH 13/47] Updates to change PUre and Primera to host-centric vlun assignments; various small bug fixes --- .../storage/datastore/driver/AdaptiveDataStoreDriverImpl.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java index 32342b951542..549604b35777 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java @@ -996,4 +996,8 @@ ProviderAdapterDataObject newManagedDataObject(DataObject 
data, StoragePool stor dataIn.setType(ProviderAdapterDataObject.Type.valueOf(data.getType().toString())); return dataIn; } + + public boolean volumesRequireGrantAccessWhenUsed() { + return true; + } } From 2dcfdb9f3bbcddbbae159961a2bac1376fe04f89 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 2 Apr 2024 16:38:43 +0000 Subject: [PATCH 14/47] update to add timestamp when deleting pure volumes to avoid future conflicts --- .../datastore/adapter/flasharray/FlashArrayAdapter.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 66ddac4e855a..a1b277c7e54c 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -23,6 +23,7 @@ import java.security.KeyManagementException; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; @@ -203,6 +204,10 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dat String fullName = normalizeName(pod, dataObject.getExternalName()); FlashArrayVolume volume = new FlashArrayVolume(); + // rename as we delete so it doesn't conflict if the template or volume is ever recreated + // pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete + String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date()); + volume.setName(fullName + "-" + timestamp); volume.setDestroyed(true); try { PATCH("/volumes?names=" + fullName, 
volume, new TypeReference>() { From 35b76c9bb6b5cd4d7107450ba46699c6f1f24ec9 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 15:27:26 +0000 Subject: [PATCH 15/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../cloud/storage/VolumeApiServiceImpl.java | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 88d45d54aa52..966903eabe39 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3403,12 +3403,15 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) Account caller = CallContext.current().getCallingAccount(); DataCenter zone = null; Volume volume = _volsDao.findById(cmd.getId()); - if (volume != null) { - zone = _dcDao.findById(volume.getDataCenterId()); + if (volume == null) { + throw new InvalidParameterValueException(String.format("Provided volume id is not valid: %s", cmd.getId())); } + zone = _dcDao.findById(volume.getDataCenterId()); + _accountMgr.checkAccess(caller, newDiskOffering, zone); - DiskOfferingVO currentDiskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingHasTagsAsOldDiskOffering(currentDiskOffering, newDiskOffering)) { + StoragePool destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); + + if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingMatchTargetStoragePool(destStoragePool, newDiskOffering)) { throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), 
newDiskOffering.getUuid())); } return newDiskOffering; @@ -3524,16 +3527,24 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String return result; } - public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { - String[] oldDOStorageTags = oldDO.getTagsArray(); + public boolean doesNewDiskOfferingMatchTargetStoragePool(StoragePool destPool, DiskOfferingVO newDO) { String[] newDOStorageTags = newDO.getTagsArray(); - if (oldDOStorageTags.length == 0) { - return true; + List destPoolTags = storagePoolTagsDao.findStoragePoolTags(destPool.getId()); + if (newDOStorageTags == null || newDOStorageTags.length == 0) { + if (destPoolTags == null || destPoolTags.isEmpty()) { + return true; + } else { + return false; + } } - if (newDOStorageTags.length == 0) { - return false; + for (StoragePoolTagVO spt: destPoolTags) { + for (String doTag: newDOStorageTags) { + if (doTag.equals(spt.getTag())) { + return true; + } + } } - return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags)); + return false; } /** From 638a30fc834f290d68bc955de10a78df51811141 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 15:36:04 +0000 Subject: [PATCH 16/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../cloud/storage/VolumeApiServiceImpl.java | 36 ++++++++----------- 1 file changed, 14 insertions(+), 22 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index 966903eabe39..aec7b12e7eff 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3411,8 +3411,8 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) _accountMgr.checkAccess(caller, newDiskOffering, zone); StoragePool 
destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesNewDiskOfferingMatchTargetStoragePool(destStoragePool, newDiskOffering)) { - throw new InvalidParameterValueException(String.format("Existing disk offering storage tags of the volume %s does not contain in the new disk offering %s ", volume.getUuid(), newDiskOffering.getUuid())); + if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesTargetStorageSupportDiskOffering(destStoragePool, newDiskOffering)) { + throw new InvalidParameterValueException(String.format("New disk offering is not valid for the provided storage pool: volume [%s], disk offering [%s]", volume.getUuid(), newDiskOffering.getUuid())); } return newDiskOffering; } @@ -3498,6 +3498,18 @@ protected boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, Dis return doesTargetStorageSupportDiskOffering(destPool, targetStoreTags); } + public static boolean doesNewDiskOfferingHasTagsAsOldDiskOffering(DiskOfferingVO oldDO, DiskOfferingVO newDO) { + String[] oldDOStorageTags = oldDO.getTagsArray(); + String[] newDOStorageTags = newDO.getTagsArray(); + if (oldDOStorageTags.length == 0) { + return true; + } + if (newDOStorageTags.length == 0) { + return false; + } + return CollectionUtils.isSubCollection(Arrays.asList(oldDOStorageTags), Arrays.asList(newDOStorageTags)); + } + @Override public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String diskOfferingTags) { Pair, Boolean> storagePoolTags = getStoragePoolTags(destPool); @@ -3527,26 +3539,6 @@ public boolean doesTargetStorageSupportDiskOffering(StoragePool destPool, String return result; } - public boolean doesNewDiskOfferingMatchTargetStoragePool(StoragePool destPool, DiskOfferingVO newDO) { - String[] newDOStorageTags = newDO.getTagsArray(); - List destPoolTags = storagePoolTagsDao.findStoragePoolTags(destPool.getId()); - if 
(newDOStorageTags == null || newDOStorageTags.length == 0) { - if (destPoolTags == null || destPoolTags.isEmpty()) { - return true; - } else { - return false; - } - } - for (StoragePoolTagVO spt: destPoolTags) { - for (String doTag: newDOStorageTags) { - if (doTag.equals(spt.getTag())) { - return true; - } - } - } - return false; - } - /** * Returns a {@link Pair}, where the first value is the list of the StoragePool tags, and the second value is whether the returned tags are to be interpreted as a rule, * or a normal list of tags. From 71cb21ddad72f3cf21b73ba42ec2d2006f7ed851 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Wed, 3 Apr 2024 16:16:01 +0000 Subject: [PATCH 17/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../main/java/com/cloud/storage/VolumeApiServiceImpl.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index aec7b12e7eff..f97a7f61ef0b 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3409,11 +3409,6 @@ private DiskOfferingVO retrieveAndValidateNewDiskOffering(MigrateVolumeCmd cmd) zone = _dcDao.findById(volume.getDataCenterId()); _accountMgr.checkAccess(caller, newDiskOffering, zone); - StoragePool destStoragePool = _storagePoolDao.findById(cmd.getStoragePoolId()); - - if (VolumeApiServiceImpl.MatchStoragePoolTagsWithDiskOffering.valueIn(zone.getId()) && !doesTargetStorageSupportDiskOffering(destStoragePool, newDiskOffering)) { - throw new InvalidParameterValueException(String.format("New disk offering is not valid for the provided storage pool: volume [%s], disk offering [%s]", volume.getUuid(), newDiskOffering.getUuid())); - } return newDiskOffering; } From ad331ffbbe284cc75b83fb6711dbdb900565d014 Mon Sep 17 00:00:00 2001 From: "Glover, Rene 
(rg9975)" Date: Thu, 4 Apr 2024 15:15:11 +0000 Subject: [PATCH 18/47] update to migrate to properly check disk offering is valid for the target storage pool --- .../main/java/com/cloud/storage/VolumeApiServiceImpl.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index f97a7f61ef0b..e923c83358d8 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -3318,6 +3318,13 @@ public Volume migrateVolume(MigrateVolumeCmd cmd) { } DiskOfferingVO newDiskOffering = retrieveAndValidateNewDiskOffering(cmd); + // if no new disk offering was provided, and match is required, default to the offering of the + // original volume. otherwise it falls through with no check and the target volume may + // not work correctly in some scenarios with the target provider. Adminstrator + // can disable this flag dynamically for certain bulk migration scenarios if required. 
+ if (newDiskOffering == null && Boolean.TRUE.equals(MatchStoragePoolTagsWithDiskOffering.value())) { + newDiskOffering = diskOffering; + } validateConditionsToReplaceDiskOfferingOfVolume(vol, newDiskOffering, destPool); if (vm != null) { From 1151ddf35e5d19a1e738c89d82bbca189dcfe4c1 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 5 Apr 2024 20:06:48 +0000 Subject: [PATCH 19/47] improve error handling when copying volumes to add precision to which step failed --- .../LibvirtMigrateVolumeCommandWrapper.java | 18 +++++++++++++++--- .../kvm/storage/MultipathSCSIAdapterBase.java | 4 ++-- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java index 2a09c3408911..f52a4005d3d6 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java @@ -293,15 +293,27 @@ protected MigrateVolumeAnswer migrateRegularVolume(final MigrateVolumeCommand co (destVolumeObjectTO.getPath() != null ? 
destVolumeObjectTO.getPath() : UUID.randomUUID().toString()); try { - storagePoolManager.connectPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath, srcDetails); + KVMStoragePool sourceStoragePool = storagePoolManager.getStoragePool(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid()); + + if (!sourceStoragePool.connectPhysicalDisk(srcPath, srcDetails)) { + return new MigrateVolumeAnswer(command, false, "Unable to connect source volume on hypervisor", srcPath); + } KVMPhysicalDisk srcPhysicalDisk = storagePoolManager.getPhysicalDisk(srcPrimaryDataStore.getPoolType(), srcPrimaryDataStore.getUuid(), srcPath); + if (srcPhysicalDisk == null) { + return new MigrateVolumeAnswer(command, false, "Unable to get handle to source volume on hypervisor", srcPath); + } KVMStoragePool destPrimaryStorage = storagePoolManager.getStoragePool(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid()); - storagePoolManager.connectPhysicalDisk(destPrimaryDataStore.getPoolType(), destPrimaryDataStore.getUuid(), destPath, destDetails); + if (!destPrimaryStorage.connectPhysicalDisk(destPath, destDetails)) { + return new MigrateVolumeAnswer(command, false, "Unable to connect destination volume on hypervisor", srcPath); + } - storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds()); + KVMPhysicalDisk newDiskCopy = storagePoolManager.copyPhysicalDisk(srcPhysicalDisk, destPath, destPrimaryStorage, command.getWaitInMillSeconds()); + if (newDiskCopy == null) { + return new MigrateVolumeAnswer(command, false, "Copy command failed to return handle to copied physical disk", destPath); + } } catch (Exception ex) { return new MigrateVolumeAnswer(command, false, ex.getMessage(), null); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index eff6767a06b6..14f4f50dfdc7 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -331,7 +331,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt QemuImgFile srcFile = new QemuImgFile(disk.getPath(), disk.getFormat()); QemuImgFile destFile = new QemuImgFile(destDisk.getPath(), destDisk.getFormat()); - LOGGER.debug("Starting COPY from source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath()); + LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath()); ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); /**Script script = new Script( @@ -346,7 +346,7 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt if (rc != 0) { throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() + " the error was: " + rc + " - " + result.getResult()); } - LOGGER.debug("Successfully converted source downloaded template " + srcFile.getFileName() + " to Primera volume: " + destDisk.getPath() + " " + result.getResult()); + LOGGER.debug("Successfully converted source volume at " + srcFile.getFileName() + " to destination volume: " + destDisk.getPath() + " " + result.getResult()); return destDisk; } From eb72474c348fd880f9fae304dc8dd87002f26bb8 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 5 Apr 2024 23:01:08 +0000 Subject: [PATCH 20/47] rename pure volume before delete to avoid conflicts if the same name is used before its expunged on the array --- 
.../datastore/adapter/flasharray/FlashArrayAdapter.java | 7 ++++++- .../main/java/com/cloud/storage/VolumeApiServiceImpl.java | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index a1b277c7e54c..cd474f7b54d9 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -204,13 +204,18 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dat String fullName = normalizeName(pod, dataObject.getExternalName()); FlashArrayVolume volume = new FlashArrayVolume(); + // rename as we delete so it doesn't conflict if the template or volume is ever recreated // pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date()); volume.setName(fullName + "-" + timestamp); + PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { + }); + + // now delete it volume.setDestroyed(true); try { - PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { + PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference>() { }); } catch (CloudRuntimeException e) { if (e.toString().contains("Volume does not exist")) { diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index e923c83358d8..ee303ee28f6e 100644 --- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -1045,7 +1045,7 @@ public VolumeVO createVolume(CreateVolumeCmd cmd) { created = false; VolumeInfo vol = volFactory.getVolume(cmd.getEntityId()); vol.stateTransit(Volume.Event.DestroyRequested); - throw new CloudRuntimeException("Failed to create volume: " + volume.getId(), e); + throw new CloudRuntimeException("Failed to create volume: " + volume.getUuid(), e); } finally { if (!created) { s_logger.trace("Decrementing volume resource count for account id=" + volume.getAccountId() + " as volume failed to create on the backend"); From 98225b71bf2164f2ec2bcf1f54fcafefe3888f65 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 5 Apr 2024 23:33:00 +0000 Subject: [PATCH 21/47] rename pure volume before delete to avoid conflicts if the same name is used before its expunged on the array --- .../datastore/adapter/flasharray/FlashArrayAdapter.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index cd474f7b54d9..ce3c1c9e954d 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -208,11 +208,11 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dat // rename as we delete so it doesn't conflict if the template or volume is ever recreated // pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date()); - volume.setName(fullName + "-" + timestamp); + 
volume.setExternalName(fullName + "-" + timestamp); PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { }); - // now delete it + // now delete it with new name volume.setDestroyed(true); try { PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference>() { From da6eb3724e49f0ff07166ac4019d414fe0dfcc20 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Sat, 6 Apr 2024 01:25:33 +0000 Subject: [PATCH 22/47] remove dead code in AdaptiveDataStoreLifeCycleImpl.java --- .../AdaptiveDataStoreLifeCycleImpl.java | 50 ------------------- 1 file changed, 50 deletions(-) diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java index 26d2494eacf8..ab1d49d8b4f1 100644 --- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/AdaptiveDataStoreLifeCycleImpl.java @@ -383,59 +383,9 @@ public boolean migrateToObjectStore(DataStore store) { */ @Override public void updateStoragePool(StoragePool storagePool, Map newDetails) { - /**String newAuthnType = newDetails.get(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY); - String newUser = newDetails.get(ProviderAdapter.API_USERNAME_KEY); - String newToken = newDetails.get(ProviderAdapter.API_TOKEN_KEY); - String newPassword = fetchMightBeEncryptedProperty(ProviderAdapter.API_PASSWORD_KEY, newDetails); - String newSecret = fetchMightBeEncryptedProperty(ProviderAdapter.API_TOKEN_KEY, newDetails); - String newUrl = newDetails.get(ProviderAdapter.API_URL_KEY); - String skipTlsValidationStr = newDetails.get(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY); - Boolean newSkipTlsValidation = null; - if 
(skipTlsValidationStr != null) { - newSkipTlsValidation = Boolean.parseBoolean(skipTlsValidationStr); - } - - String capacityInBytesStr = newDetails.get("capacityBytes"); - Long newCapacityInBytes = null; - if (capacityInBytesStr != null) { - newCapacityInBytes = Long.parseLong(capacityInBytesStr); - } - - String capacityIopsStr = newDetails.get("capacityIops"); - Long newCapacityIops = null; - if (capacityIopsStr != null) { - newCapacityIops = Long.parseLong(capacityIopsStr); - } - - - Map existingDetails = _primaryDataStoreDao.getDetails(storagePool.getId()); - if (newAuthnType != null) { - existingDetails.put(ProviderAdapter.API_AUTHENTICATION_TYPE_KEY, newAuthnType); - } - - if (newUser != null) existingDetails.put(ProviderAdapter.API_USERNAME_KEY, newUser); - if (newToken != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newToken); - if (newPassword != null) existingDetails.put(ProviderAdapter.API_PASSWORD_KEY, newPassword); - if (newSecret != null) existingDetails.put(ProviderAdapter.API_TOKEN_KEY, newSecret); - if (newUrl != null) existingDetails.put(ProviderAdapter.API_URL_KEY, newUrl); - if (newSkipTlsValidation != null) existingDetails.put(ProviderAdapter.API_SKIP_TLS_VALIDATION_KEY, newSkipTlsValidation.toString()); - if (newCapacityInBytes != null) existingDetails.put("capacityBytes", capacityInBytesStr); - if (newCapacityIops != null) existingDetails.put("capacityIops", capacityIopsStr); - - _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), existingDetails);*/ _adapterFactoryMap.updateAPI(storagePool.getUuid(), storagePool.getStorageProviderName(), newDetails); } - private String fetchMightBeEncryptedProperty(String key, Map details) { - String value; - try { - value = DBEncryptionUtil.decrypt(details.get(key)); - } catch (Exception e) { - value = details.get(key); - } - return value; - } - /** * Enable the storage pool (allows volumes from this pool) */ From bb0c3eea66e74253325aeb087327a55202c5af66 Mon 
Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Sat, 6 Apr 2024 01:27:53 +0000 Subject: [PATCH 23/47] add test case documentation for adaptive storage providers to the adaptive plugin README --- plugins/storage/volume/adaptive/README.md | 42 +++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/plugins/storage/volume/adaptive/README.md b/plugins/storage/volume/adaptive/README.md index 041f1f1a1289..be07f2c89f2f 100644 --- a/plugins/storage/volume/adaptive/README.md +++ b/plugins/storage/volume/adaptive/README.md @@ -56,3 +56,45 @@ This provides instructions of which provider implementation class to load when t ## Build and Deploy the Jar Once you build the new jar, start Cloudstack Management Server or, if a standalone jar, add it to the classpath before start. You should now have a new storage provider of the designated name once Cloudstack finishes loading all configured modules. + +### Test Cases +The following test cases should be run against configured installations of each storage array in a working Cloudstack installation. +1. Create New Primera Storage Pool for Zone +2. Create New Primera Storage Pool for Cluster +3. Update Primera Storage Pool for Zone +4. Update Primera Storage Pool for Cluster +5. Create VM with Root Disk using Primera pool +6. Create VM with Root and Data Disk using Primera pool +7. Create VM with Root Disk using NFS and Data Disk on Primera pool +8. Create VM with Root Disk on Primera Pool and Data Disk on NFS +9. Snapshot root disk with VM using Primera Pool for root disk +10. Snapshot data disk with VM using Primera Pool for data disk +11. Snapshot VM (non-memory) with root and data disk using Primera pool +12. Snapshot VM (non-memory) with root disk using Primera pool and data disk using NFS +13. Snapshot VM (non-memory) with root disk using NFS pool and data disk using Primera pool +14. Create new template from previous snapshot root disk on Primera pool +15. Create new volume from previous snapshot root disk on Primera pool +16.
Create new volume from previous snapshot data disk on Primera pool +17. Create new VM using template created from Primera root snapshot and using Primera as root volume pool +18. Create new VM using template created from Primera root snapshot and using NFS as root volume pool +19. Delete previously created Primera snapshot +20. Delete previously created Primera volume attached to a VM that is running (should fail) +21. Delete previously created Primera volume attached to a VM that is not running (should fail) +22. Detach a Primera volume from a non-running VM (should work) +23. Attach a Primera volume to a running VM (should work) +24. Attach a Primera volume to a non-running VM (should work) +25. Create a 'thin' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=true, reduce=false) +26. Create a 'sparse' Disk Offering tagged for Primera pool and provision and attach a data volume to a VM using this offering (ttpv=false, reduce=true) +27. Create a 'fat' Disk Offering and tagged for Primera pool and provision and attach a data volume to a VM using this offering (should fail as 'fat' not supported) +28. Perform volume migration of root volume from Primera pool to NFS pool on stopped VM +29. Perform volume migration of root volume from NFS pool to Primera pool on stopped VM +30. Perform volume migration of data volume from Primera pool to NFS pool on stopped VM +31. Perform volume migration of data volume from NFS pool to Primera pool on stopped VM +32. Perform VM data migration for a VM with 1 or more data volumes from all volumes on Primera pool to all volumes on NFS pool +33. Perform VM data migration for a VM with 1 or more data volumes from all volumes on NFS pool to all volumes on Primera pool +34. Perform live migration of a VM with a Primera root disk +35. Perform live migration of a VM with a Primera data disk and NFS root disk +36.
Perform live migration of a VM with a Primera root disk and NFS data disk +37. Perform volume migration between 2 Primera pools on the same backend Primera IP address +38. Perform volume migration between 2 Primera pools on different Primera IP address + From 64af7a94e88f907fc20d6786e6124f315f25b5e8 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 8 Apr 2024 15:40:08 +0000 Subject: [PATCH 24/47] Fix issues found in PR checks --- plugins/storage/volume/adaptive/README.md | 1 - .../adapter/flasharray/FlashArrayHost.java | 17 +++++++++++++++++ .../adapter/primera/PrimeraHostDescriptor.java | 17 +++++++++++++++++ .../datastore/adapter/primera/PrimeraPort.java | 17 +++++++++++++++++ .../adapter/primera/PrimeraPortPos.java | 17 +++++++++++++++++ 5 files changed, 68 insertions(+), 1 deletion(-) diff --git a/plugins/storage/volume/adaptive/README.md b/plugins/storage/volume/adaptive/README.md index be07f2c89f2f..2e3e96668661 100644 --- a/plugins/storage/volume/adaptive/README.md +++ b/plugins/storage/volume/adaptive/README.md @@ -97,4 +97,3 @@ The following test cases should be run against configured installations of each 36. Perform live migration of a VM with a Primera root disk and NFS data disk 37. Perform volume migration between 2 Primera pools on the same backend Primera IP address 38. 
Perform volume migration between 2 Primera pools on different Primera IP address - diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java index 0c3a1e7179d5..009d964d56fb 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayHost.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.datastore.adapter.flasharray; import java.util.List; diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java index 29ba90ffe7c7..b251786579cc 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraHostDescriptor.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.datastore.adapter.primera; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java index e6e84faeb292..792371c636b6 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPort.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.datastore.adapter.primera; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java index e05de43542fd..62800802ce86 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraPortPos.java @@ -1,3 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ package org.apache.cloudstack.storage.datastore.adapter.primera; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; From 179267e7bdd3ad331fd641ff0a5555d9a00d2e76 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 26 Apr 2024 22:39:41 +0000 Subject: [PATCH 25/47] updates from PR comments --- .../StorageSystemDataMotionStrategy.java | 12 +++------- .../kvm/storage/KVMStorageProcessor.java | 24 +++++++++---------- .../vm/UnmanagedVMsManagerImpl.java | 1 - 3 files changed, 15 insertions(+), 22 deletions(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 81540bec3bdc..047961403be0 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -1850,14 +1850,12 @@ private void handleCreateVolumeFromVolumeOnSecondaryStorage(VolumeInfo srcVolume private CopyCmdAnswer copyImageToVolume(DataObject srcDataObject, VolumeInfo destVolumeInfo, HostVO hostVO) { int primaryStorageDownloadWait = StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = null; - CopyCmdAnswer copyCmdAnswer; try { _volumeService.grantAccess(destVolumeInfo, hostVO, destVolumeInfo.getDataStore()); - copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, + CopyCommand copyCommand = new CopyCommand(srcDataObject.getTO(), destVolumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); Map destDetails = getVolumeDetails(destVolumeInfo); @@ -2599,8 +2597,6 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa int primaryStorageDownloadWait = 
StorageManager.PRIMARY_STORAGE_DOWNLOAD_WAIT.value(); - CopyCommand copyCommand = null; - try { handleQualityOfServiceForVolumeMigration(volumeInfo, PrimaryDataStoreDriver.QualityOfServiceState.MIGRATION); @@ -2608,7 +2604,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa _volumeService.grantAccess(volumeInfo, hostVO, srcDataStore); } - copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = new CopyCommand(volumeInfo.getTO(), templateInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); Map srcDetails = getVolumeDetails(volumeInfo); @@ -3076,15 +3072,13 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps srcData = cacheData; } - CopyCommand copyCommand = null; - try { if (Snapshot.LocationType.PRIMARY.equals(locationType)) { _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); Map srcDetails = getSnapshotDetails(snapshotInfo); - copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); copyCommand.setOptions(srcDetails); } else { _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 7d4962e57295..766b82ecb58c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -266,12 +266,7 @@ public 
Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { Map details = primaryStore.getDetails(); - String path = null; - if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { - path = destData.getPath(); - } else { - path = details != null ? details.get("managedStoreTarget") : null; - } + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); @@ -332,6 +327,16 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } } + private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map details) { + String path = null; + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + path = destData.getPath(); + } else { + path = details != null ? details.get("managedStoreTarget") : null; + } + return path; + } + // this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk private KVMPhysicalDisk templateToPrimaryDownload(final String templateUrl, final KVMStoragePool primaryPool, final String volUuid, final Long size, final int timeout) { final int index = templateUrl.lastIndexOf("/"); @@ -411,12 +416,7 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); } if (primaryPool.getType() == StoragePoolType.PowerFlex) { Map details = primaryStore.getDetails(); - String path = null; - if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { - path = destData.getPath(); - } else { - path = details != null ? 
details.get("managedStoreTarget") : null; - } + String path = derivePath(primaryStore, destData, details); if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), templatePath, details)) { s_logger.warn("Failed to connect base template volume at path: " + templatePath + ", in storage pool id: " + primaryStore.getUuid()); diff --git a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java index 6d12d71f7209..7dc000c73752 100644 --- a/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java +++ b/server/src/main/java/org/apache/cloudstack/vm/UnmanagedVMsManagerImpl.java @@ -561,7 +561,6 @@ private StoragePool getStoragePool(final UnmanagedInstanceTO.Disk disk, final Da } } } - if (storagePool == null) { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Storage pool for disk %s(%s) with datastore: %s not found in zone ID: %s", disk.getLabel(), disk.getDiskId(), disk.getDatastoreName(), zone.getUuid())); } From d03169c61c58fedaa340329b6f1fcc8595eaeacd Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 26 Apr 2024 22:56:27 +0000 Subject: [PATCH 26/47] fix session refresh TTL logic --- .../adapter/flasharray/FlashArrayAdapter.java | 1 - .../adapter/primera/PrimeraAdapter.java | 42 +++++++++++++------ 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index ce3c1c9e954d..96e4b1c7ad27 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ 
b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -511,7 +511,6 @@ private FlashArrayHost getHost(String hostname) { } private String getAccessToken() { - refreshSession(false); return accessToken; } diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 8e1375a77027..cc45562e1ab0 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -72,7 +72,7 @@ public class PrimeraAdapter implements ProviderAdapter { public static final String TASK_WAIT_TIMEOUT_MS = "taskWaitTimeoutMs"; private static final long KEY_TTL_DEFAULT = (1000 * 60 * 14); - private static final long CONNECT_TIMEOUT_MS_DEFAULT = 600000; + private static final long CONNECT_TIMEOUT_MS_DEFAULT = 60 * 1000; private static final long TASK_WAIT_TIMEOUT_MS_DEFAULT = 10 * 60 * 1000; public static final long BYTES_IN_MiB = 1048576; @@ -118,6 +118,15 @@ public void validate() { @Override public void disconnect() { + logger.info("PrimeraAdapter:disconnect(): closing session"); + try { + _client.close(); + } catch (IOException e) { + logger.warn("PrimeraAdapter:refreshSession(): Error closing client connection", e); + } finally { + _client = null; + keyExpiration = -1; + } return; } @@ -489,24 +498,25 @@ private PrimeraCpg getCpg(String name) { } private String getSessionKey() { - refreshSession(false); return key; } - private synchronized void refreshSession(boolean force) { + private synchronized String refreshSession(boolean force) { try { - if (force || keyExpiration < System.currentTimeMillis()) { + if (force || 
keyExpiration < (System.currentTimeMillis()-15000)) { // close client to force connection reset on appliance -- not doing this can result in NotAuthorized error...guessing - _client.close();; - _client = null; + disconnect(); login(); - keyExpiration = System.currentTimeMillis() + keyTtl; + logger.debug("PrimeraAdapter:refreshSession(): session created or refreshed with key=" + key + ", expiration=" + keyExpiration); + } else { + logger.debug("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration); } } catch (Exception e) { // retry frequently but not every request to avoid DDOS on storage API logger.warn("Failed to refresh Primera API key for " + username + "@" + url + ", will retry in 5 seconds", e); keyExpiration = System.currentTimeMillis() + (5*1000); } + return key; } /** * Login to the array and get an access token @@ -549,7 +559,7 @@ private void login() { cpg = queryParms.get(PrimeraAdapter.CPG); if (cpg == null) { throw new RuntimeException( - PrimeraAdapter.CPG + " paramater/option required to configure this storage pool"); + PrimeraAdapter.CPG + " parameter/option required to configure this storage pool"); } } @@ -636,6 +646,9 @@ private void login() { if (statusCode == 200 | statusCode == 201) { PrimeraKey keyobj = mapper.readValue(response.getEntity().getContent(), PrimeraKey.class); key = keyobj.getKey(); + // Set the key expiration to x minutes from now + this.keyExpiration = System.currentTimeMillis() + keyTtl; + logger.info("PrimeraAdapter:login(): successful, new session: New key=" + key + ", expiration=" + this.keyExpiration); } else if (statusCode == 401 || statusCode == 403) { throw new RuntimeException("Authentication or Authorization to Primera [" + url + "] with user [" + username + "] failed, unable to retrieve session token"); @@ -696,11 +709,11 @@ private CloseableHttpClient getClient() { private T POST(String path, Object input, final TypeReference type) { CloseableHttpResponse response = 
null; try { - this.refreshSession(false); + String session_key = this.refreshSession(false); HttpPost request = new HttpPost(url + path); request.addHeader("Content-Type", "application/json"); request.addHeader("Accept", "application/json"); - request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key); try { String data = mapper.writeValueAsString(input); request.setEntity(new StringEntity(data)); @@ -781,10 +794,11 @@ private T PUT(String path, Object input, final TypeReference type) { CloseableHttpResponse response = null; try { this.refreshSession(false); + String session_key = this.refreshSession(false); HttpPut request = new HttpPut(url + path); request.addHeader("Content-Type", "application/json"); request.addHeader("Accept", "application/json"); - request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key); String data = mapper.writeValueAsString(input); request.setEntity(new StringEntity(data)); @@ -834,10 +848,11 @@ private T GET(String path, final TypeReference type) { CloseableHttpResponse response = null; try { this.refreshSession(false); + String session_key = this.refreshSession(false); HttpGet request = new HttpGet(url + path); request.addHeader("Content-Type", "application/json"); request.addHeader("Accept", "application/json"); - request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key); CloseableHttpClient client = getClient(); response = (CloseableHttpResponse) client.execute(request); @@ -876,10 +891,11 @@ private void DELETE(String path) { CloseableHttpResponse response = null; try { this.refreshSession(false); + String session_key = this.refreshSession(false); HttpDelete request = new HttpDelete(url + path); request.addHeader("Content-Type", "application/json"); request.addHeader("Accept", "application/json"); - 
request.addHeader("X-HP3PAR-WSAPI-SessionKey", getSessionKey()); + request.addHeader("X-HP3PAR-WSAPI-SessionKey", session_key); CloseableHttpClient client = getClient(); response = (CloseableHttpResponse) client.execute(request); From d8a307d1fa48677430f487f08050b93c6a99213a Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 29 Apr 2024 14:18:18 +0000 Subject: [PATCH 27/47] logic to delete by path ONLY on supported OUI --- .../kvm/storage/MultipathSCSIAdapterBase.java | 45 ++++++++++++++----- 1 file changed, 33 insertions(+), 12 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 14f4f50dfdc7..9b2f9c65fb2c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -51,6 +51,14 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor { */ static byte[] CLEANUP_LOCK = new byte[0]; + /** + * List of supported OUI's (needed for path-based cleanup logic on disconnects after live migrations) + */ + static String[] SUPPORTED_OUI_LIST = { + "0002ac", // HPE Primera 3PAR + "24a937" // Pure Flasharray + }; + /** * Property keys and defaults */ @@ -207,35 +215,48 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map volumeToDisconnect) { - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); + LOGGER.debug(String.format("disconnectPhysicalDisk(volumeToDisconnect) called with arg bag [not implemented]:") + " " + volumeToDisconnect); return false; } @Override public boolean disconnectPhysicalDiskByPath(String localPath) { - 
LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) STARTED", localPath)); - if (localPath == null || (localPath != null && !localPath.startsWith("/dev/mapper/"))) { - LOGGER.debug(String.format("isconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath)); + if (localPath == null) { return false; } - ScriptResult result = runScript(disconnectScript, 60000L, localPath.replace("/dev/mapper/3", "")); - if (LOGGER.isDebugEnabled()) LOGGER.debug("multipath flush output: " + result.getResult()); - LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); - return true; + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) START", localPath)); + if (localPath.startsWith("/dev/mapper/")) { + String multipathName = localPath.replace("/dev/mapper/3", ""); + // this ensures we only disconnect multipath devices supported by this driver + for (String oui: SUPPORTED_OUI_LIST) { + if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) { + ScriptResult result = runScript(disconnectScript, 60000L, multipathName); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("multipath flush output: " + result.getResult()); + LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); + } + return true; + } + } + } + if (LOGGER.isDebugEnabled()) LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) returning FALSE, volume path is not a multipath volume: %s", localPath)); + return false; } @Override From f1ebd8046aac84ca860cc0b7c692b3d683862bff Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 29 Apr 2024 14:27:07 +0000 Subject: [PATCH 28/47] fix to StorageSystemDataMotionStrategy compile error --- 
.../storage/motion/StorageSystemDataMotionStrategy.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 047961403be0..3e12fd72ba1f 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -3073,12 +3073,13 @@ private CopyCmdAnswer performCopyOfVdi(VolumeInfo volumeInfo, SnapshotInfo snaps } try { + CopyCommand copyCommand = null; if (Snapshot.LocationType.PRIMARY.equals(locationType)) { _volumeService.grantAccess(snapshotInfo, hostVO, snapshotInfo.getDataStore()); Map srcDetails = getSnapshotDetails(snapshotInfo); - CopyCommand copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); + copyCommand = new CopyCommand(srcData.getTO(), volumeInfo.getTO(), primaryStorageDownloadWait, VirtualMachineManager.ExecuteInSequence.value()); copyCommand.setOptions(srcDetails); } else { _volumeService.grantAccess(volumeInfo, hostVO, volumeInfo.getDataStore()); From 0b69bae64d7885cae1858670ece20048b436a8ee Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 29 Apr 2024 17:49:05 +0000 Subject: [PATCH 29/47] change noisy debug message to trace message --- .../storage/datastore/adapter/primera/PrimeraAdapter.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 
cc45562e1ab0..1fdc92feedc4 100644 --- a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -497,10 +497,6 @@ private PrimeraCpg getCpg(String name) { }); } - private String getSessionKey() { - return key; - } - private synchronized String refreshSession(boolean force) { try { if (force || keyExpiration < (System.currentTimeMillis()-15000)) { @@ -509,7 +505,9 @@ private synchronized String refreshSession(boolean force) { login(); logger.debug("PrimeraAdapter:refreshSession(): session created or refreshed with key=" + key + ", expiration=" + keyExpiration); } else { - logger.debug("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration); + if (logger.isTraceEnabled()) { + logger.trace("PrimeraAdapter:refreshSession(): using existing session key=" + key + ", expiration=" + keyExpiration); + } } } catch (Exception e) { // retry frequently but not every request to avoid DDOS on storage API From 52f5c975a189a4fb9cdc2ce8111fc1fa9ccef62d Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 29 Apr 2024 20:08:50 +0000 Subject: [PATCH 30/47] fix double callback call in handleVolumeMigrationFromNonManagedStorageToManagedStorage --- .../StorageSystemDataMotionStrategy.java | 30 +++++-------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 3e12fd72ba1f..485d7cf9f50a 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ 
b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -697,8 +697,14 @@ private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeIn if (HypervisorType.XenServer.equals(hypervisorType)) { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); - } - else { + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); + destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); + DataTO dataTO = destVolumeInfo.getTO(); + copyCmdAnswer = new CopyCmdAnswer(dataTO); + CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); + result.setResult(errMsg); + callback.complete(result); + } else { handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } @@ -708,26 +714,6 @@ private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeIn throw new CloudRuntimeException(errMsg, ex); } - finally { - CopyCmdAnswer copyCmdAnswer; - - if (errMsg != null) { - copyCmdAnswer = new CopyCmdAnswer(errMsg); - } - else { - destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - - DataTO dataTO = destVolumeInfo.getTO(); - - copyCmdAnswer = new CopyCmdAnswer(dataTO); - } - - CopyCommandResult result = new CopyCommandResult(null, copyCmdAnswer); - - result.setResult(errMsg); - - callback.complete(result); - } } private void handleVolumeMigrationForXenServer(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo) { From 293a0f61f491878b116149c612148bdd672a1433 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 30 Apr 2024 00:17:12 +0000 Subject: [PATCH 31/47] fix for flash array delete error --- .../motion/StorageSystemDataMotionStrategy.java | 2 +- .../adapter/flasharray/FlashArrayAdapter.java | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git 
a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 485d7cf9f50a..1e83059034c8 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -683,7 +683,7 @@ private Scope getZoneScope(Scope scope) { return zoneScope; } - private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + private vo(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { String errMsg = null; diff --git a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java index 96e4b1c7ad27..a4b335a260a2 100644 --- a/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java +++ b/plugins/storage/volume/flasharray/src/main/java/org/apache/cloudstack/storage/datastore/adapter/flasharray/FlashArrayAdapter.java @@ -209,12 +209,14 @@ public void delete(ProviderAdapterContext context, ProviderAdapterDataObject dat // pure keeps the volume(s) around in a Destroyed bucket for a period of time post delete String timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new java.util.Date()); volume.setExternalName(fullName + "-" + timestamp); - PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { - }); - // now delete it with new name - volume.setDestroyed(true); try { + PATCH("/volumes?names=" + fullName, volume, new TypeReference>() { + }); + + // now 
delete it with new name + volume.setDestroyed(true); + PATCH("/volumes?names=" + fullName + "-" + timestamp, volume, new TypeReference>() { }); } catch (CloudRuntimeException e) { From 6c0cb4889af58ca01e82479e6b95135f2bc5efdd Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 30 Apr 2024 12:17:33 +0000 Subject: [PATCH 32/47] fix typo in StorageSystemDataMotionStrategy --- .../motion/StorageSystemDataMotionStrategy.java | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 1e83059034c8..bf09536e85a5 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -683,10 +683,8 @@ private Scope getZoneScope(Scope scope) { return zoneScope; } - private vo(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, + private void handleVolumeMigrationFromNonManagedStorageToManagedStorage(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, AsyncCompletionCallback callback) { - String errMsg = null; - try { HypervisorType hypervisorType = srcVolumeInfo.getHypervisorType(); @@ -697,19 +695,17 @@ private vo(VolumeInfo srcVolumeInfo, VolumeInfo destVolumeInfo, if (HypervisorType.XenServer.equals(hypervisorType)) { handleVolumeMigrationForXenServer(srcVolumeInfo, destVolumeInfo); - CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(errMsg); destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); DataTO dataTO = destVolumeInfo.getTO(); - copyCmdAnswer = new CopyCmdAnswer(dataTO); + CopyCmdAnswer copyCmdAnswer = new CopyCmdAnswer(dataTO); CopyCommandResult result = new 
CopyCommandResult(null, copyCmdAnswer); - result.setResult(errMsg); callback.complete(result); } else { handleVolumeMigrationForKVM(srcVolumeInfo, destVolumeInfo, callback); } } catch (Exception ex) { - errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + + String errMsg = "Migration operation failed in 'StorageSystemDataMotionStrategy.handleVolumeMigrationFromNonManagedStorageToManagedStorage': " + ex.getMessage(); throw new CloudRuntimeException(errMsg, ex); @@ -883,7 +879,6 @@ private void handleVolumeMigrationForKVM(VolumeInfo srcVolumeInfo, VolumeInfo de // re-retrieve volume to get any updated information from grant destVolumeInfo = _volumeDataFactory.getVolume(destVolumeInfo.getId(), destVolumeInfo.getDataStore()); - CopyCmdAnswer copyCmdAnswer; if (errMsg != null) { copyCmdAnswer = new CopyCmdAnswer(errMsg); From dc1979b76e84c017734e08a81e4795603cad89b0 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 30 Apr 2024 13:57:00 +0000 Subject: [PATCH 33/47] change copyVolume to use writeback to speed up copy ops --- scripts/storage/multipath/copyVolume.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/storage/multipath/copyVolume.sh b/scripts/storage/multipath/copyVolume.sh index d169198251be..8e6609ea1083 100755 --- a/scripts/storage/multipath/copyVolume.sh +++ b/scripts/storage/multipath/copyVolume.sh @@ -22,7 +22,7 @@ OUTPUT_FILE=${3:?"Output file/path is required"} echo "$(date): qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE}" -qemu-img convert -n -p -W -t none -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { +qemu-img convert -n -p -W -t writeback -O ${OUTPUT_FORMAT} ${INPUT_FILE} ${OUTPUT_FILE} && { # if its a block device make sure we flush caches before exiting lsblk ${OUTPUT_FILE} >/dev/null 2>&1 && { blockdev --flushbufs ${OUTPUT_FILE} From d4e67516dd2ffc492893e80b95ccdc84444ac572 Mon 
Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 31 May 2024 22:23:09 +0000 Subject: [PATCH 34/47] remove returning PrimaryStorageDownloadAnswer when connectPhysicalDisk returns false during KVMStorageProcessor template copy --- .../com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 766b82ecb58c..a86f0025b2c3 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -270,7 +270,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + //return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); From a1b4a6019e30cc78acdc1fe187b3a1d45aab7252 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Thu, 6 Jun 2024 01:38:44 +0000 Subject: [PATCH 35/47] remove returning PrimaryStorageDownloadAnswer when connectPhysicalDisk returns false during KVMStorageProcessor template copy --- .../com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 1 - 1 file changed, 1 deletion(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index a86f0025b2c3..ce39962d0c3b 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -270,7 +270,6 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - //return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); From 4323252c6d8b543029b970c5ac834260dc5fd46c Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Thu, 6 Jun 2024 01:40:36 +0000 Subject: [PATCH 36/47] remove change to only set UUID on snapshot if it is a vmSnapshot --- .../com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index ce39962d0c3b..c8c355dfd61d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -1031,9 +1031,7 @@ public Answer backupSnapshot(final CopyCommand cmd) { command.add(NAME_OPTION, snapshotName); command.add("-p", snapshotDestPath); - if (isCreatedFromVmSnapshot) { - descName = UUID.randomUUID().toString(); - } + descName = UUID.randomUUID().toString(); command.add("-t", descName); final String result = command.execute(); From 9f8daa287805a230ad81dc557e6bda7d9d5a00f0 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Thu, 6 Jun 2024 02:07:19 +0000 Subject: [PATCH 37/47] reverting change to UserVmManagerImpl.configureCustomRootDiskSize --- .../main/java/com/cloud/vm/UserVmManagerImpl.java | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ffd0a4176faa..5e8d2e715859 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -4368,7 +4368,14 @@ public boolean checkIfDynamicScalingCanBeEnabled(VirtualMachine vm, ServiceOffer */ protected long 
configureCustomRootDiskSize(Map customParameters, VMTemplateVO template, HypervisorType hypervisorType, DiskOfferingVO rootDiskOffering) { verifyIfHypervisorSupportsRootdiskSizeOverride(hypervisorType); - long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1)); + Long rootDiskSizeCustomParam = null; + if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { + rootDiskSizeCustomParam = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); + if (rootDiskSizeCustomParam <= 0) { + throw new InvalidParameterValueException("Root disk size should be a positive number."); + } + } + long rootDiskSizeInBytes = verifyAndGetDiskSize(rootDiskOffering, rootDiskSizeCustomParam); if (rootDiskSizeInBytes > 0) { //if the size at DiskOffering is not zero then the Service Offering had it configured, it holds priority over the User custom size _volumeService.validateVolumeSizeInBytes(rootDiskSizeInBytes); long rootDiskSizeInGiB = rootDiskSizeInBytes / GiB_TO_BYTES; @@ -4377,11 +4384,7 @@ protected long configureCustomRootDiskSize(Map customParameters, } if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); - if (rootDiskSize <= 0) { - throw new InvalidParameterValueException("Root disk size should be a positive number."); - } - rootDiskSize *= GiB_TO_BYTES; + Long rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES; _volumeService.validateVolumeSizeInBytes(rootDiskSize); return rootDiskSize; } else { From 310a35b85a20f42de0361d48e9897df8778f4ee8 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 11 Jun 2024 12:45:49 +0000 Subject: [PATCH 38/47] add error checking/simplification per comments from @slavkap --- .../hypervisor/kvm/storage/KVMStorageProcessor.java | 7 +------ .../kvm/storage/MultipathSCSIAdapterBase.java | 
13 +++++++++++-- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index c8c355dfd61d..a310f1f2a5f0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -2473,12 +2473,7 @@ public Answer copyVolumeFromPrimaryToPrimary(CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(destPrimaryStore.getPoolType(), destPrimaryStore.getUuid(), destVolumePath, destPrimaryStore.getDetails())) { s_logger.warn("Failed to connect dest volume at path: " + destVolumePath + ", in storage pool id: " + destPrimaryStore.getUuid()); } - if (destPrimaryStore.getPoolType() == StoragePoolType.FiberChannel) { - destVolumeName = destData.getPath(); - } else { - String managedStoreTarget = destPrimaryStore.getDetails() != null ? destPrimaryStore.getDetails().get("managedStoreTarget") : null; - destVolumeName = managedStoreTarget != null ? managedStoreTarget : destVolumePath; - } + destVolumeName = derivePath(destPrimaryStore, destData, destPrimaryStore.getDetails()); } else { final String volumeName = UUID.randomUUID().toString(); destVolumeName = volumeName + "." 
+ destFormat.getFileExtension(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 9b2f9c65fb2c..23910f1a214f 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -222,11 +222,17 @@ public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool) { return false; } ScriptResult result = runScript(disconnectScript, 60000L, address.getAddress().toLowerCase()); + + if (result.getExitCode() != 0) { + LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", address.getAddress().toLowerCase(), result.getExitCode())); + } + if (LOGGER.isDebugEnabled()) { LOGGER.debug("multipath flush output: " + result.getResult()); LOGGER.debug(String.format("disconnectPhysicalDisk(volumePath,pool) called with args (%s, %s) COMPLETE [rc=%s]", volumePath, pool.getUuid(), result.getResult())); } - return true; + + return (result.getExitCode() == 0); } @Override @@ -247,11 +253,14 @@ public boolean disconnectPhysicalDiskByPath(String localPath) { for (String oui: SUPPORTED_OUI_LIST) { if (multipathName.length() > 1 && multipathName.substring(2).startsWith(oui)) { ScriptResult result = runScript(disconnectScript, 60000L, multipathName); + if (result.getExitCode() != 0) { + LOGGER.warn(String.format("Disconnect failed for path [%s] with return code [%s]", multipathName, result.getExitCode())); + } if (LOGGER.isDebugEnabled()) { LOGGER.debug("multipath flush output: " + result.getResult()); LOGGER.debug(String.format("disconnectPhysicalDiskByPath(localPath) called with args (%s) COMPLETE [rc=%s]", localPath, result.getExitCode())); } - return true; + return (result.getExitCode() == 0); } } } From 
89d7a2cfa3493b324ad96616ba3520f3b49ba045 Mon Sep 17 00:00:00 2001 From: Rene Glover Date: Fri, 21 Jun 2024 09:55:33 -0500 Subject: [PATCH 39/47] Update engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java Co-authored-by: Suresh Kumar Anaparti --- .../storage/motion/StorageSystemDataMotionStrategy.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index bf09536e85a5..1a3469c0bad2 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -935,7 +935,7 @@ private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snap try { volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); - volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); _volumeDao.persist(volumeVO); tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); From c55dfc954e71d68848f839a012a292da23533941 Mon Sep 17 00:00:00 2001 From: Rene Glover Date: Fri, 21 Jun 2024 10:00:13 -0500 Subject: [PATCH 40/47] Update framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java Co-authored-by: Suresh Kumar Anaparti --- .../spring/module/model/impl/DefaultModuleDefinitionSet.java | 1 - 1 file changed, 1 deletion(-) diff --git 
a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java index 7c73a27f71f8..cda07c9dee6a 100644 --- a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -103,7 +103,6 @@ public void with(ModuleDefinition def, Stack parents) { try { if (context == null) { log.warn(String.format("Application context not found for module definition [%s]", moduleDefinitionName)); - } else if (context.containsBean("moduleStartup")) { Runnable runnable = context.getBean("moduleStartup", Runnable.class); log.info(String.format("Starting module [%s].", moduleDefinitionName)); From 42780e76163f72e7663b78945e74e62325fb4158 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 21 Jun 2024 15:02:36 +0000 Subject: [PATCH 41/47] address PR comments from @sureshanaparti --- .../motion/StorageSystemDataMotionStrategy.java | 16 ++++++++-------- .../kvm/storage/FiberChannelAdapter.java | 3 --- .../kvm/storage/MultipathSCSIAdapterBase.java | 8 -------- 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index bf09536e85a5..ff211c0864d5 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -931,18 +931,18 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, 
private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snapshotInfo) { VolumeInfo tempVolumeInfo = null; - VolumeVO volumeVO = null; + VolumeVO tempVolumeVO = null; try { - volumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", + tempVolumeVO = new VolumeVO(Volume.Type.DATADISK, snapshotInfo.getName() + "_" + System.currentTimeMillis() + ".TMP", snapshotInfo.getDataCenterId(), snapshotInfo.getDomainId(), snapshotInfo.getAccountId(), 0, ProvisioningType.THIN, snapshotInfo.getSize(), 0L, 0L, ""); - volumeVO.setPoolId(snapshotInfo.getDataStore().getId()); - _volumeDao.persist(volumeVO); - tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + tempVolumeVO.setPoolId(snapshotInfo.getDataStore().getId()); + _volumeDao.persist(tempVolumeVO); + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); if (snapshotInfo.getDataStore().getDriver().canCopy(snapshotInfo, tempVolumeInfo)) { snapshotInfo.getDataStore().getDriver().copyAsync(snapshotInfo, tempVolumeInfo, null, null); // refresh volume info as data could have changed - tempVolumeInfo = this._volFactory.getVolume(volumeVO.getId()); + tempVolumeInfo = this._volFactory.getVolume(tempVolumeVO.getId()); } else { throw new CloudRuntimeException("Storage driver indicated it could create a volume from the snapshot but rejected the subsequent request to do so"); } @@ -954,8 +954,8 @@ private VolumeInfo createTemporaryVolumeCopyOfSnapshotAdaptive(SnapshotInfo snap } // cleanup temporary volume - if (volumeVO != null) { - _volumeDao.remove(volumeVO.getId()); + if (tempVolumeVO != null) { + _volumeDao.remove(tempVolumeVO.getId()); } } catch (Throwable e2) { LOGGER.warn("Failed to delete temporary volume created for copy", e2); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java 
index 1bc96dd396e3..d5192bfdb718 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/FiberChannelAdapter.java @@ -41,9 +41,6 @@ public FiberChannelAdapter() { if (hostname.indexOf(".") > 0) { hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain } - if (hostname.indexOf(".") > 0) { - hostname = hostname.substring(0, hostname.indexOf(".")); // strip off domain - } hostnameFq = inetAddress.getCanonicalHostName(); // fully qualified hostname LOGGER.info("Loaded FiberChannelAdapter for StorageLayer on host [" + hostname + "]"); } catch (UnknownHostException e) { diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 23910f1a214f..5bcb6e48d974 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -364,14 +364,6 @@ public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMSt LOGGER.debug("Starting COPY from source path " + srcFile.getFileName() + " to target volume path: " + destDisk.getPath()); ScriptResult result = runScript(copyScript, timeout, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()); - /**Script script = new Script( - String.format("%s %s %s %s", copyScript, destDisk.getFormat().toString().toLowerCase(), srcFile.getFileName(), destFile.getFileName()), - Duration.millis(timeout), - LOGGER); - - script.execute(); - int rc = script.getExitValue(); - */ int rc = result.getExitCode(); if (rc != 0) { throw new CloudRuntimeException("Failed to convert from " + srcFile.getFileName() + " to " + destFile.getFileName() 
+ " the error was: " + rc + " - " + result.getResult()); From 6ff500dfc3d2507823ba3f8e4ddf99471131a1a7 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 20 Dec 2024 18:10:46 -0600 Subject: [PATCH 42/47] fiberchannel and various small fixes for 4.19.2 --- .../api/storage/PrimaryDataStoreDriver.java | 16 ++++++ .../StorageSystemDataMotionStrategy.java | 36 ++++++++++--- .../storage/volume/VolumeServiceImpl.java | 4 +- .../CloudStackExtendedLifeCycle.java | 9 +++- .../lifecycle/registry/RegistryLifecycle.java | 13 +++-- .../hypervisor/kvm/resource/LibvirtVMDef.java | 5 +- ...rtGetUnmanagedInstancesCommandWrapper.java | 5 +- .../kvm/storage/KVMStorageProcessor.java | 21 +++++--- .../kvm/storage/MultipathSCSIAdapterBase.java | 24 +++++++++ .../KubernetesClusterActionWorker.java | 5 ++ .../cluster/utils/KubernetesClusterUtil.java | 2 +- .../adapter/primera/PrimeraAdapter.java | 51 +++++++++++++++---- scripts/storage/multipath/cleanStaleMaps.sh | 10 +++- scripts/storage/multipath/disconnectVolume.sh | 3 ++ .../com/cloud/user/AccountManagerImpl.java | 6 ++- .../java/com/cloud/vm/UserVmManagerImpl.java | 8 ++- .../LocalNfsSecondaryStorageResource.java | 4 +- 17 files changed, 184 insertions(+), 38 deletions(-) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index 2c7d3c602783..dbe67e6cca55 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -157,4 +157,20 @@ default boolean volumesRequireGrantAccessWhenUsed() { default boolean zoneWideVolumesAvailableWithoutClusterMotion() { return false; } + + /** + * This method returns the actual size required on the pool for a volume. 
+ * + * @param volumeSize + * Size of volume to be created on the store + * @param templateSize + * Size of template, if any, which will be used to create the volume + * @param isEncryptionRequired + * true if volume is encrypted + * + * @return the size required on the pool for the volume + */ + default long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) { + return volumeSize; + } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 4dfbfbc40cdf..a4888e2e45e6 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -39,6 +39,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -104,6 +105,7 @@ import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.MigrationOptions; +import com.cloud.storage.ScopeType; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; @@ -919,11 +921,17 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, HostVO hostVO; - if (srcStoragePoolVO.getClusterId() != null) { - hostVO = 
getHostInCluster(srcStoragePoolVO.getClusterId()); - } - else { - hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + // if either source or destination is a local storage pool, the migration MUST be performed on that host + if (ScopeType.HOST.equals(srcVolumeInfo.getDataStore().getScope().getScopeType())) { + hostVO = _hostDao.findById(srcVolumeInfo.getDataStore().getScope().getScopeId()); + } else if (ScopeType.HOST.equals(destVolumeInfo.getDataStore().getScope().getScopeType())) { + hostVO = _hostDao.findById(destVolumeInfo.getDataStore().getScope().getScopeId()); + } else { + if (srcStoragePoolVO.getClusterId() != null) { + hostVO = getHostInCluster(srcStoragePoolVO.getClusterId()); + } else { + hostVO = getHost(destVolumeInfo.getDataCenterId(), HypervisorType.KVM, false); + } } return hostVO; @@ -1524,6 +1532,16 @@ private void handleCreateVolumeFromTemplateBothOnStorageSystem(TemplateInfo temp verifyFormat(templateInfo.getFormat()); } + // this blurb handles the case where the storage system can clone a volume from a template + String canCloneVolumeFromTemplate = templateInfo.getDataStore().getDriver().getCapabilities().get("CAN_CLONE_VOLUME_FROM_TEMPLATE"); + if (canCloneVolumeFromTemplate != null && canCloneVolumeFromTemplate.toLowerCase().equals("true")) { + DataStoreDriver driver = templateInfo.getDataStore().getDriver(); + driver.createAsync(volumeInfo.getDataStore(), volumeInfo, null); + volumeInfo = _volumeDataFactory.getVolume(volumeInfo.getId(), volumeInfo.getDataStore()); + driver.copyAsync(templateInfo, volumeInfo, null); + return; + } + HostVO hostVO = null; final boolean computeClusterSupportsVolumeClone; @@ -1559,6 +1577,8 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA) { } } + + VolumeDetailVO volumeDetail = new VolumeDetailVO(volumeInfo.getId(), "cloneOfTemplate", String.valueOf(templateInfo.getId()), @@ -1631,7 +1651,7 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA) { errMsg = "Create volume from 
template failed: " + ex.getMessage(); } - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (copyCmdAnswer == null) { @@ -1902,7 +1922,7 @@ private void deleteVolumeFromSnapshot(SnapshotInfo snapshotInfo) { } } catch (Throwable e) { - LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transction will not be failed but an adminstrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); + LOGGER.warn("Failed to clean up temporary volume created for copy from a snapshot, transaction will not be failed but an administrator should clean this up: " + snapshotInfo.getUuid() + " - " + snapshotInfo.getPath(), e); } } @@ -2651,7 +2671,7 @@ private void handleCreateTemplateFromManagedVolume(VolumeInfo volumeInfo, Templa catch (Exception ex) { errMsg = ex.getMessage(); - throw new CloudRuntimeException(errMsg); + throw new CloudRuntimeException(errMsg, ex); } finally { if (copyCmdAnswer == null) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index a47cb41a3237..8edf391ac7e3 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1033,7 +1033,9 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + StorageAccessException e2 = new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + e2.initCause(e); + throw e; } 
templateOnPrimary.processEvent(Event.CopyingRequested); diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java index b0c1dcc0760e..c35360e33787 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java @@ -71,7 +71,11 @@ public void startBeans() { with(new WithComponentLifeCycle() { @Override public void with(ComponentLifecycle lifecycle) { - lifecycle.start(); + try { + lifecycle.start(); + } catch (Throwable e) { + log.warn("Unable to start component: " + lifecycle.getName(), e); + } if (lifecycle instanceof ManagementBean) { ManagementBean mbean = (ManagementBean)lifecycle; @@ -115,6 +119,9 @@ public void with(ComponentLifecycle lifecycle) { } catch (ConfigurationException e) { log.error("Failed to configure " + lifecycle.getName(), e); throw new CloudRuntimeException(e); + } catch (Throwable e) { + log.error("Failed to configure " + lifecycle.getName(), e); + throw new CloudRuntimeException(e); } } }); diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java index 43efd8461840..c82ef556a188 100644 --- a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java @@ -108,10 +108,15 @@ public void start() { while (iter.hasNext()) { Object next = iter.next(); - if (registry.register(next)) { - log.debug("Registered " + next); - } else { - iter.remove(); 
+ try { + if (registry.register(next)) { + log.debug("Registered " + next); + } else { + log.warn("Bean registration failed for " + next.toString()); + iter.remove(); + } + } catch (Throwable e) { + log.warn("Bean registration attempt resulted in an exception for " + next.toString(), e); } } } diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java index cfd72c28b5af..0abf8e4100b0 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtVMDef.java @@ -184,6 +184,7 @@ public String toString() { guestDef.append("Apache Software Foundation\n"); guestDef.append("CloudStack " + _type.toString() + " Hypervisor\n"); guestDef.append("" + _uuid + "\n"); + guestDef.append("" + _uuid + "\n"); guestDef.append("\n"); guestDef.append("\n"); @@ -222,7 +223,9 @@ public String toString() { guestDef.append("\n"); } } - guestDef.append("\n"); + if (_arch == null || !_arch.equals("aarch64")) { + guestDef.append("\n"); + } guestDef.append("\n"); if (iothreads) { guestDef.append(String.format("%s", NUMBER_OF_IOTHREADS)); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java index 65de4f6d3105..ea1e160466b8 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtGetUnmanagedInstancesCommandWrapper.java @@ -124,7 +124,10 @@ private UnmanagedInstanceTO getUnmanagedInstance(LibvirtComputingResource libvir 
instance.setName(domain.getName()); instance.setCpuCores((int) LibvirtComputingResource.countDomainRunningVcpus(domain)); - instance.setCpuSpeed(parser.getCpuTuneDef().getShares()/instance.getCpuCores()); + + if (parser.getCpuTuneDef() != null && instance.getCpuCores() != null) { + instance.setCpuSpeed(parser.getCpuTuneDef().getShares()/instance.getCpuCores()); + } if (parser.getCpuModeDef() != null) { instance.setCpuCoresPerSocket(parser.getCpuModeDef().getCoresPerSocket()); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 29d36b26417b..e860c952fe20 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -270,6 +270,7 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); @@ -413,7 +414,7 @@ public Answer cloneVolumeFromBaseTemplate(final CopyCommand cmd) { if (primaryPool.getType() == StoragePoolType.CLVM) { templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath; vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), volume.getSize(), cmd.getWaitInMillSeconds()); - } if (primaryPool.getType() == StoragePoolType.PowerFlex) { + } if (primaryPool.getType() == StoragePoolType.PowerFlex || primaryPool.getType() == StoragePoolType.FiberChannel) { Map details = primaryStore.getDetails(); String path = derivePath(primaryStore, destData, details); @@ -764,15 +765,19 @@ else if (srcData instanceof SnapshotObjectTO) { KVMStoragePool secondaryStorage = null; + String path = null; try { // look for options indicating an overridden path or IQN. Used when snapshots have to be // temporarily copied on the manaaged storage device before the actual copy to target object Map details = cmd.getOptions(); - String path = details != null ? details.get(DiskTO.PATH) : null; + path = details != null ? details.get(DiskTO.PATH) : null; if (path == null) { path = details != null ? 
details.get(DiskTO.IQN) : null; if (path == null) { - new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + path = srcData.getPath(); + if (path == null) { + new CloudRuntimeException("The 'path' or 'iqn' field must be specified."); + } } } @@ -835,8 +840,6 @@ else if (srcData instanceof SnapshotObjectTO) { loc.addFormat(info); loc.save(); - storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); - TemplateObjectTO newTemplate = new TemplateObjectTO(); newTemplate.setPath(templateFolder + File.separator + templateName + ".qcow2"); @@ -856,6 +859,10 @@ else if (srcData instanceof SnapshotObjectTO) { return new CopyCmdAnswer(ex.toString()); } finally { + if (path != null) { + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path); + } + if (secondaryStorage != null) { secondaryStorage.delete(); } @@ -1031,7 +1038,9 @@ public Answer backupSnapshot(final CopyCommand cmd) { command.add(NAME_OPTION, snapshotName); command.add("-p", snapshotDestPath); - descName = UUID.randomUUID().toString(); + if (isCreatedFromVmSnapshot) { + descName = UUID.randomUUID().toString(); + } command.add("-t", descName); final String result = command.execute(); diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java index 5bcb6e48d974..7db77725527c 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java @@ -160,6 +160,13 @@ private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool KVMPhysicalDisk disk = new KVMPhysicalDisk(address.getPath(), address.toString(), pool); disk.setFormat(QemuImg.PhysicalDiskFormat.RAW); + // validate we have a 
connection, if not we need to connect first. + if (!isConnected(address.getPath())) { + if (!connectPhysicalDisk(address, pool, null)) { + throw new CloudRuntimeException("Unable to connect to volume " + address.getPath()); + } + } + long diskSize = getPhysicalDiskSize(address.getPath()); disk.setSize(diskSize); disk.setVirtualSize(diskSize); @@ -197,6 +204,10 @@ public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) { // validate we have a connection id - we can't proceed without that if (address.getConnectionId() == null) { LOGGER.error("Unable to connect volume with address [" + address.getPath() + "] of the storage pool: " + pool.getUuid() + " - connection id is not set in provided path"); @@ -508,6 +519,19 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l return false; } + boolean isConnected(String path) { + // run a command to test if this is a binary device at this path + Script blockTest = new Script("/bin/test", LOGGER); + blockTest.add("-b", path); + blockTest.execute(); + int rc = blockTest.getExitValue(); + if (rc == 0) { + return true; + } + return false; + } + + long getPhysicalDiskSize(String diskPath) { if (StringUtils.isEmpty(diskPath)) { return 0; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java index 199f6da90d29..dda5c3f79fcd 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/actionworkers/KubernetesClusterActionWorker.java @@ -362,6 +362,11 @@ protected IpAddress getVpcTierKubernetesPublicIp(Network network) { IpAddress address = 
ipAddressDao.findByUuid(detailsVO.getValue()); if (address == null || network.getVpcId() != address.getVpcId()) { LOGGER.warn(String.format("Public IP with ID: %s linked to the Kubernetes cluster: %s is not usable", detailsVO.getValue(), kubernetesCluster.getName())); + if (address == null) { + LOGGER.warn(String.format("Public IP with ID: %s was not found by uuid", detailsVO.getValue())); + } else { + LOGGER.warn(String.format("Public IP with ID: %s was associated with vpc %d instead of %d", detailsVO.getValue(), address.getVpcId().longValue(), network.getVpcId().longValue())); + } return null; } return address; diff --git a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java index e1210a607e67..9ede7c0f8301 100644 --- a/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java +++ b/plugins/integrations/kubernetes-service/src/main/java/com/cloud/kubernetes/cluster/utils/KubernetesClusterUtil.java @@ -192,7 +192,7 @@ public static String getKubernetesClusterConfig(final KubernetesCluster kubernet while (System.currentTimeMillis() < timeoutTime) { try { Pair result = SshHelper.sshExecute(ipAddress, port, user, - sshKeyFile, null, "sudo cat /etc/kubernetes/admin.conf", + sshKeyFile, null, "sudo cat /etc/kubernetes/user.conf 2>/dev/null || sudo cat /etc/kubernetes/admin.conf", 10000, 10000, 10000); if (result.first() && StringUtils.isNotEmpty(result.second())) { diff --git a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java index 1fdc92feedc4..3e37b2efd9a7 100644 --- 
a/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java +++ b/plugins/storage/volume/primera/src/main/java/org/apache/cloudstack/storage/datastore/adapter/primera/PrimeraAdapter.java @@ -145,16 +145,18 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData } // determine volume type based on offering - // THIN: tpvv=true, reduce=false - // SPARSE: tpvv=true, reduce=true - // THICK: tpvv=false, tpZeroFill=true (not supported) + // tpvv -- thin provisioned virtual volume (no deduplication) + // reduce -- thin provisioned virtual volume (with duplication and compression, also known as DECO) + // these are the only choices with newer Primera devices + // we will use THIN for the deduplicated/compressed type and SPARSE for thin-only without dedup/compress + // note: DECO/reduce type must be at least 16GB in size if (diskOffering != null) { if (diskOffering.getType() == ProvisioningType.THIN) { - request.setTpvv(true); - request.setReduce(false); - } else if (diskOffering.getType() == ProvisioningType.SPARSE) { request.setTpvv(false); request.setReduce(true); + } else if (diskOffering.getType() == ProvisioningType.SPARSE) { + request.setTpvv(true); + request.setReduce(false); } else if (diskOffering.getType() == ProvisioningType.FAT) { throw new RuntimeException("This storage provider does not support FAT provisioned volumes"); } @@ -165,8 +167,16 @@ public ProviderVolume create(ProviderAdapterContext context, ProviderAdapterData } } else { // default to deduplicated volume - request.setReduce(true); request.setTpvv(false); + request.setReduce(true); + } + + if (request.getReduce() == true) { + // check if sizeMiB is less than 16GB adjust up to 16GB. 
The AdaptiveDatastoreDriver will automatically + // update this on the cloudstack side to match + if (request.getSizeMiB() < 16 * 1024) { + request.setSizeMiB(16 * 1024); + } } request.setComment(ProviderVolumeNamer.generateObjectComment(context, dataIn)); @@ -184,8 +194,11 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d if (host == null) { throw new RuntimeException("Unable to find host " + hostname + " on storage provider"); } - request.setHostname(host.getName()); + // check if we already have a vlun for requested host + Integer vlun = hasVlun(hostname, hostname); + if (vlun == null) { + request.setHostname(host.getName()); request.setVolumeName(dataIn.getExternalName()); request.setAutoLun(true); // auto-lun returned here: Location: /api/v1/vluns/test_vv02,252,mysystem,2:2:4 @@ -197,7 +210,13 @@ public String attach(ProviderAdapterContext context, ProviderAdapterDataObject d if (toks.length <2) { throw new RuntimeException("Attach volume failed with invalid location response to vlun add command on storage provider. Provided location: " + location); } - return toks[1]; + try { + vlun = Integer.parseInt(toks[1]); + } catch (NumberFormatException e) { + throw new RuntimeException("VLUN attach request succeeded but the VLUN value is not a valid number: " + toks[1]); + } + } + return vlun.toString(); } /** @@ -232,6 +251,20 @@ public void detach(ProviderAdapterContext context, ProviderAdapterDataObject req } } + private Integer hasVlun(String externalName, String hostname) { + PrimeraVlunList list = getVluns(externalName); + if (list != null && list.getMembers().size() > 0) { + for (PrimeraVlun vlun: list.getMembers()) { + if (hostname != null) { + if (vlun.getHostname().equals(hostname) || vlun.getHostname().equals(hostname.split("\\.")[0])) { + return vlun.getLun(); + } + } + } + } + return null; + } + public void removeVlun(String name, Integer lunid, String hostString) { // hostString can be a hostname OR "set:". 
It is stored this way // in the appliance and returned as the vlun's name/string. diff --git a/scripts/storage/multipath/cleanStaleMaps.sh b/scripts/storage/multipath/cleanStaleMaps.sh index 90b9bef5a8de..c1ded42943c6 100755 --- a/scripts/storage/multipath/cleanStaleMaps.sh +++ b/scripts/storage/multipath/cleanStaleMaps.sh @@ -22,10 +22,18 @@ # ############################################################################################# +SCRIPT_NAME=$(basename "$0") + +if [[ $(pgrep -f ${SCRIPT_NAME}) != "$$" ]]; then + echo "Another instance of ${SCRIPT_NAME} is already running! Exiting" + exit +fi + + cd $(dirname $0) for WWID in $(multipathd list maps status | awk '{ if ($4 == 0) { print substr($1,2); }}'); do - ./removeVolume.sh ${WWID} + ./disconnectVolume.sh ${WWID} done exit 0 diff --git a/scripts/storage/multipath/disconnectVolume.sh b/scripts/storage/multipath/disconnectVolume.sh index 067e561f8a33..f894076927f1 100755 --- a/scripts/storage/multipath/disconnectVolume.sh +++ b/scripts/storage/multipath/disconnectVolume.sh @@ -66,6 +66,9 @@ fi logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices" +# Added to give time for the event to be fired to the server +sleep 10 + echo "$(date): ${WWID} removed" exit 0 diff --git a/server/src/main/java/com/cloud/user/AccountManagerImpl.java b/server/src/main/java/com/cloud/user/AccountManagerImpl.java index 3eed429ed215..0488bab98329 100644 --- a/server/src/main/java/com/cloud/user/AccountManagerImpl.java +++ b/server/src/main/java/com/cloud/user/AccountManagerImpl.java @@ -690,7 +690,11 @@ public void checkAccess(Account caller, AccessType accessType, boolean sameOwner for (SecurityChecker checker : _securityCheckers) { if (checker.checkAccess(caller, entity, accessType, apiName)) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName()); + User user = CallContext.current().getCallingUser(); + String 
userName = ""; + if (user != null) + userName = user.getUsername(); + s_logger.debug("Access to " + entity + " granted to " + caller + " by " + checker.getName() + " on behalf of user " + userName); } granted = true; break; diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index b65685474b49..78be4642584d 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -129,8 +129,8 @@ import org.apache.cloudstack.userdata.UserDataManager; import org.apache.cloudstack.utils.bytescale.ByteScaleUtils; import org.apache.cloudstack.utils.security.ParserUtils; -import org.apache.cloudstack.vm.UnmanagedVMsManager; import org.apache.cloudstack.vm.schedule.VMScheduleManager; +import org.apache.cloudstack.vm.UnmanagedVMsManager; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.math.NumberUtils; @@ -4400,7 +4400,11 @@ protected long configureCustomRootDiskSize(Map customParameters, } if (customParameters.containsKey(VmDetailConstants.ROOT_DISK_SIZE)) { - Long rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES; + Long rootDiskSize = NumbersUtil.parseLong(customParameters.get(VmDetailConstants.ROOT_DISK_SIZE), -1); + if (rootDiskSize <= 0) { + throw new InvalidParameterValueException("Root disk size should be a positive number."); + } + rootDiskSize = rootDiskSizeCustomParam * GiB_TO_BYTES; _volumeService.validateVolumeSizeInBytes(rootDiskSize); return rootDiskSize; } else { diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java index 6f189ef5f3c6..08270086e8eb 100644 --- 
a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/LocalNfsSecondaryStorageResource.java @@ -77,14 +77,14 @@ protected void mount(String localRootPath, String remoteDevice, URI uri, String // Change permissions for the mountpoint - seems to bypass authentication Script script = new Script(true, "chmod", _timeout, s_logger); - script.add("777", localRootPath); + script.add("1777", localRootPath); String result = script.execute(); if (result != null) { String errMsg = "Unable to set permissions for " + localRootPath + " due to " + result; s_logger.error(errMsg); throw new CloudRuntimeException(errMsg); } - s_logger.debug("Successfully set 777 permission for " + localRootPath); + s_logger.debug("Successfully set 1777 permission for " + localRootPath); // XXX: Adding the check for creation of snapshots dir here. Might have // to move it somewhere more logical later. 
From a55c3e515a080e133a126cb7e5d2b5692764f318 Mon Sep 17 00:00:00 2001 From: Rene Glover Date: Mon, 27 Jan 2025 09:11:42 -0600 Subject: [PATCH 43/47] Update VolumeServiceImpl.java --- .../apache/cloudstack/storage/volume/VolumeServiceImpl.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index d32604146f7c..f9807ae31cf4 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1035,9 +1035,7 @@ private void copyTemplateToManagedTemplateVolume(TemplateInfo srcTemplateInfo, T try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - StorageAccessException e2 = new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); - e2.initCause(e); - throw e; + throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId(), e); } templateOnPrimary.processEvent(Event.CopyingRequested); From 3914e50e30b3be8718e16b2e9b76958d1ba0ab09 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 27 Jan 2025 10:02:42 -0600 Subject: [PATCH 44/47] updates for StorageAccessException --- .../java/com/cloud/exception/StorageAccessException.java | 4 ++-- .../cloudstack/engine/orchestration/VolumeOrchestrator.java | 6 +++--- .../apache/cloudstack/storage/volume/VolumeServiceImpl.java | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/src/main/java/com/cloud/exception/StorageAccessException.java b/api/src/main/java/com/cloud/exception/StorageAccessException.java index eefbcf5518a3..d54d77d66f1e 100644 --- 
a/api/src/main/java/com/cloud/exception/StorageAccessException.java +++ b/api/src/main/java/com/cloud/exception/StorageAccessException.java @@ -26,7 +26,7 @@ public class StorageAccessException extends RuntimeException { private static final long serialVersionUID = SerialVersionUID.StorageAccessException; - public StorageAccessException(String message) { - super(message); + public StorageAccessException(String message, Exception causer) { + super(message, causer); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index b461e50bf127..a08e74fc13cb 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1827,7 +1827,7 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro try { volService.grantAccess(volFactory.getVolume(newVol.getId()), host, destPool); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to the volume [%s] on host [%s].", newVolToString, host), e); } } @@ -1867,7 +1867,7 @@ protected void grantVolumeAccessToHostIfNeeded(PrimaryDataStore volumeStore, lon try { volService.grantAccess(volFactory.getVolume(volumeId), host, volumeStore); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e); } } @@ -1915,7 +1915,7 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto try { 
volService.grantAccess(volFactory.getVolume(vol.getId()), host, store); } catch (Exception e) { - throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host)); + throw new StorageAccessException(String.format("Unable to grant access to volume [%s] on host [%s].", volToString, host), e); } } else { grantVolumeAccessToHostIfNeeded(store, vol.getId(), host, volToString); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index f9807ae31cf4..aba24b6956b7 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1161,7 +1161,7 @@ private void createManagedVolumeCopyManagedTemplateAsync(VolumeInfo volumeInfo, try { grantAccess(srcTemplateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException("Unable to grant access to src template: " + srcTemplateOnPrimary.getId() + " on host: " + destHost.getId(), e); } _volumeDetailsDao.addDetail(volumeInfo.getId(), volumeDetailKey, String.valueOf(templatePoolRef.getId()), false); @@ -1406,7 +1406,7 @@ public TemplateInfo createManagedStorageTemplate(long srcTemplateId, long destDa try { grantAccess(templateOnPrimary, destHost, destPrimaryDataStore); } catch (Exception e) { - throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId()); + throw new StorageAccessException("Unable to grant access to template: " + templateOnPrimary.getId() + " on host: " + destHost.getId(), e); } 
templateOnPrimary.processEvent(Event.CopyingRequested); From 81a8b40c0f7710ae366d8a917e9f6e423cae4f6b Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Tue, 28 Jan 2025 08:59:22 -0600 Subject: [PATCH 45/47] * null path in KVMStorageProcessor * Code cleanup tabs/newlines --- .../api/storage/PrimaryDataStoreDriver.java | 16 ---------------- .../motion/StorageSystemDataMotionStrategy.java | 3 --- .../kvm/storage/KVMStorageProcessor.java | 7 ++++++- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java index d52c656f6dbc..0e70c7b528dd 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreDriver.java @@ -183,20 +183,4 @@ default boolean volumesRequireGrantAccessWhenUsed() { default boolean zoneWideVolumesAvailableWithoutClusterMotion() { return false; } - - /** - * This method returns the actual size required on the pool for a volume. 
- * - * @param volumeSize - * Size of volume to be created on the store - * @param templateSize - * Size of template, if any, which will be used to create the volume - * @param isEncryptionRequired - * true if volume is encrypted - * - * @return the size required on the pool for the volume - */ - default long getVolumeSizeRequiredOnPool(long volumeSize, Long templateSize, boolean isEncryptionRequired) { - return volumeSize; - } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java index 0c6cc7459142..f2d54823a0cf 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/StorageSystemDataMotionStrategy.java @@ -924,7 +924,6 @@ private HostVO getHostOnWhichToExecuteMigrationCommand(VolumeInfo srcVolumeInfo, HostVO hostVO; // if either source or destination is a HOST-scoped storage pool, the migration MUST be performed on that host - if (ScopeType.HOST.equals(srcVolumeInfo.getDataStore().getScope().getScopeType())) { hostVO = _hostDao.findById(srcVolumeInfo.getDataStore().getScope().getScopeId()); } else if (ScopeType.HOST.equals(destVolumeInfo.getDataStore().getScope().getScopeType())) { @@ -1580,8 +1579,6 @@ else if (volumeInfo.getFormat() == ImageFormat.OVA) { } } - - VolumeDetailVO volumeDetail = new VolumeDetailVO(volumeInfo.getId(), "cloneOfTemplate", String.valueOf(templateInfo.getId()), diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 5a67f1569395..05c2d1fac88d 100644 --- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -272,7 +272,11 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { String path = derivePath(primaryStore, destData, details); - if (!storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { + if (path == null) { + path = destTempl.getUuid(); + } + + if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } @@ -338,6 +342,7 @@ private String derivePath(PrimaryDataStoreTO primaryStore, DataTO destData, Map< } else { path = details != null ? 
details.get("managedStoreTarget") : null; } + return path; } From f066100de9e14a909be95091a5b5b0b2b8176ef1 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Fri, 31 Jan 2025 13:51:50 -0600 Subject: [PATCH 46/47] only fail on template connectPhysicalDisk if storage pool type == FiberChannel --- .../cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 05c2d1fac88d..bce743cdd80d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -277,8 +277,12 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { + s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } else { + s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + } } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds()); From 1dfadad8e183fe45d269aa4a0dc4808d9fdb1c97 Mon Sep 17 00:00:00 2001 From: "Glover, Rene (rg9975)" Date: Mon, 3 Feb 2025 08:10:23 -0600 Subject: [PATCH 47/47] Revert "only fail on template connectPhysicalDisk if storage pool type == FiberChannel" This reverts commit f066100de9e14a909be95091a5b5b0b2b8176ef1. --- .../cloud/hypervisor/kvm/storage/KVMStorageProcessor.java | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index bce743cdd80d..05c2d1fac88d 100644 --- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -277,12 +277,8 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) { } if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) { - if (primaryStore.getPoolType() == StoragePoolType.FiberChannel) { - s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - } else { - s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); - } + s_logger.warn("Failed to connect physical disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); + return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid()); } primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? 
path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());