Merge branch '4.20'
diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
index 5483331..3d398ca 100644
--- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java
@@ -1710,7 +1710,7 @@
                         }
                     }
                 } catch (final Throwable th) {
-                    logger.warn("Caught: ", th);
+                    logger.error("Caught: ", th);
                     answer = new Answer(cmd, false, th.getMessage());
                 }
                 answers[i] = answer;
@@ -1725,7 +1725,7 @@
             try {
                 link.send(response.toBytes());
             } catch (final ClosedChannelException e) {
-                logger.warn("Unable to send response because connection is closed: {}", response);
+                logger.error("Unable to send response because connection is closed: {}", response);
             }
         }
 
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index ba50d5f..7af9b6b 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -184,6 +184,7 @@
 import com.cloud.vm.dao.UserVmCloneSettingDao;
 import com.cloud.vm.dao.UserVmDao;
 import com.cloud.vm.dao.VMInstanceDetailsDao;
+import com.cloud.vm.dao.VMInstanceDao;
 
 public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable {
 
@@ -270,6 +271,8 @@
     ConfigDepot configDepot;
     @Inject
     ConfigurationDao configurationDao;
+    @Inject
+    VMInstanceDao vmInstanceDao;
 
     @Inject
     protected SnapshotHelper snapshotHelper;
@@ -972,9 +975,7 @@
 
         // Create event and update resource count for volumes if vm is a user vm
         if (vm.getType() == VirtualMachine.Type.User) {
-
             Long offeringId = null;
-
             if (!offering.isComputeOnly()) {
                 offeringId = offering.getId();
             }
@@ -1943,14 +1944,18 @@
 
         if (newSize != vol.getSize()) {
             DiskOfferingVO diskOffering = diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
-            if (newSize > vol.getSize()) {
-                _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
-                        vol.isDisplay(), newSize - vol.getSize(), diskOffering);
-                _resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
-                        newSize - vol.getSize(), diskOffering);
-            } else {
-                _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
-                        vol.getSize() - newSize, diskOffering);
+            VMInstanceVO vm = vol.getInstanceId() != null ? vmInstanceDao.findById(vol.getInstanceId()) : null;
+            if (vm == null || vm.getType() == VirtualMachine.Type.User) {
+                // Update resource count only for volumes that are detached or attached to user VM instances
+                if (newSize > vol.getSize()) {
+                    _resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
+                            vol.isDisplay(), newSize - vol.getSize(), diskOffering);
+                    _resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
+                            newSize - vol.getSize(), diskOffering);
+                } else {
+                    _resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(), vol.isDisplay(),
+                            vol.getSize() - newSize, diskOffering);
+                }
             }
             vol.setSize(newSize);
             _volsDao.persist(vol);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 11d7aa3..a86efeb 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -3473,7 +3473,7 @@
         }
 
         if (vmSpec.getOs().toLowerCase().contains("window")) {
-            isWindowsTemplate =true;
+            isWindowsTemplate = true;
         }
         for (final DiskTO volume : disks) {
             KVMPhysicalDisk physicalDisk = null;
@@ -3592,6 +3592,9 @@
                     disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(),
                             pool.getUuid(), devId, diskBusType, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
                 } else if (pool.getType() == StoragePoolType.PowerFlex) {
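+                    // For Windows UEFI guests, attach PowerFlex block-based data disks on the SATA bus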
+                    if (isWindowsTemplate && isUefiEnabled) {
+                        diskBusTypeData = DiskDef.DiskBus.SATA;
+                    }
                     disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData);
                     if (physicalDisk.getFormat().equals(PhysicalDiskFormat.QCOW2)) {
                         disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
@@ -3622,7 +3625,6 @@
                             disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
                         }
                     }
-
                 }
                 pool.customizeLibvirtDiskDef(disk);
             }
@@ -4911,6 +4913,14 @@
                     return token[1];
                 }
             } else if (token.length > 3) {
+                // for powerflex/scaleio, path = /dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003
+                if (token.length > 4 && StringUtils.isNotBlank(token[4]) && token[4].startsWith("emc-vol-")) {
+                    final String[] emcVolToken = token[4].split("-");
+                    if (emcVolToken.length == 4) {
+                        return emcVolToken[3];
+                    }
+                }
+
                 // for example, path = /mnt/pool_uuid/disk_path/
                 return token[3];
             }
diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
index b2fc033..d2336f3 100644
--- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
+++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
@@ -235,7 +235,7 @@
     @Override
     public ListResponse<VolumeMetricsStatsResponse> searchForVolumeMetricsStats(ListVolumesUsageHistoryCmd cmd) {
         Pair<List<VolumeVO>, Integer> volumeList = searchForVolumesInternal(cmd);
-        Map<Long,List<VolumeStatsVO>> volumeStatsList = searchForVolumeMetricsStatsInternal(cmd, volumeList.first());
+        Map<Long, List<VolumeStatsVO>> volumeStatsList = searchForVolumeMetricsStatsInternal(cmd, volumeList.first());
         return createVolumeMetricsStatsResponse(volumeList, volumeStatsList);
     }
 
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
index d68d34c..20ca292 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
@@ -573,8 +573,8 @@
                     }
                 }
             } else {
-                 logger.debug("No encryption configured for data volume [id: {}, uuid: {}, name: {}]",
-                         volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName());
+                logger.debug("No encryption configured for volume [id: {}, uuid: {}, name: {}]",
+                        volumeInfo.getId(), volumeInfo.getUuid(), volumeInfo.getName());
             }
 
             return answer;
@@ -1592,7 +1592,7 @@
      * @return true if resize is required
      */
     private boolean needsExpansionForEncryptionHeader(long srcSize, long dstSize) {
-        int headerSize = 32<<20; // ensure we have 32MiB for encryption header
+        int headerSize = 32 << 20; // ensure we have 32MiB for encryption header
         return srcSize + headerSize > dstSize;
     }
 
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
index c13ad61..3d7d1cf 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
@@ -61,6 +61,15 @@
 public class ScaleIOSDCManagerImpl implements ScaleIOSDCManager, Configurable {
     private Logger logger = LogManager.getLogger(getClass());
 
+    static ConfigKey<Boolean> ConnectOnDemand = new ConfigKey<>("Storage",
+            Boolean.class,
+            "powerflex.connect.on.demand",
+            Boolean.TRUE.toString(),
+            "Connect PowerFlex client on Host when first Volume is mapped to SDC and disconnect when last Volume is unmapped from SDC," +
+                    " otherwise no action (that is connection remains in the same state whichever it is, connected or disconnected).",
+            Boolean.TRUE,
+            ConfigKey.Scope.Zone);
+
     @Inject
     AgentManager agentManager;
     @Inject
diff --git a/scripts/util/keystore-cert-import b/scripts/util/keystore-cert-import
index a7523ca..a9465f2 100755
--- a/scripts/util/keystore-cert-import
+++ b/scripts/util/keystore-cert-import
@@ -122,7 +122,7 @@
     ln -sf /etc/pki/libvirt/private/serverkey.pem /etc/pki/libvirt-vnc/server-key.pem
     cloudstack-setup-agent -s > /dev/null
 
-    QEMU_GROUP=$(sed -n 's/^group=//p' /etc/libvirt/qemu.conf | awk -F'"' '{print $2}' | tail -n1)
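+    # tolerate whitespace around '=' and single- or double-quoted values for "group" in qemu.conf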
+    QEMU_GROUP=$(sed -n 's/^group\s*=//p' /etc/libvirt/qemu.conf | tr -d '"' | tr -d ' ' | tr -d "'" | tail -n1)
     if [ ! -z "${QEMU_GROUP// }" ]; then
       chgrp $QEMU_GROUP /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
       chmod 750 /etc/pki/libvirt /etc/pki/libvirt-vnc /etc/pki/CA /etc/pki/libvirt/private /etc/pki/libvirt/servercert.pem /etc/pki/libvirt/private/serverkey.pem /etc/pki/CA/cacert.pem /etc/pki/libvirt-vnc/ca-cert.pem /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
diff --git a/server/conf/cloudstack-sudoers.in b/server/conf/cloudstack-sudoers.in
index 7102410..6e79929 100644
--- a/server/conf/cloudstack-sudoers.in
+++ b/server/conf/cloudstack-sudoers.in
@@ -18,7 +18,7 @@
 # The CloudStack management server needs sudo permissions
 # without a password.
 
-Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img
+Cmnd_Alias CLOUDSTACK = /bin/mkdir, /bin/mount, /bin/umount, /bin/cp, /bin/chmod, /usr/bin/keytool, /bin/keytool, /bin/touch, /bin/find, /bin/df, /bin/ls, /bin/qemu-img, /usr/bin/qemu-img
 
 Defaults:@MSUSER@ !requiretty
 
diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java
index e82d990..70c95bd 100644
--- a/server/src/main/java/com/cloud/server/StatsCollector.java
+++ b/server/src/main/java/com/cloud/server/StatsCollector.java
@@ -1462,7 +1462,7 @@
                                 for (VmDiskStats vmDiskStat : vmDiskStats) {
                                     VmDiskStatsEntry vmDiskStatEntry = (VmDiskStatsEntry)vmDiskStat;
                                     SearchCriteria<VolumeVO> sc_volume = _volsDao.createSearchCriteria();
-                                    sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStatEntry.getPath());
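+                                    // prefix match: the volume path stored in the DB may extend the path reported in the stats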
+                                    sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStatEntry.getPath() + "%");
                                     List<VolumeVO> volumes = _volsDao.search(sc_volume, null);
 
                                     if (CollectionUtils.isEmpty(volumes))
diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
index e24c6db..a278855 100755
--- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
+++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManagerImpl.java
@@ -1965,9 +1965,14 @@
         Type snapshotType = getSnapshotType(policyId);
         Account owner = _accountMgr.getAccount(volume.getAccountId());
 
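+        // Snapshots kept on primary storage (zone does not back up to secondary, or LocationType.PRIMARY) count against the primary storage limit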
+        ResourceType storeResourceType = ResourceType.secondary_storage;
+        if (!isBackupSnapshotToSecondaryForZone(volume.getDataCenterId()) ||
+                Snapshot.LocationType.PRIMARY.equals(locationType)) {
+            storeResourceType = ResourceType.primary_storage;
+        }
         try {
             _resourceLimitMgr.checkResourceLimit(owner, ResourceType.snapshot);
-            _resourceLimitMgr.checkResourceLimit(owner, ResourceType.secondary_storage, new Long(volume.getSize()).longValue());
+            _resourceLimitMgr.checkResourceLimit(owner, storeResourceType, volume.getSize());
         } catch (ResourceAllocationException e) {
             if (snapshotType != Type.MANUAL) {
                 String msg = String.format("Snapshot resource limit exceeded for account %s. Failed to create recurring snapshots", owner);
@@ -2018,7 +2023,7 @@
         }
         CallContext.current().putContextParameter(Snapshot.class, snapshot.getUuid());
         _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.snapshot);
-        _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, new Long(volume.getSize()));
+        _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), storeResourceType, volume.getSize());
         return snapshot;
     }
 
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 2607297..cff0b15 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -678,7 +678,7 @@
             "Wait Interval (in seconds) for shared network vm dhcp ip addr fetch for next iteration ", true);
 
     private static final ConfigKey<Integer> VmIpFetchTrialMax = new ConfigKey<Integer>("Advanced", Integer.class, "externaldhcp.vmip.max.retry", "10",
-            "The max number of retrieval times for shared entwork vm dhcp ip fetch, in case of failures", true);
+            "The max number of retrieval times for shared network vm dhcp ip fetch, in case of failures", true);
 
     private static final ConfigKey<Integer> VmIpFetchThreadPoolMax = new ConfigKey<Integer>("Advanced", Integer.class, "externaldhcp.vmipFetch.threadPool.max", "10",
             "number of threads for fetching vms ip address", true);
@@ -2705,7 +2705,7 @@
 
                             if (vmIdAndCount.getRetrievalCount() <= 0) {
                                 vmIdCountMap.remove(nicId);
-                                logger.debug("Vm " + vmId +" nic "+nicId + " count is zero .. removing vm nic from map ");
+                                logger.debug("Vm {} nic {} count is zero .. removing vm nic from map ", vmId, nicId);
 
                                 ActionEventUtils.onActionEvent(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM,
                                         Domain.ROOT_DOMAIN, EventTypes.EVENT_NETWORK_EXTERNAL_DHCP_VM_IPFETCH,
@@ -2714,12 +2714,15 @@
                                 continue;
                             }
 
-
                             UserVm userVm = _vmDao.findById(vmId);
                             VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId);
                             NicVO nicVo = _nicDao.findById(nicId);
-                            NetworkVO network = _networkDao.findById(nicVo.getNetworkId());
+                            if (ObjectUtils.anyNull(userVm, vmInstance, nicVo)) {
+                                logger.warn("Couldn't fetch ip addr, Vm {} or nic {} doesn't exists", vmId, nicId);
+                                continue;
+                            }
 
+                            NetworkVO network = _networkDao.findById(nicVo.getNetworkId());
                             VirtualMachineProfile vmProfile = new VirtualMachineProfileImpl(userVm);
                             VirtualMachine vm = vmProfile.getVirtualMachine();
                             boolean isWindows = _guestOSCategoryDao.findById(_guestOSDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows");
@@ -5984,7 +5987,7 @@
 
                         for (VmDiskStatsEntry vmDiskStat : vmDiskStats) {
                             SearchCriteria<VolumeVO> sc_volume = _volsDao.createSearchCriteria();
-                            sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath());
+                            sc_volume.addAnd("path", SearchCriteria.Op.LIKE, vmDiskStat.getPath() + "%");
                             List<VolumeVO> volumes = _volsDao.search(sc_volume, null);
                             if ((volumes == null) || (volumes.size() == 0)) {
                                 break;
diff --git a/test/integration/smoke/test_deploy_vm_root_resize.py b/test/integration/smoke/test_deploy_vm_root_resize.py
index 1ef5d7d..b9d14e5 100644
--- a/test/integration/smoke/test_deploy_vm_root_resize.py
+++ b/test/integration/smoke/test_deploy_vm_root_resize.py
@@ -32,6 +32,7 @@
     RESOURCE_PRIMARY_STORAGE
 from nose.plugins.attrib import attr
 from marvin.sshClient import SshClient
+import math
 import time
 import re
 from marvin.cloudstackAPI import updateTemplate,registerTemplate
@@ -276,6 +277,14 @@
             self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list VM "
                                                         "response")
             rootvolume = list_volume_response[0]
+            list_volume_pool_response = list_storage_pools(
+                self.apiclient,
+                id=rootvolume.storageid
+            )
+            rootvolume_pool = list_volume_pool_response[0]
+            if rootvolume_pool.type.lower() == "powerflex":
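+                # PowerFlex allocates volumes in 8 GiB increments; round the expected size up accordingly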
+                newrootsize = (int(math.ceil(newrootsize / 8) * 8))
+
             success = False
             if rootvolume is not None and rootvolume.size  == (newrootsize << 30):
                 success = True
diff --git a/test/integration/smoke/test_import_unmanage_volumes.py b/test/integration/smoke/test_import_unmanage_volumes.py
index 9001e97..fc1c558 100644
--- a/test/integration/smoke/test_import_unmanage_volumes.py
+++ b/test/integration/smoke/test_import_unmanage_volumes.py
@@ -26,7 +26,11 @@
                              ServiceOffering,
                              DiskOffering,
                              VirtualMachine)
-from marvin.lib.common import (get_domain, get_zone, get_suitable_test_template)
+from marvin.lib.common import (get_domain,
+                               get_zone,
+                               get_suitable_test_template,
+                               list_volumes,
+                               list_storage_pools)
 
 # Import System modules
 from nose.plugins.attrib import attr
@@ -107,6 +111,22 @@
     def test_01_detach_unmanage_import_volume(self):
         """Test attach/detach/unmanage/import volume
         """
+
+        volumes = list_volumes(
+            self.apiclient,
+            virtualmachineid=self.virtual_machine.id,
+            type='ROOT',
+            listall=True
+        )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=volume.storageid
+        )
+        volume_pool = volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            self.skipTest("This test is not supported for storage pool type %s on hypervisor KVM" % volume_pool.type)
+
         # Create DATA volume
         volume = Volume.create(
             self.apiclient,
diff --git a/test/integration/smoke/test_over_provisioning.py b/test/integration/smoke/test_over_provisioning.py
index 94e4096..c2b1a5a 100644
--- a/test/integration/smoke/test_over_provisioning.py
+++ b/test/integration/smoke/test_over_provisioning.py
@@ -60,9 +60,10 @@
                             "The environment don't have storage pools required for test")
 
         for pool in storage_pools:
-            if pool.type == "NetworkFilesystem" or pool.type == "VMFS":
+            if pool.type == "NetworkFilesystem" or pool.type == "VMFS" or pool.type == "PowerFlex":
                 break
-        if pool.type != "NetworkFilesystem" and pool.type != "VMFS":
+
+        if pool.type != "NetworkFilesystem" and pool.type != "VMFS" and pool.type != "PowerFlex":
             raise self.skipTest("Storage overprovisioning currently not supported on " + pool.type + " pools")
 
         self.poolId = pool.id
@@ -101,6 +102,9 @@
         """Reset the storage.overprovisioning.factor back to its original value
         @return:
         """
+        if not hasattr(self, 'poolId'):
+            return
+
         storage_pools = StoragePool.list(
                                 self.apiClient,
                                 id = self.poolId
diff --git a/test/integration/smoke/test_restore_vm.py b/test/integration/smoke/test_restore_vm.py
index 3798bef..b961bee 100644
--- a/test/integration/smoke/test_restore_vm.py
+++ b/test/integration/smoke/test_restore_vm.py
@@ -16,10 +16,13 @@
 # under the License.
 """ P1 tests for Scaling up Vm
 """
+
+import math
+
 # Import Local Modules
 from marvin.cloudstackTestCase import cloudstackTestCase
 from marvin.lib.base import (VirtualMachine, Volume, DiskOffering, ServiceOffering, Template)
-from marvin.lib.common import (get_zone, get_domain)
+from marvin.lib.common import (get_zone, get_domain, list_storage_pools)
 from nose.plugins.attrib import attr
 
 _multiprocess_shared_ = True
@@ -78,8 +81,13 @@
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, expunge=True)
 
@@ -88,8 +96,13 @@
         self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect")
 
         root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
+        root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid)
+        root_vol_pool = root_vol_pool_res[0]
+        expected_root_vol_size = self.template_t2.size
+        if root_vol_pool.type.lower() == "powerflex":
+            expected_root_vol_size = (int(math.ceil((expected_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume and template should match")
+        self.assertEqual(root_vol.size, expected_root_vol_size, "Size of volume and template should match")
 
         old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
         self.assertEqual(old_root_vol, None, "Old volume should be deleted")
@@ -105,8 +118,13 @@
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, expunge=True)
 
@@ -115,9 +133,14 @@
         self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's template after restore is incorrect")
 
         root_vol = Volume.list(self.apiclient, virtualmachineid=restored_vm.id)[0]
+        root_vol_pool_res = list_storage_pools(self.apiclient, id=root_vol.storageid)
+        root_vol_pool = root_vol_pool_res[0]
+        expected_root_vol_size = self.disk_offering.disksize
+        if root_vol_pool.type.lower() == "powerflex":
+            expected_root_vol_size = (int(math.ceil(expected_root_vol_size / 8) * 8))
         self.assertEqual(root_vol.diskofferingid, self.disk_offering.id, "Disk offering id should match")
         self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(root_vol.size, self.disk_offering.disksize * 1024 * 1024 * 1024,
+        self.assertEqual(root_vol.size, expected_root_vol_size * 1024 * 1024 * 1024,
                          "Size of volume and disk offering should match")
 
         old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
@@ -134,8 +157,13 @@
         self._cleanup.append(virtual_machine)
 
         old_root_vol = Volume.list(self.apiclient, virtualmachineid=virtual_machine.id)[0]
+        old_root_vol_pool_res = list_storage_pools(self.apiclient, id=old_root_vol.storageid)
+        old_root_vol_pool = old_root_vol_pool_res[0]
+        expected_old_root_vol_size = self.template_t1.size
+        if old_root_vol_pool.type.lower() == "powerflex":
+            expected_old_root_vol_size = (int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 ** 3)
         self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in Ready state")
-        self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of volume and template should match")
+        self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size of volume and template should match")
 
         virtual_machine.restore(self.apiclient, self.template_t2.id, self.disk_offering.id, rootdisksize=16)
 
diff --git a/test/integration/smoke/test_sharedfs_lifecycle.py b/test/integration/smoke/test_sharedfs_lifecycle.py
index f4b2c2f..4daf0d7 100644
--- a/test/integration/smoke/test_sharedfs_lifecycle.py
+++ b/test/integration/smoke/test_sharedfs_lifecycle.py
@@ -38,7 +38,8 @@
                              )
 from marvin.lib.common import (get_domain,
                                get_zone,
-                               get_template)
+                               get_template,
+                               list_storage_pools)
 from marvin.codes import FAILED
 
 from marvin.lib.decoratorGenerators import skipTestIf
@@ -258,15 +259,23 @@
     def test_resize_shared_fs(self):
         """Resize the shared filesystem by changing the disk offering and validate
         """
+        sharedfs_pool_response = list_storage_pools(self.apiclient, id=self.sharedfs.storageid)
+        sharedfs_pool = sharedfs_pool_response[0]
+
         self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs)
         result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
         self.debug(result)
         size = result.split()[-5]
         self.debug("Size of the filesystem is " + size)
-        self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G")
+        if sharedfs_pool.type.lower() == "powerflex":
+            self.assertEqual(size, "8.0G", "SharedFS size should be 8.0G")
+            new_size = 9
+        else:
+            self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G")
+            new_size = 3
 
         response = SharedFS.stop(self.sharedfs, self.apiclient)
-        response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, 3)
+        response = SharedFS.changediskoffering(self.sharedfs, self.apiclient, self.disk_offering.id, new_size)
         self.debug(response)
         response = SharedFS.start(self.sharedfs, self.apiclient)
         time.sleep(10)
@@ -274,4 +283,7 @@
         result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
         size = result.split()[-5]
         self.debug("Size of the filesystem is " + size)
-        self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G")
+        if sharedfs_pool.type.lower() == "powerflex":
+            self.assertEqual(size, "16G", "SharedFS size should be 16G")
+        else:
+            self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G")
diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index f834609..b1a2569 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -18,8 +18,10 @@
 from marvin.codes import FAILED
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackException import CloudstackAPIException
 from marvin.lib.utils import (cleanup_resources,
                               is_snapshot_on_nfs,
+                              is_snapshot_on_powerflex,
                               validateList)
 from marvin.lib.base import (VirtualMachine,
                              Account,
@@ -146,10 +148,16 @@
             type='ROOT',
             listall=True
         )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=volume.storageid
+        )
+        volume_pool = volume_pool_response[0]
 
         snapshot = Snapshot.create(
             self.apiclient,
-            volumes[0].id,
+            volume.id,
             account=self.account.name,
             domainid=self.account.domainid
         )
@@ -209,6 +217,11 @@
             "Check if backup_snap_id is not null"
         )
 
+        if volume_pool.type.lower() == "powerflex":
+            self.assertTrue(is_snapshot_on_powerflex(
+                self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id))
+            return
+
         self.assertTrue(is_snapshot_on_nfs(
             self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id))
         return
@@ -246,6 +259,11 @@
             PASS,
             "Invalid response returned for list volumes")
         vol_uuid = vol_res[0].id
+        volume_pool_response = list_storage_pools(self.apiclient,
+                                                  id=vol_res[0].storageid)
+        volume_pool = volume_pool_response[0]
+        if volume_pool.type.lower() != 'networkfilesystem':
+            self.skipTest("This test is not supported for volumes created on storage pool type %s" % volume_pool.type)
         clusters = list_clusters(
             self.apiclient,
             zoneid=self.zone.id
@@ -437,15 +455,16 @@
             )
             cls._cleanup.append(cls.virtual_machine)
 
-            volumes =Volume.list(
+            volumes = Volume.list(
                 cls.userapiclient,
                 virtualmachineid=cls.virtual_machine.id,
                 type='ROOT',
                 listall=True
             )
+            cls.volume = volumes[0]
             cls.snapshot = Snapshot.create(
                 cls.userapiclient,
-                volumes[0].id,
+                cls.volume.id,
                 account=cls.account.name,
                 domainid=cls.account.domainid
             )
@@ -475,13 +494,28 @@
         """Test creating volume from snapshot
         """
         self.services['volume_from_snapshot']['zoneid'] = self.zone.id
-        self.volume_from_snap = Volume.create_from_snapshot(
-            self.userapiclient,
-            snapshot_id=self.snapshot.id,
-            services=self.services["volume_from_snapshot"],
-            account=self.account.name,
-            domainid=self.account.domainid
+        snapshot_volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=self.volume.storageid
         )
+        snapshot_volume_pool = snapshot_volume_pool_response[0]
+        try:
+            self.volume_from_snap = Volume.create_from_snapshot(
+                self.userapiclient,
+                snapshot_id=self.snapshot.id,
+                services=self.services["volume_from_snapshot"],
+                account=self.account.name,
+                domainid=self.account.domainid
+            )
+        except CloudstackAPIException as cs:
+            self.debug(cs.errorMsg)
+            if snapshot_volume_pool.type.lower() == "powerflex":
+                self.assertTrue(
+                    cs.errorMsg.find("Create volume from snapshot is not supported for PowerFlex volume snapshots") >= 0,
+                    msg="Unexpected error while creating volume from snapshot for volume on PowerFlex pool")
+                return
+            self.fail("Failed to create volume from snapshot: %s" % cs)
+
         self.cleanup.append(self.volume_from_snap)
 
         self.assertEqual(
diff --git a/test/integration/smoke/test_usage.py b/test/integration/smoke/test_usage.py
index a65e491..fef0d8f 100644
--- a/test/integration/smoke/test_usage.py
+++ b/test/integration/smoke/test_usage.py
@@ -40,6 +40,7 @@
 from marvin.lib.common import (get_zone,
                                get_domain,
                                get_suitable_test_template,
+                               list_storage_pools,
                                find_storage_pool_type)
 
 
@@ -611,17 +612,17 @@
         except Exception as e:
             self.fail("Failed to stop instance: %s" % e)
 
-        volume_response = Volume.list(
+        data_volume_response = Volume.list(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
             type='DATADISK',
             listall=True)
         self.assertEqual(
-            isinstance(volume_response, list),
+            isinstance(data_volume_response, list),
             True,
             "Check for valid list volumes response"
         )
-        data_volume = volume_response[0]
+        data_volume = data_volume_response[0]
 
         # Detach data Disk
         self.debug("Detaching volume ID: %s VM with ID: %s" % (
@@ -769,7 +770,25 @@
             "Running",
             "VM state should be running after deployment"
         )
-        self.virtual_machine.attach_volume(self.apiclient,volume_uploaded)
+        root_volume_response = Volume.list(
+            self.apiclient,
+            virtualmachineid=self.virtual_machine.id,
+            type='ROOT',
+            listall=True)
+        root_volume = root_volume_response[0]
+        root_volume_pool_response = list_storage_pools(
+            self.apiclient,
+            id=root_volume.storageid
+        )
+        root_volume_pool = root_volume_pool_response[0]
+        try:
+            self.virtual_machine.attach_volume(self.apiclient, volume_uploaded)
+        except Exception as e:
+            self.debug("Exception: %s" % e)
+            if root_volume_pool.type.lower() == "powerflex" and "this operation is unsupported on storage pool type PowerFlex" in str(e):
+                return
+            self.fail(e)
+
         self.debug("select type from usage_event where offering_id = 6 and volume_id = '%s';"
                    % volume_id)
 
diff --git a/test/integration/smoke/test_vm_autoscaling.py b/test/integration/smoke/test_vm_autoscaling.py
index 7ae61ce..782d2bc 100644
--- a/test/integration/smoke/test_vm_autoscaling.py
+++ b/test/integration/smoke/test_vm_autoscaling.py
@@ -22,6 +22,7 @@
 import logging
 import time
 import datetime
+import math
 
 from nose.plugins.attrib import attr
 from marvin.cloudstackTestCase import cloudstackTestCase
@@ -53,7 +54,8 @@
 
 from marvin.lib.common import (get_domain,
                                get_zone,
-                               get_template)
+                               get_template,
+                               list_storage_pools)
 from marvin.lib.utils import wait_until
 
 MIN_MEMBER = 1
@@ -466,8 +468,10 @@
     def verifyVmProfile(self, vm, autoscalevmprofileid, networkid=None, projectid=None):
         self.message("Verifying profiles of new VM %s (%s)" % (vm.name, vm.id))
         datadisksizeInBytes = None
+        datadiskpoolid = None
         diskofferingid = None
         rootdisksizeInBytes = None
+        rootdiskpoolid = None
         sshkeypairs = None
 
         affinitygroupIdsArray = []
@@ -496,10 +500,24 @@
         for volume in volumes:
             if volume.type == 'ROOT':
                 rootdisksizeInBytes = volume.size
+                rootdiskpoolid = volume.storageid
             elif volume.type == 'DATADISK':
                 datadisksizeInBytes = volume.size
+                datadiskpoolid = volume.storageid
                 diskofferingid = volume.diskofferingid
 
+        rootdisk_pool_response = list_storage_pools(
+            self.apiclient,
+            id=rootdiskpoolid
+        )
+        rootdisk_pool = rootdisk_pool_response[0]
+
+        datadisk_pool = None
+        if datadiskpoolid:
+            datadisk_pool_response = list_storage_pools(
+                self.apiclient,
+                id=datadiskpoolid
+            )
+            datadisk_pool = datadisk_pool_response[0]
+
         vmprofiles_list = AutoScaleVmProfile.list(
             self.regular_user_apiclient,
             listall=True,
@@ -522,18 +540,26 @@
         self.assertEquals(templateid, vmprofile.templateid)
         self.assertEquals(serviceofferingid, vmprofile.serviceofferingid)
 
+        rootdisksize = None
         if vmprofile_otherdeployparams.rootdisksize:
-            self.assertEquals(int(rootdisksizeInBytes), int(vmprofile_otherdeployparams.rootdisksize) * (1024 ** 3))
+            rootdisksize = int(vmprofile_otherdeployparams.rootdisksize)
         elif vmprofile_otherdeployparams.overridediskofferingid:
             self.assertEquals(vmprofile_otherdeployparams.overridediskofferingid, self.disk_offering_override.id)
-            self.assertEquals(int(rootdisksizeInBytes), int(self.disk_offering_override.disksize) * (1024 ** 3))
+            rootdisksize = int(self.disk_offering_override.disksize)
         else:
-            self.assertEquals(int(rootdisksizeInBytes), int(self.templatesize) * (1024 ** 3))
+            rootdisksize = int(self.templatesize)
+
+        if rootdisk_pool.type.lower() == "powerflex":
+            rootdisksize = (int(math.ceil(rootdisksize / 8) * 8))
+        self.assertEquals(int(rootdisksizeInBytes), rootdisksize * (1024 ** 3))
 
         if vmprofile_otherdeployparams.diskofferingid:
             self.assertEquals(diskofferingid, vmprofile_otherdeployparams.diskofferingid)
         if vmprofile_otherdeployparams.disksize:
-            self.assertEquals(int(datadisksizeInBytes), int(vmprofile_otherdeployparams.disksize) * (1024 ** 3))
+            datadisksize = int(vmprofile_otherdeployparams.disksize)
+            if datadisk_pool.type.lower() == "powerflex":
+                datadisksize = (int(math.ceil(datadisksize / 8) * 8))
+            self.assertEquals(int(datadisksizeInBytes), datadisksize * (1024 ** 3))
 
         if vmprofile_otherdeployparams.keypairs:
             self.assertEquals(sshkeypairs, vmprofile_otherdeployparams.keypairs)
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index c7c9a01..8df0b99 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -1710,8 +1710,8 @@
     def get_target_pool(self, volid):
         target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
 
-        if len(target_pools) < 1:
-            self.skipTest("Not enough storage pools found")
+        if target_pools is None or len(target_pools) == 0:
+            self.skipTest("Not enough storage pools found for migration")
 
         return target_pools[0]
 
diff --git a/test/integration/smoke/test_vm_snapshot_kvm.py b/test/integration/smoke/test_vm_snapshot_kvm.py
index 5c133f6..9dd7c52 100644
--- a/test/integration/smoke/test_vm_snapshot_kvm.py
+++ b/test/integration/smoke/test_vm_snapshot_kvm.py
@@ -77,6 +77,18 @@
         Configurations.update(cls.apiclient,
             name = "kvm.vmstoragesnapshot.enabled",
             value = "true")
+
+        cls.services["domainid"] = cls.domain.id
+        cls.services["small"]["zoneid"] = cls.zone.id
+        cls.services["zoneid"] = cls.zone.id
+
+        cls.account = Account.create(
+            cls.apiclient,
+            cls.services["account"],
+            domainid=cls.domain.id
+        )
+        cls._cleanup.append(cls.account)
+
         #The version of CentOS has to be supported
         templ = {
             "name": "CentOS8",
@@ -91,36 +103,33 @@
             "directdownload": True,
         }
 
-        template = Template.register(cls.apiclient, templ, zoneid=cls.zone.id, hypervisor=cls.hypervisor)
+        template = Template.register(
+            cls.apiclient,
+            templ,
+            zoneid=cls.zone.id,
+            account=cls.account.name,
+            domainid=cls.account.domainid,
+            hypervisor=cls.hypervisor
+        )
         if template == FAILED:
             assert False, "get_template() failed to return template\
                     with description %s" % cls.services["ostype"]
 
-        cls.services["domainid"] = cls.domain.id
-        cls.services["small"]["zoneid"] = cls.zone.id
         cls.services["templates"]["ostypeid"] = template.ostypeid
-        cls.services["zoneid"] = cls.zone.id
 
-        cls.account = Account.create(
-            cls.apiclient,
-            cls.services["account"],
-            domainid=cls.domain.id
-        )
-        cls._cleanup.append(cls.account)
-
-        service_offerings_nfs = {
+        service_offering_nfs = {
             "name": "nfs",
-                "displaytext": "nfs",
-                "cpunumber": 1,
-                "cpuspeed": 500,
-                "memory": 512,
-                "storagetype": "shared",
-                "customizediops": False,
-            }
+            "displaytext": "nfs",
+            "cpunumber": 1,
+            "cpuspeed": 500,
+            "memory": 512,
+            "storagetype": "shared",
+            "customizediops": False,
+        }
 
         cls.service_offering = ServiceOffering.create(
             cls.apiclient,
-            service_offerings_nfs,
+            service_offering_nfs,
         )
 
         cls._cleanup.append(cls.service_offering)
@@ -138,7 +147,7 @@
             rootdisksize=20,
         )
         cls.random_data_0 = random_gen(size=100)
-        cls.test_dir = "/tmp"
+        cls.test_dir = "$HOME"
         cls.random_data = "random.data"
         return
 
@@ -201,8 +210,8 @@
             self.apiclient,
             self.virtual_machine.id,
             MemorySnapshot,
-            "TestSnapshot",
-            "Display Text"
+            "TestVmSnapshot",
+            "Test VM Snapshot"
         )
         self.assertEqual(
             vm_snapshot.state,
@@ -269,6 +278,8 @@
 
         self.virtual_machine.start(self.apiclient)
 
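+        # give the guest time to boot before reconnecting over SSH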
+        time.sleep(30)
+
         try:
             ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
 
@@ -288,7 +299,7 @@
         self.assertEqual(
             self.random_data_0,
             result[0],
-            "Check the random data is equal with the ramdom file!"
+            "Check the random data is equal with the random file!"
         )
 
     @attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
@@ -320,7 +331,7 @@
         list_snapshot_response = VmSnapshot.list(
             self.apiclient,
             virtualmachineid=self.virtual_machine.id,
-            listall=False)
+            listall=True)
         self.debug('list_snapshot_response -------------------- %s' % list_snapshot_response)
 
         self.assertIsNone(list_snapshot_response, "snapshot is already deleted")
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index 07779e7..8c106f0 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -27,7 +27,9 @@
 from marvin.lib.common import (get_zone,
                                get_domain,
                                get_suitable_test_template,
+                               list_volumes,
                                list_snapshots,
+                               list_storage_pools,
                                list_virtual_machines)
 import time
 
@@ -87,6 +89,18 @@
             serviceofferingid=cls.service_offering.id,
             mode=cls.zone.networktype
         )
+        volumes = list_volumes(
+            cls.apiclient,
+            virtualmachineid=cls.virtual_machine.id,
+            type='ROOT',
+            listall=True
+        )
+        volume = volumes[0]
+        volume_pool_response = list_storage_pools(
+            cls.apiclient,
+            id=volume.storageid
+        )
+        cls.volume_pool = volume_pool_response[0]
         cls.random_data_0 = random_gen(size=100)
         cls.test_dir = "$HOME"
         cls.random_data = "random.data"
@@ -146,15 +160,15 @@
 
         #KVM VM Snapshot needs to set snapshot with memory
         MemorySnapshot = False
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() == KVM.lower() and self.volume_pool.type.lower() != "powerflex":
            MemorySnapshot = True
 
         vm_snapshot = VmSnapshot.create(
             self.apiclient,
             self.virtual_machine.id,
             MemorySnapshot,
-            "TestSnapshot",
-            "Display Text"
+            "TestVmSnapshot",
+            "Test VM Snapshot"
         )
         self.assertEqual(
             vm_snapshot.state,
@@ -214,7 +228,7 @@
         )
 
         #We don't need to stop the VM when taking a VM Snapshot on KVM
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() == KVM.lower() and self.volume_pool.type.lower() != "powerflex":
            pass
         else:
            self.virtual_machine.stop(self.apiclient)
@@ -224,7 +238,7 @@
             list_snapshot_response[0].id)
 
         #We don't need to start the VM when taking a VM Snapshot on KVM
-        if self.hypervisor.lower() in (KVM.lower()):
+        if self.hypervisor.lower() == KVM.lower() and self.volume_pool.type.lower() != "powerflex":
            pass
         else:
            self.virtual_machine.start(self.apiclient)
diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py
index 28a029a..6cf3f08 100644
--- a/test/integration/smoke/test_volumes.py
+++ b/test/integration/smoke/test_volumes.py
@@ -19,6 +19,7 @@
 import os
 import tempfile
 import time
+import math
 import unittest
 import urllib.error
 import urllib.parse
@@ -42,6 +43,7 @@
                                get_zone,
                                find_storage_pool_type,
                                get_pod,
+                               list_storage_pools,
                                list_disk_offering)
 from marvin.lib.utils import (cleanup_resources, checkVolumeSize)
 from marvin.lib.utils import (format_volume_to_ext3,
@@ -235,7 +237,6 @@
                         "Failed to start VM (ID: %s) " % vm.id)
                 timeout = timeout - 1
 
-            vol_sz = str(list_volume_response[0].size)
             ssh = self.virtual_machine.get_ssh_client(
                 reconnect=True
             )
@@ -243,6 +244,7 @@
             list_volume_response = Volume.list(
                 self.apiClient,
                 id=volume.id)
+            vol_sz = str(list_volume_response[0].size)
             if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower():
                 volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid))
                 self.debug(" Using XenServer volume_name: %s" % (volume_name))
@@ -533,6 +535,17 @@
         # Sleep to ensure the current state will reflected in other calls
         time.sleep(self.services["sleep"])
 
+        list_volume_response = Volume.list(
+            self.apiClient,
+            id=self.volume.id
+        )
+        volume = list_volume_response[0]
+
+        list_volume_pool_response = list_storage_pools(self.apiClient, id=volume.storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
+            self.skipTest("Extract volume operation is unsupported for volumes on storage pool type %s" % volume_pool.type)
+
         cmd = extractVolume.extractVolumeCmd()
         cmd.id = self.volume.id
         cmd.mode = "HTTP_DOWNLOAD"
@@ -658,7 +671,15 @@
                 type='DATADISK'
             )
             for vol in list_volume_response:
-                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
+                list_volume_pool_response = list_storage_pools(
+                    self.apiClient,
+                    id=vol.storageid
+                )
+                volume_pool = list_volume_pool_response[0]
+                disksize = (int(disk_offering_20_GB.disksize))
+                if volume_pool.type.lower() == "powerflex":
+                    disksize = (int(math.ceil(disksize / 8) * 8))
+                if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready':
                     success = True
             if success:
                 break
@@ -925,7 +946,15 @@
                 type='DATADISK'
             )
             for vol in list_volume_response:
-                if vol.id == self.volume.id and int(vol.size) == (20 * (1024 ** 3)) and vol.state == 'Ready':
+                list_volume_pool_response = list_storage_pools(
+                    self.apiClient,
+                    id=vol.storageid
+                )
+                volume_pool = list_volume_pool_response[0]
+                disksize = 20
+                if volume_pool.type.lower() == "powerflex":
+                    disksize = (int(math.ceil(disksize / 8) * 8))
+                if vol.id == self.volume.id and int(vol.size) == disksize * (1024 ** 3) and vol.state == 'Ready':
                     success = True
             if success:
                 break
@@ -1283,7 +1312,6 @@
                     "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )
@@ -1292,6 +1320,7 @@
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))
@@ -1410,7 +1439,6 @@
                     "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )
@@ -1419,6 +1447,12 @@
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
+        list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
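+            # adjust the expected size for PowerFlex: subtract 128 MiB plus 200704 bytes per GiB of overhead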
+            vol_sz = int(vol_sz)
+            vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))
@@ -1543,7 +1577,6 @@
                     "Failed to start VM (ID: %s) " % vm.id)
             timeout = timeout - 1
 
-        vol_sz = str(list_volume_response[0].size)
         ssh = virtual_machine.get_ssh_client(
             reconnect=True
         )
@@ -1552,6 +1585,12 @@
         list_volume_response = Volume.list(
             self.apiclient,
             id=volume.id)
+        vol_sz = str(list_volume_response[0].size)
+        list_volume_pool_response = list_storage_pools(self.apiclient, id=list_volume_response[0].storageid)
+        volume_pool = list_volume_pool_response[0]
+        if volume_pool.type.lower() == "powerflex":
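+            # same PowerFlex size adjustment as above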
+            vol_sz = int(vol_sz)
+            vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
 
         volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
         self.debug(" Using KVM volume_name: %s" % (volume_name))
diff --git a/tools/marvin/marvin/lib/utils.py b/tools/marvin/marvin/lib/utils.py
index f80eccf..c822a58 100644
--- a/tools/marvin/marvin/lib/utils.py
+++ b/tools/marvin/marvin/lib/utils.py
@@ -300,12 +300,63 @@
     assert hosts_list_validation_result[0] == PASS, "host list validation failed"
     return hosts_list_validation_result[1].hypervisorversion
 
+def is_snapshot_on_powerflex(apiclient, dbconn, config, zoneid, snapshotid):
+    """
+    Checks whether a snapshot with id (not UUID) `snapshotid` is present on the powerflex storage
+
+    @param apiclient: api client connection
+    @param dbconn:  connection to the cloudstack db
+    @param config: marvin configuration file
+    @param zoneid: uuid of the zone in which the powerflex primary storage pool exists
+    @param snapshotid: uuid of the snapshot
+    @return: True if snapshot is found, False otherwise
+    """
+
+    qresultset = dbconn.execute(
+        "SELECT id FROM snapshots WHERE uuid = '%s';" % str(snapshotid)
+    )
+    if len(qresultset) == 0:
+        raise Exception(
+            "No snapshot found in cloudstack with id %s" % snapshotid)
+
+    snapshotid = qresultset[0][0]
+    qresultset = dbconn.execute(
+        "SELECT install_path, store_id FROM snapshot_store_ref WHERE snapshot_id='%s' AND store_role='Primary';" % snapshotid
+    )
+    )
+
+    assert isinstance(qresultset, list), "Invalid db query response for snapshot %s" % snapshotid
+
+    if len(qresultset) == 0:
+        # Snapshot does not exist
+        return False
+
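+    # local import to avoid a circular import with marvin.lib.base (assumption)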
+    from .base import StoragePool
+    # pass store_id to get the exact storage pool where the snapshot is stored
+    primaryStores = StoragePool.list(apiclient, zoneid=zoneid, id=int(qresultset[0][1]))
+
+    assert isinstance(primaryStores, list), "Not a valid response for listStoragePools"
+    assert len(primaryStores) != 0, "No storage pools found in zone %s" % zoneid
+
+    primaryStore = primaryStores[0]
+
+    if str(primaryStore.provider).lower() != "powerflex":
+        raise Exception(
+            "is_snapshot_on_powerflex works only against powerflex storage pool. found %s" % str(primaryStore.provider))
+
+    snapshotPath = qresultset[0][0]
+    if not snapshotPath:
+        return False
+
+    return True
+
 def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid):
     """
     Checks whether a snapshot with id (not UUID) `snapshotid` is present on the nfs storage
 
     @param apiclient: api client connection
-    @param @dbconn:  connection to the cloudstack db
+    @param dbconn:  connection to the cloudstack db
     @param config: marvin configuration file
     @param zoneid: uuid of the zone on which the secondary nfs storage pool is mounted
     @param snapshotid: uuid of the snapshot
diff --git a/ui/src/components/view/ResourceView.vue b/ui/src/components/view/ResourceView.vue
index 17d2a13..4359afd 100644
--- a/ui/src/components/view/ResourceView.vue
+++ b/ui/src/components/view/ResourceView.vue
@@ -33,17 +33,17 @@
             :is="tabs[0].component"
             :resource="resource"
             :loading="loading"
-            :tab="tabs[0].name" />
+            :tab="tabName(tabs[0])" />
         </keep-alive>
         <a-tabs
           v-else
           style="width: 100%; margin-top: -12px"
           :animated="false"
-          :activeKey="activeTab || tabs[0].name"
+          :activeKey="activeTab || tabName(tabs[0])"
           @change="onTabChange" >
-          <template v-for="tab in tabs" :key="tab.name">
+          <template v-for="tab in tabs" :key="tabName(tab)">
             <a-tab-pane
-              :key="tab.name"
+              :key="tabName(tab)"
               :tab="$t('label.' + tabName(tab))"
               v-if="showTab(tab)">
               <keep-alive>
@@ -183,12 +183,12 @@
         return
       }
       if (!this.historyTab || !this.$route.meta.tabs || this.$route.meta.tabs.length === 0) {
-        this.activeTab = this.tabs[0].name
+        this.activeTab = this.tabName(this.tabs[0])
         return
       }
-      const tabIdx = this.$route.meta.tabs.findIndex(tab => tab.name === this.historyTab)
+      const tabIdx = this.$route.meta.tabs.findIndex(tab => this.tabName(tab) === this.historyTab)
       if (tabIdx === -1) {
-        this.activeTab = this.tabs[0].name
+        this.activeTab = this.tabName(this.tabs[0])
       } else {
         this.activeTab = this.historyTab
       }
diff --git a/ui/src/config/section/offering.js b/ui/src/config/section/offering.js
index 53dd20b..4a32619 100644
--- a/ui/src/config/section/offering.js
+++ b/ui/src/config/section/offering.js
@@ -142,7 +142,7 @@
           }
         },
         show: (record) => { return record.state === 'Active' },
-        groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+        groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
       }]
     },
     {
@@ -224,7 +224,7 @@
           }
         },
         show: (record) => { return record.state === 'Active' },
-        groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+        groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
       }]
     },
     {
@@ -331,7 +331,7 @@
           }
         },
         show: (record) => { return record.state === 'Active' },
-        groupMap: (selection) => { return selection.map(x => { return { id: x } }) }
+        groupMap: (selection) => { return selection.map(x => { return { id: x, state: 'Inactive' } }) }
       }]
     },
     {
diff --git a/ui/src/views/AutogenView.vue b/ui/src/views/AutogenView.vue
index c77e732..f55767d 100644
--- a/ui/src/views/AutogenView.vue
+++ b/ui/src/views/AutogenView.vue
@@ -1986,9 +1986,8 @@
     },
     onSearch (opts) {
       const query = Object.assign({}, this.$route.query)
-      for (const key in this.searchParams) {
-        delete query[key]
-      }
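+      // drop only the search-filter params declared for this route before applying the new search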
+      const searchFilters = this.$route?.meta?.searchFilters || []
+      searchFilters.forEach(key => delete query[key])
       delete query.name
       delete query.templatetype
       delete query.keyword