Storage pool response improvements (#10740)
* Return details of the storage pool in the response including url, and update capacityBytes and capacityIops if applicable while creating storage pool
* Added capacitybytes parameter to the storage pool response in sync with the capacityiops response parameter and createStoragePool cmd request parameter (existing disksizetotal parameter in the storage pool response can be deprecated)
* Don't keep url in details
* Persist the capacityBytes and capacityIops in the storage_pool_details table while creating the storage pool as well, for consistency - as these are already updated there during update storage pool
* rebase with main fixes
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
index abc674f..7867c68 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/StoragePoolResponse.java
@@ -77,19 +77,24 @@
@Param(description = "the name of the cluster for the storage pool")
private String clusterName;
+ @SerializedName(ApiConstants.CAPACITY_BYTES)
+ @Param(description = "bytes CloudStack can provision from this storage pool", since = "4.22.0")
+ private Long capacityBytes;
+
+ @Deprecated(since = "4.22.0")
@SerializedName("disksizetotal")
@Param(description = "the total disk size of the storage pool")
private Long diskSizeTotal;
@SerializedName("disksizeallocated")
- @Param(description = "the host's currently allocated disk size")
+ @Param(description = "the pool's currently allocated disk size")
private Long diskSizeAllocated;
@SerializedName("disksizeused")
- @Param(description = "the host's currently used disk size")
+ @Param(description = "the pool's currently used disk size")
private Long diskSizeUsed;
- @SerializedName("capacityiops")
+ @SerializedName(ApiConstants.CAPACITY_IOPS)
@Param(description = "IOPS CloudStack can provision from this storage pool")
private Long capacityIops;
@@ -288,6 +293,14 @@
this.clusterName = clusterName;
}
+ public Long getCapacityBytes() {
+ return capacityBytes;
+ }
+
+ public void setCapacityBytes(Long capacityBytes) {
+ this.capacityBytes = capacityBytes;
+ }
+
public Long getDiskSizeTotal() {
return diskSizeTotal;
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
index 54f3c63..1acaccf 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/PrimaryDataStoreLifeCycle.java
@@ -24,8 +24,8 @@
import com.cloud.storage.StoragePool;
public interface PrimaryDataStoreLifeCycle extends DataStoreLifeCycle {
- public static final String CAPACITY_BYTES = "capacityBytes";
- public static final String CAPACITY_IOPS = "capacityIops";
+ String CAPACITY_BYTES = "capacityBytes";
+ String CAPACITY_IOPS = "capacityIops";
void updateStoragePool(StoragePool storagePool, Map<String, String> details);
void enableStoragePool(DataStore store);
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
index 8b230d0..6da02d7 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java
@@ -320,6 +320,9 @@
pool = super.persist(pool);
if (details != null) {
for (Map.Entry<String, String> detail : details.entrySet()) {
+ if (detail.getKey().toLowerCase().contains("password") || detail.getKey().toLowerCase().contains("token")) {
+ displayDetails = false;
+ }
StoragePoolDetailVO vo = new StoragePoolDetailVO(pool.getId(), detail.getKey(), detail.getValue(), displayDetails);
_detailsDao.persist(vo);
}
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
index 62ae10b..d6087ed 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-42100to42200.sql
@@ -34,3 +34,7 @@
-- Add the column cross_zone_instance_creation to cloud.backup_repository. if enabled it means that new Instance can be created on all Zones from Backups on this Repository.
CALL `cloud`.`IDEMPOTENT_ADD_COLUMN`('cloud.backup_repository', 'cross_zone_instance_creation', 'TINYINT(1) DEFAULT NULL COMMENT ''Backup Repository can be used for disaster recovery on another zone''');
+
+-- Update display to false for password/token details in the storage pool details
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%password%';
+UPDATE `cloud`.`storage_pool_details` SET display = 0 WHERE name LIKE '%token%';
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
index 32a321c..d17dae1 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java
@@ -85,8 +85,7 @@
DataStoreProviderManager dataStoreProviderMgr;
public DataStore createPrimaryDataStore(PrimaryDataStoreParameters params) {
- if(params == null)
- {
+ if (params == null) {
throw new InvalidParameterValueException("createPrimaryDataStore: Input params is null, please check");
}
StoragePoolVO dataStoreVO = dataStoreDao.findPoolByUUID(params.getUuid());
diff --git a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
index 2b44b0b..a98ea8e 100644
--- a/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
+++ b/plugins/storage/volume/default/src/main/java/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java
@@ -18,6 +18,7 @@
*/
package org.apache.cloudstack.storage.datastore.lifecycle;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
@@ -139,7 +140,6 @@
Long clusterId = (Long)dsInfos.get("clusterId");
Long podId = (Long)dsInfos.get("podId");
Long zoneId = (Long)dsInfos.get("zoneId");
- String url = (String)dsInfos.get("url");
String providerName = (String)dsInfos.get("providerName");
HypervisorType hypervisorType = (HypervisorType)dsInfos.get("hypervisorType");
if (clusterId != null && podId == null) {
@@ -148,19 +148,43 @@
PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
- String tags = (String)dsInfos.get("tags");
- String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
Map<String, String> details = (Map<String, String>)dsInfos.get("details");
+ if (dsInfos.get("capacityBytes") != null) {
+ Long capacityBytes = (Long)dsInfos.get("capacityBytes");
+ if (capacityBytes <= 0) {
+ throw new IllegalArgumentException("'capacityBytes' must be greater than 0.");
+ }
+ if (details == null) {
+ details = new HashMap<>();
+ }
+ details.put(PrimaryDataStoreLifeCycle.CAPACITY_BYTES, String.valueOf(capacityBytes));
+ parameters.setCapacityBytes(capacityBytes);
+ }
- parameters.setTags(tags);
- parameters.setStorageAccessGroups(storageAccessGroups);
- parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
+ if (dsInfos.get("capacityIops") != null) {
+ Long capacityIops = (Long)dsInfos.get("capacityIops");
+ if (capacityIops <= 0) {
+ throw new IllegalArgumentException("'capacityIops' must be greater than 0.");
+ }
+ if (details == null) {
+ details = new HashMap<>();
+ }
+ details.put(PrimaryDataStoreLifeCycle.CAPACITY_IOPS, String.valueOf(capacityIops));
+ parameters.setCapacityIops(capacityIops);
+ }
+
parameters.setDetails(details);
+ String tags = (String)dsInfos.get("tags");
+ parameters.setTags(tags);
+ parameters.setIsTagARule((Boolean)dsInfos.get("isTagARule"));
+
+ String storageAccessGroups = (String)dsInfos.get(ApiConstants.STORAGE_ACCESS_GROUPS);
+ parameters.setStorageAccessGroups(storageAccessGroups);
+
String scheme = dsInfos.get("scheme").toString();
String storageHost = dsInfos.get("host").toString();
String hostPath = dsInfos.get("hostPath").toString();
- String uri = String.format("%s://%s%s", scheme, storageHost, hostPath);
Object localStorage = dsInfos.get("localStorage");
if (localStorage != null) {
diff --git a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
index ce38727e..8bfce47 100644
--- a/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
+++ b/server/src/main/java/com/cloud/api/query/dao/StoragePoolJoinDaoImpl.java
@@ -40,6 +40,7 @@
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.capacity.CapacityManager;
+import com.cloud.server.ResourceTag;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
@@ -152,6 +153,7 @@
}
}
}
+ poolResponse.setCapacityBytes(pool.getCapacityBytes());
poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
poolResponse.setDiskSizeAllocated(allocatedSize);
poolResponse.setDiskSizeUsed(pool.getUsedBytes());
@@ -180,6 +182,8 @@
poolResponse.setIsTagARule(pool.getIsTagARule());
poolResponse.setOverProvisionFactor(Double.toString(CapacityManager.StorageOverprovisioningFactor.valueIn(pool.getId())));
poolResponse.setManaged(storagePool.isManaged());
+ Map<String, String> details = ApiDBUtils.getResourceDetails(pool.getId(), ResourceTag.ResourceObjectType.Storage);
+ poolResponse.setDetails(details);
// set async job
if (pool.getJobId() != null) {
@@ -252,6 +256,7 @@
}
long allocatedSize = pool.getUsedCapacity();
+ poolResponse.setCapacityBytes(pool.getCapacityBytes());
poolResponse.setDiskSizeTotal(pool.getCapacityBytes());
poolResponse.setDiskSizeAllocated(allocatedSize);
poolResponse.setCapacityIops(pool.getCapacityIops());