Web console: compaction dialog update (#10417)

* compaction dialog update

* fix test snapshot

* Update web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx

Co-authored-by: Chi Cao Minh <chi.caominh@imply.io>

* Update web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx

Co-authored-by: Chi Cao Minh <chi.caominh@imply.io>

* Apply review feedback

Co-authored-by: Chi Cao Minh <chi.caominh@imply.io>
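
For context, a minimal usage sketch of the reworked dialog, mirroring the props exercised by the updated spec below. The `shallow` import is assumed to come from enzyme, as in the console's other specs, and the `onSave` handler is only illustrative.

```tsx
import { shallow } from 'enzyme'; // assumed: same test renderer as the existing specs
import React from 'react';

import { CompactionDialog } from './compaction-dialog';

// Render the dialog with a hashed partitionsSpec, as in the new spec cases.
// Because no numShards is set, validCompactionConfig() is false and the
// Submit button renders disabled (see disabled={true} in the hashed snapshot).
const dialog = shallow(
  <CompactionDialog
    onClose={() => {}}
    onSave={config => console.log('saving compaction config', config)} // illustrative handler
    onDelete={() => {}}
    datasource="test1"
    compactionConfig={{
      dataSource: 'test1',
      tuningConfig: { partitionsSpec: { type: 'hashed' } },
    }}
  />,
);
```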
diff --git a/web-console/src/dialogs/compaction-dialog/__snapshots__/compaction-dialog.spec.tsx.snap b/web-console/src/dialogs/compaction-dialog/__snapshots__/compaction-dialog.spec.tsx.snap
index f3b66e8..3a0a838 100644
--- a/web-console/src/dialogs/compaction-dialog/__snapshots__/compaction-dialog.spec.tsx.snap
+++ b/web-console/src/dialogs/compaction-dialog/__snapshots__/compaction-dialog.spec.tsx.snap
@@ -1,79 +1,206 @@
 // Jest Snapshot v1, https://goo.gl/fbAQLP
 
-exports[`compaction dialog matches snapshot 1`] = `
+exports[`CompactionDialog matches snapshot with compactionConfig (dynamic partitionsSpec) 1`] = `
 <Blueprint3.Dialog
   canOutsideClickClose={false}
   className="compaction-dialog"
   isOpen={true}
   onClose={[Function]}
-  title="Compaction config: test"
+  title="Compaction config: test1"
 >
-  <AutoForm
-    fields={
-      Array [
+  <Blueprint3.FormGroup
+    className="tabs"
+  >
+    <Blueprint3.ButtonGroup
+      fill={true}
+    >
+      <Blueprint3.Button
+        active={true}
+        onClick={[Function]}
+        text="Form"
+      />
+      <Blueprint3.Button
+        active={false}
+        onClick={[Function]}
+        text="JSON"
+      />
+    </Blueprint3.ButtonGroup>
+  </Blueprint3.FormGroup>
+  <div
+    className="content"
+  >
+    <AutoForm
+      fields={
+        Array [
+          Object {
+            "defaultValue": "P1D",
+            "info": <p>
+              The offset for searching segments to be compacted. Strongly recommended to set for realtime dataSources.
+            </p>,
+            "name": "skipOffsetFromLatest",
+            "type": "string",
+          },
+          Object {
+            "info": <p>
+              For perfect rollup, you should use either 
+              <Unknown>
+                hashed
+              </Unknown>
+               (partitioning based on the hash of dimensions in each row) or 
+              <Unknown>
+                single_dim
+              </Unknown>
+               (based on ranges of a single dimension). For best-effort rollup, you should use 
+              <Unknown>
+                dynamic
+              </Unknown>
+              .
+            </p>,
+            "label": "Partitioning type",
+            "name": "tuningConfig.partitionsSpec.type",
+            "suggestions": Array [
+              "dynamic",
+              "hashed",
+              "single_dim",
+            ],
+            "type": "string",
+          },
+          Object {
+            "defaultValue": 5000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Determines how many rows are in each segment.
+            </React.Fragment>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 20000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Total number of rows in segments waiting for being pushed.
+            </React.Fragment>,
+            "label": "Max total rows",
+            "name": "tuningConfig.partitionsSpec.maxTotalRows",
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <React.Fragment>
+              Directly specify the number of shards to create. If this is specified and 'intervals' is specified in the granularitySpec, the index task can skip the determine intervals/partitions pass through the data.
+            </React.Fragment>,
+            "label": "Num shards",
+            "name": "tuningConfig.partitionsSpec.numShards",
+            "required": true,
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimensions to partition on. Leave blank to select all dimensions.
+            </p>,
+            "label": "Partition dimensions",
+            "name": "tuningConfig.partitionsSpec.partitionDimensions",
+            "type": "string-array",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimension to partition on.
+            </p>,
+            "label": "Partition dimension",
+            "name": "tuningConfig.partitionsSpec.partitionDimension",
+            "required": true,
+            "type": "string",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Target number of rows to include in a partition, should be a number that targets segments of 500MB~1GB.
+            </p>,
+            "label": "Target rows per segment",
+            "name": "tuningConfig.partitionsSpec.targetRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Maximum number of rows to include in a partition.
+            </p>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defaultValue": false,
+            "defined": [Function],
+            "info": <p>
+              Assume that input data has already been grouped on time and dimensions. Ingestion will run faster, but may choose sub-optimal partitions if this assumption is violated.
+            </p>,
+            "label": "Assume grouped",
+            "name": "tuningConfig.partitionsSpec.assumeGrouped",
+            "type": "boolean",
+          },
+          Object {
+            "defaultValue": 1,
+            "info": <React.Fragment>
+              Maximum number of tasks which can be run at the same time. The supervisor task would spawn worker tasks up to maxNumConcurrentSubTasks regardless of the available task slots. If this value is set to 1, the supervisor task processes data ingestion on its own instead of spawning worker tasks. If this value is set to too large, too many worker tasks can be created which might block other ingestion.
+            </React.Fragment>,
+            "label": "Max num concurrent sub tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumConcurrentSubTasks",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 419430400,
+            "info": <p>
+              Maximum number of total segment bytes processed per compaction task. Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. Because each compaction task runs with a single thread, setting this value too far above 1–2GB will result in compaction tasks taking an excessive amount of time.
+            </p>,
+            "name": "inputSegmentSizeBytes",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 1,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Maximum number of merge tasks which can be run at the same time.
+            </React.Fragment>,
+            "label": "Max num merge tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumMergeTasks",
+            "type": "number",
+          },
+          Object {
+            "adjustment": [Function],
+            "defaultValue": 500000000,
+            "info": <React.Fragment>
+              Maximum number of bytes of input segments to process in a single task. If a single segment is larger than this number, it will be processed by itself in a single task (input segments are never split across tasks).
+            </React.Fragment>,
+            "label": "Max input segment bytes per task",
+            "min": 1000000,
+            "name": "tuningConfig.splitHintSpec.maxInputSegmentBytesPerTask",
+            "type": "number",
+          },
+        ]
+      }
+      model={
         Object {
-          "defaultValue": 419430400,
-          "info": <p>
-            Maximum number of total segment bytes processed per compaction task. Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. Because each compaction task runs with a single thread, setting this value too far above 1–2GB will result in compaction tasks taking an excessive amount of time.
-          </p>,
-          "name": "inputSegmentSizeBytes",
-          "type": "number",
-        },
-        Object {
-          "defaultValue": "P1D",
-          "info": <p>
-            The offset for searching segments to be compacted. Strongly recommended to set for realtime dataSources.
-          </p>,
-          "name": "skipOffsetFromLatest",
-          "type": "string",
-        },
-        Object {
-          "defaultValue": 5000000,
-          "info": <p>
-            Determines how many rows are in each segment.
-          </p>,
-          "name": "maxRowsPerSegment",
-          "type": "number",
-        },
-        Object {
-          "info": <p>
-            <Memo(ExternalLink)
-              href="https://druid.apache.org/docs/0.19.0/ingestion/tasks.html#task-context"
-            >
-              Task context
-            </Memo(ExternalLink)>
-             
-            for compaction tasks.
-          </p>,
-          "name": "taskContext",
-          "type": "json",
-        },
-        Object {
-          "defaultValue": 25,
-          "info": <p>
-            Priority of the compaction task.
-          </p>,
-          "name": "taskPriority",
-          "type": "number",
-        },
-        Object {
-          "info": <p>
-            <Memo(ExternalLink)
-              href="https://druid.apache.org/docs/0.19.0/configuration/index.html#compact-task-tuningconfig"
-            >
-              Tuning config
-            </Memo(ExternalLink)>
-             
-            for compaction tasks.
-          </p>,
-          "name": "tuningConfig",
-          "type": "json",
-        },
-      ]
-    }
-    model={Object {}}
-    onChange={[Function]}
-  />
+          "dataSource": "test1",
+          "tuningConfig": Object {
+            "partitionsSpec": Object {
+              "type": "dynamic",
+            },
+          },
+        }
+      }
+      onChange={[Function]}
+    />
+  </div>
   <div
     className="bp3-dialog-footer"
   >
@@ -81,7 +208,6 @@
       className="bp3-dialog-footer-actions"
     >
       <Blueprint3.Button
-        disabled={false}
         intent="danger"
         onClick={[Function]}
         text="Delete"
@@ -100,3 +226,679 @@
   </div>
 </Blueprint3.Dialog>
 `;
+
+exports[`CompactionDialog matches snapshot with compactionConfig (hashed partitionsSpec) 1`] = `
+<Blueprint3.Dialog
+  canOutsideClickClose={false}
+  className="compaction-dialog"
+  isOpen={true}
+  onClose={[Function]}
+  title="Compaction config: test1"
+>
+  <Blueprint3.FormGroup
+    className="tabs"
+  >
+    <Blueprint3.ButtonGroup
+      fill={true}
+    >
+      <Blueprint3.Button
+        active={true}
+        onClick={[Function]}
+        text="Form"
+      />
+      <Blueprint3.Button
+        active={false}
+        onClick={[Function]}
+        text="JSON"
+      />
+    </Blueprint3.ButtonGroup>
+  </Blueprint3.FormGroup>
+  <div
+    className="content"
+  >
+    <AutoForm
+      fields={
+        Array [
+          Object {
+            "defaultValue": "P1D",
+            "info": <p>
+              The offset for searching segments to be compacted. Strongly recommended to set for realtime dataSources.
+            </p>,
+            "name": "skipOffsetFromLatest",
+            "type": "string",
+          },
+          Object {
+            "info": <p>
+              For perfect rollup, you should use either 
+              <Unknown>
+                hashed
+              </Unknown>
+               (partitioning based on the hash of dimensions in each row) or 
+              <Unknown>
+                single_dim
+              </Unknown>
+               (based on ranges of a single dimension). For best-effort rollup, you should use 
+              <Unknown>
+                dynamic
+              </Unknown>
+              .
+            </p>,
+            "label": "Partitioning type",
+            "name": "tuningConfig.partitionsSpec.type",
+            "suggestions": Array [
+              "dynamic",
+              "hashed",
+              "single_dim",
+            ],
+            "type": "string",
+          },
+          Object {
+            "defaultValue": 5000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Determines how many rows are in each segment.
+            </React.Fragment>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 20000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Total number of rows in segments waiting for being pushed.
+            </React.Fragment>,
+            "label": "Max total rows",
+            "name": "tuningConfig.partitionsSpec.maxTotalRows",
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <React.Fragment>
+              Directly specify the number of shards to create. If this is specified and 'intervals' is specified in the granularitySpec, the index task can skip the determine intervals/partitions pass through the data.
+            </React.Fragment>,
+            "label": "Num shards",
+            "name": "tuningConfig.partitionsSpec.numShards",
+            "required": true,
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimensions to partition on. Leave blank to select all dimensions.
+            </p>,
+            "label": "Partition dimensions",
+            "name": "tuningConfig.partitionsSpec.partitionDimensions",
+            "type": "string-array",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimension to partition on.
+            </p>,
+            "label": "Partition dimension",
+            "name": "tuningConfig.partitionsSpec.partitionDimension",
+            "required": true,
+            "type": "string",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Target number of rows to include in a partition, should be a number that targets segments of 500MB~1GB.
+            </p>,
+            "label": "Target rows per segment",
+            "name": "tuningConfig.partitionsSpec.targetRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Maximum number of rows to include in a partition.
+            </p>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defaultValue": false,
+            "defined": [Function],
+            "info": <p>
+              Assume that input data has already been grouped on time and dimensions. Ingestion will run faster, but may choose sub-optimal partitions if this assumption is violated.
+            </p>,
+            "label": "Assume grouped",
+            "name": "tuningConfig.partitionsSpec.assumeGrouped",
+            "type": "boolean",
+          },
+          Object {
+            "defaultValue": 1,
+            "info": <React.Fragment>
+              Maximum number of tasks which can be run at the same time. The supervisor task would spawn worker tasks up to maxNumConcurrentSubTasks regardless of the available task slots. If this value is set to 1, the supervisor task processes data ingestion on its own instead of spawning worker tasks. If this value is set to too large, too many worker tasks can be created which might block other ingestion.
+            </React.Fragment>,
+            "label": "Max num concurrent sub tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumConcurrentSubTasks",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 419430400,
+            "info": <p>
+              Maximum number of total segment bytes processed per compaction task. Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. Because each compaction task runs with a single thread, setting this value too far above 1–2GB will result in compaction tasks taking an excessive amount of time.
+            </p>,
+            "name": "inputSegmentSizeBytes",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 1,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Maximum number of merge tasks which can be run at the same time.
+            </React.Fragment>,
+            "label": "Max num merge tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumMergeTasks",
+            "type": "number",
+          },
+          Object {
+            "adjustment": [Function],
+            "defaultValue": 500000000,
+            "info": <React.Fragment>
+              Maximum number of bytes of input segments to process in a single task. If a single segment is larger than this number, it will be processed by itself in a single task (input segments are never split across tasks).
+            </React.Fragment>,
+            "label": "Max input segment bytes per task",
+            "min": 1000000,
+            "name": "tuningConfig.splitHintSpec.maxInputSegmentBytesPerTask",
+            "type": "number",
+          },
+        ]
+      }
+      model={
+        Object {
+          "dataSource": "test1",
+          "tuningConfig": Object {
+            "partitionsSpec": Object {
+              "type": "hashed",
+            },
+          },
+        }
+      }
+      onChange={[Function]}
+    />
+  </div>
+  <div
+    className="bp3-dialog-footer"
+  >
+    <div
+      className="bp3-dialog-footer-actions"
+    >
+      <Blueprint3.Button
+        intent="danger"
+        onClick={[Function]}
+        text="Delete"
+      />
+      <Blueprint3.Button
+        onClick={[Function]}
+        text="Close"
+      />
+      <Blueprint3.Button
+        disabled={true}
+        intent="primary"
+        onClick={[Function]}
+        text="Submit"
+      />
+    </div>
+  </div>
+</Blueprint3.Dialog>
+`;
+
+exports[`CompactionDialog matches snapshot with compactionConfig (single_dim partitionsSpec) 1`] = `
+<Blueprint3.Dialog
+  canOutsideClickClose={false}
+  className="compaction-dialog"
+  isOpen={true}
+  onClose={[Function]}
+  title="Compaction config: test1"
+>
+  <Blueprint3.FormGroup
+    className="tabs"
+  >
+    <Blueprint3.ButtonGroup
+      fill={true}
+    >
+      <Blueprint3.Button
+        active={true}
+        onClick={[Function]}
+        text="Form"
+      />
+      <Blueprint3.Button
+        active={false}
+        onClick={[Function]}
+        text="JSON"
+      />
+    </Blueprint3.ButtonGroup>
+  </Blueprint3.FormGroup>
+  <div
+    className="content"
+  >
+    <AutoForm
+      fields={
+        Array [
+          Object {
+            "defaultValue": "P1D",
+            "info": <p>
+              The offset for searching segments to be compacted. Strongly recommended to set for realtime dataSources.
+            </p>,
+            "name": "skipOffsetFromLatest",
+            "type": "string",
+          },
+          Object {
+            "info": <p>
+              For perfect rollup, you should use either 
+              <Unknown>
+                hashed
+              </Unknown>
+               (partitioning based on the hash of dimensions in each row) or 
+              <Unknown>
+                single_dim
+              </Unknown>
+               (based on ranges of a single dimension). For best-effort rollup, you should use 
+              <Unknown>
+                dynamic
+              </Unknown>
+              .
+            </p>,
+            "label": "Partitioning type",
+            "name": "tuningConfig.partitionsSpec.type",
+            "suggestions": Array [
+              "dynamic",
+              "hashed",
+              "single_dim",
+            ],
+            "type": "string",
+          },
+          Object {
+            "defaultValue": 5000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Determines how many rows are in each segment.
+            </React.Fragment>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 20000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Total number of rows in segments waiting for being pushed.
+            </React.Fragment>,
+            "label": "Max total rows",
+            "name": "tuningConfig.partitionsSpec.maxTotalRows",
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <React.Fragment>
+              Directly specify the number of shards to create. If this is specified and 'intervals' is specified in the granularitySpec, the index task can skip the determine intervals/partitions pass through the data.
+            </React.Fragment>,
+            "label": "Num shards",
+            "name": "tuningConfig.partitionsSpec.numShards",
+            "required": true,
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimensions to partition on. Leave blank to select all dimensions.
+            </p>,
+            "label": "Partition dimensions",
+            "name": "tuningConfig.partitionsSpec.partitionDimensions",
+            "type": "string-array",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimension to partition on.
+            </p>,
+            "label": "Partition dimension",
+            "name": "tuningConfig.partitionsSpec.partitionDimension",
+            "required": true,
+            "type": "string",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Target number of rows to include in a partition, should be a number that targets segments of 500MB~1GB.
+            </p>,
+            "label": "Target rows per segment",
+            "name": "tuningConfig.partitionsSpec.targetRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Maximum number of rows to include in a partition.
+            </p>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defaultValue": false,
+            "defined": [Function],
+            "info": <p>
+              Assume that input data has already been grouped on time and dimensions. Ingestion will run faster, but may choose sub-optimal partitions if this assumption is violated.
+            </p>,
+            "label": "Assume grouped",
+            "name": "tuningConfig.partitionsSpec.assumeGrouped",
+            "type": "boolean",
+          },
+          Object {
+            "defaultValue": 1,
+            "info": <React.Fragment>
+              Maximum number of tasks which can be run at the same time. The supervisor task would spawn worker tasks up to maxNumConcurrentSubTasks regardless of the available task slots. If this value is set to 1, the supervisor task processes data ingestion on its own instead of spawning worker tasks. If this value is set to too large, too many worker tasks can be created which might block other ingestion.
+            </React.Fragment>,
+            "label": "Max num concurrent sub tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumConcurrentSubTasks",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 419430400,
+            "info": <p>
+              Maximum number of total segment bytes processed per compaction task. Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. Because each compaction task runs with a single thread, setting this value too far above 1–2GB will result in compaction tasks taking an excessive amount of time.
+            </p>,
+            "name": "inputSegmentSizeBytes",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 1,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Maximum number of merge tasks which can be run at the same time.
+            </React.Fragment>,
+            "label": "Max num merge tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumMergeTasks",
+            "type": "number",
+          },
+          Object {
+            "adjustment": [Function],
+            "defaultValue": 500000000,
+            "info": <React.Fragment>
+              Maximum number of bytes of input segments to process in a single task. If a single segment is larger than this number, it will be processed by itself in a single task (input segments are never split across tasks).
+            </React.Fragment>,
+            "label": "Max input segment bytes per task",
+            "min": 1000000,
+            "name": "tuningConfig.splitHintSpec.maxInputSegmentBytesPerTask",
+            "type": "number",
+          },
+        ]
+      }
+      model={
+        Object {
+          "dataSource": "test1",
+          "tuningConfig": Object {
+            "partitionsSpec": Object {
+              "type": "single_dim",
+            },
+          },
+        }
+      }
+      onChange={[Function]}
+    />
+  </div>
+  <div
+    className="bp3-dialog-footer"
+  >
+    <div
+      className="bp3-dialog-footer-actions"
+    >
+      <Blueprint3.Button
+        intent="danger"
+        onClick={[Function]}
+        text="Delete"
+      />
+      <Blueprint3.Button
+        onClick={[Function]}
+        text="Close"
+      />
+      <Blueprint3.Button
+        disabled={true}
+        intent="primary"
+        onClick={[Function]}
+        text="Submit"
+      />
+    </div>
+  </div>
+</Blueprint3.Dialog>
+`;
+
+exports[`CompactionDialog matches snapshot without compactionConfig 1`] = `
+<Blueprint3.Dialog
+  canOutsideClickClose={false}
+  className="compaction-dialog"
+  isOpen={true}
+  onClose={[Function]}
+  title="Compaction config: test1"
+>
+  <Blueprint3.FormGroup
+    className="tabs"
+  >
+    <Blueprint3.ButtonGroup
+      fill={true}
+    >
+      <Blueprint3.Button
+        active={true}
+        onClick={[Function]}
+        text="Form"
+      />
+      <Blueprint3.Button
+        active={false}
+        onClick={[Function]}
+        text="JSON"
+      />
+    </Blueprint3.ButtonGroup>
+  </Blueprint3.FormGroup>
+  <div
+    className="content"
+  >
+    <AutoForm
+      fields={
+        Array [
+          Object {
+            "defaultValue": "P1D",
+            "info": <p>
+              The offset for searching segments to be compacted. Strongly recommended to set for realtime dataSources.
+            </p>,
+            "name": "skipOffsetFromLatest",
+            "type": "string",
+          },
+          Object {
+            "info": <p>
+              For perfect rollup, you should use either 
+              <Unknown>
+                hashed
+              </Unknown>
+               (partitioning based on the hash of dimensions in each row) or 
+              <Unknown>
+                single_dim
+              </Unknown>
+               (based on ranges of a single dimension). For best-effort rollup, you should use 
+              <Unknown>
+                dynamic
+              </Unknown>
+              .
+            </p>,
+            "label": "Partitioning type",
+            "name": "tuningConfig.partitionsSpec.type",
+            "suggestions": Array [
+              "dynamic",
+              "hashed",
+              "single_dim",
+            ],
+            "type": "string",
+          },
+          Object {
+            "defaultValue": 5000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Determines how many rows are in each segment.
+            </React.Fragment>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 20000000,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Total number of rows in segments waiting for being pushed.
+            </React.Fragment>,
+            "label": "Max total rows",
+            "name": "tuningConfig.partitionsSpec.maxTotalRows",
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <React.Fragment>
+              Directly specify the number of shards to create. If this is specified and 'intervals' is specified in the granularitySpec, the index task can skip the determine intervals/partitions pass through the data.
+            </React.Fragment>,
+            "label": "Num shards",
+            "name": "tuningConfig.partitionsSpec.numShards",
+            "required": true,
+            "type": "number",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimensions to partition on. Leave blank to select all dimensions.
+            </p>,
+            "label": "Partition dimensions",
+            "name": "tuningConfig.partitionsSpec.partitionDimensions",
+            "type": "string-array",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              The dimension to partition on.
+            </p>,
+            "label": "Partition dimension",
+            "name": "tuningConfig.partitionsSpec.partitionDimension",
+            "required": true,
+            "type": "string",
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Target number of rows to include in a partition, should be a number that targets segments of 500MB~1GB.
+            </p>,
+            "label": "Target rows per segment",
+            "name": "tuningConfig.partitionsSpec.targetRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defined": [Function],
+            "info": <p>
+              Maximum number of rows to include in a partition.
+            </p>,
+            "label": "Max rows per segment",
+            "name": "tuningConfig.partitionsSpec.maxRowsPerSegment",
+            "required": [Function],
+            "type": "number",
+            "zeroMeansUndefined": true,
+          },
+          Object {
+            "defaultValue": false,
+            "defined": [Function],
+            "info": <p>
+              Assume that input data has already been grouped on time and dimensions. Ingestion will run faster, but may choose sub-optimal partitions if this assumption is violated.
+            </p>,
+            "label": "Assume grouped",
+            "name": "tuningConfig.partitionsSpec.assumeGrouped",
+            "type": "boolean",
+          },
+          Object {
+            "defaultValue": 1,
+            "info": <React.Fragment>
+              Maximum number of tasks which can be run at the same time. The supervisor task would spawn worker tasks up to maxNumConcurrentSubTasks regardless of the available task slots. If this value is set to 1, the supervisor task processes data ingestion on its own instead of spawning worker tasks. If this value is set to too large, too many worker tasks can be created which might block other ingestion.
+            </React.Fragment>,
+            "label": "Max num concurrent sub tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumConcurrentSubTasks",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 419430400,
+            "info": <p>
+              Maximum number of total segment bytes processed per compaction task. Since a time chunk must be processed in its entirety, if the segments for a particular time chunk have a total size in bytes greater than this parameter, compaction will not run for that time chunk. Because each compaction task runs with a single thread, setting this value too far above 1–2GB will result in compaction tasks taking an excessive amount of time.
+            </p>,
+            "name": "inputSegmentSizeBytes",
+            "type": "number",
+          },
+          Object {
+            "defaultValue": 1,
+            "defined": [Function],
+            "info": <React.Fragment>
+              Maximum number of merge tasks which can be run at the same time.
+            </React.Fragment>,
+            "label": "Max num merge tasks",
+            "min": 1,
+            "name": "tuningConfig.maxNumMergeTasks",
+            "type": "number",
+          },
+          Object {
+            "adjustment": [Function],
+            "defaultValue": 500000000,
+            "info": <React.Fragment>
+              Maximum number of bytes of input segments to process in a single task. If a single segment is larger than this number, it will be processed by itself in a single task (input segments are never split across tasks).
+            </React.Fragment>,
+            "label": "Max input segment bytes per task",
+            "min": 1000000,
+            "name": "tuningConfig.splitHintSpec.maxInputSegmentBytesPerTask",
+            "type": "number",
+          },
+        ]
+      }
+      model={
+        Object {
+          "dataSource": "test1",
+          "tuningConfig": Object {
+            "partitionsSpec": Object {
+              "type": "dynamic",
+            },
+          },
+        }
+      }
+      onChange={[Function]}
+    />
+  </div>
+  <div
+    className="bp3-dialog-footer"
+  >
+    <div
+      className="bp3-dialog-footer-actions"
+    >
+      <Blueprint3.Button
+        onClick={[Function]}
+        text="Close"
+      />
+      <Blueprint3.Button
+        disabled={false}
+        intent="primary"
+        onClick={[Function]}
+        text="Submit"
+      />
+    </div>
+  </div>
+</Blueprint3.Dialog>
+`;
diff --git a/web-console/src/dialogs/compaction-dialog/compaction-dialog.scss b/web-console/src/dialogs/compaction-dialog/compaction-dialog.scss
index b9a461c..65606ba 100644
--- a/web-console/src/dialogs/compaction-dialog/compaction-dialog.scss
+++ b/web-console/src/dialogs/compaction-dialog/compaction-dialog.scss
@@ -18,13 +18,21 @@
 
 .compaction-dialog {
   &.bp3-dialog {
-    top: 5%;
+    height: 80vh;
   }
 
-  .auto-form {
-    margin: 10px 15px;
-    padding: 0 5px 0 5px;
-    max-height: 70vh;
-    overflow: scroll;
+  .tabs {
+    margin: 15px;
+  }
+
+  .content {
+    margin: 0 15px 10px 0;
+    padding: 0 5px 0 15px;
+    flex: 1;
+    overflow: auto;
+  }
+
+  .ace-solarized-dark {
+    background-color: #232c35;
   }
 }
diff --git a/web-console/src/dialogs/compaction-dialog/compaction-dialog.spec.tsx b/web-console/src/dialogs/compaction-dialog/compaction-dialog.spec.tsx
index 34068c9..0164526 100644
--- a/web-console/src/dialogs/compaction-dialog/compaction-dialog.spec.tsx
+++ b/web-console/src/dialogs/compaction-dialog/compaction-dialog.spec.tsx
@@ -21,15 +21,63 @@
 
 import { CompactionDialog } from './compaction-dialog';
 
-describe('compaction dialog', () => {
-  it('matches snapshot', () => {
+describe('CompactionDialog', () => {
+  it('matches snapshot without compactionConfig', () => {
     const compactionDialog = shallow(
       <CompactionDialog
         onClose={() => {}}
         onSave={() => {}}
         onDelete={() => {}}
-        datasource={'test'}
-        compactionConfig={{}}
+        datasource={'test1'}
+        compactionConfig={undefined}
+      />,
+    );
+    expect(compactionDialog).toMatchSnapshot();
+  });
+
+  it('matches snapshot with compactionConfig (dynamic partitionsSpec)', () => {
+    const compactionDialog = shallow(
+      <CompactionDialog
+        onClose={() => {}}
+        onSave={() => {}}
+        onDelete={() => {}}
+        datasource={'test1'}
+        compactionConfig={{
+          dataSource: 'test1',
+          tuningConfig: { partitionsSpec: { type: 'dynamic' } },
+        }}
+      />,
+    );
+    expect(compactionDialog).toMatchSnapshot();
+  });
+
+  it('matches snapshot with compactionConfig (hashed partitionsSpec)', () => {
+    const compactionDialog = shallow(
+      <CompactionDialog
+        onClose={() => {}}
+        onSave={() => {}}
+        onDelete={() => {}}
+        datasource={'test1'}
+        compactionConfig={{
+          dataSource: 'test1',
+          tuningConfig: { partitionsSpec: { type: 'hashed' } },
+        }}
+      />,
+    );
+    expect(compactionDialog).toMatchSnapshot();
+  });
+
+  it('matches snapshot with compactionConfig (single_dim partitionsSpec)', () => {
+    const compactionDialog = shallow(
+      <CompactionDialog
+        onClose={() => {}}
+        onSave={() => {}}
+        onDelete={() => {}}
+        datasource={'test1'}
+        compactionConfig={{
+          dataSource: 'test1',
+          tuningConfig: { partitionsSpec: { type: 'single_dim' } },
+        }}
       />,
     );
     expect(compactionDialog).toMatchSnapshot();
diff --git a/web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx b/web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx
index 85ce37d..1798565a 100644
--- a/web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx
+++ b/web-console/src/dialogs/compaction-dialog/compaction-dialog.tsx
@@ -16,17 +16,155 @@
  * limitations under the License.
  */
 
-import { Button, Classes, Dialog, Intent } from '@blueprintjs/core';
+import { Button, ButtonGroup, Classes, Code, Dialog, FormGroup, Intent } from '@blueprintjs/core';
 import React, { useState } from 'react';
 
-import { AutoForm, ExternalLink, Field } from '../../components';
-import { getLink } from '../../links';
+import { AutoForm, Field, JsonInput } from '../../components';
+import { deepGet, deepSet } from '../../utils/object-change';
 
 import './compaction-dialog.scss';
 
 export const DEFAULT_MAX_ROWS_PER_SEGMENT = 5000000;
 
-const COMPACTION_CONFIG_FIELDS: Field<Record<string, any>>[] = [
+type Tabs = 'form' | 'json';
+
+type CompactionConfig = Record<string, any>;
+
+const COMPACTION_CONFIG_FIELDS: Field<CompactionConfig>[] = [
+  {
+    name: 'skipOffsetFromLatest',
+    type: 'string',
+    defaultValue: 'P1D',
+    info: (
+      <p>
+        The offset for searching segments to be compacted. Strongly recommended to set for realtime
+        dataSources.
+      </p>
+    ),
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.type',
+    label: 'Partitioning type',
+    type: 'string',
+    suggestions: ['dynamic', 'hashed', 'single_dim'],
+    info: (
+      <p>
+        For perfect rollup, you should use either <Code>hashed</Code> (partitioning based on the
+        hash of dimensions in each row) or <Code>single_dim</Code> (based on ranges of a single
+        dimension). For best-effort rollup, you should use <Code>dynamic</Code>.
+      </p>
+    ),
+  },
+  // partitionsSpec type: dynamic
+  {
+    name: 'tuningConfig.partitionsSpec.maxRowsPerSegment',
+    label: 'Max rows per segment',
+    type: 'number',
+    defaultValue: 5000000,
+    defined: (t: CompactionConfig) => deepGet(t, 'tuningConfig.partitionsSpec.type') === 'dynamic',
+    info: <>Determines how many rows are in each segment.</>,
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.maxTotalRows',
+    label: 'Max total rows',
+    type: 'number',
+    defaultValue: 20000000,
+    defined: (t: CompactionConfig) => deepGet(t, 'tuningConfig.partitionsSpec.type') === 'dynamic',
+    info: <>Total number of rows in segments waiting for being pushed.</>,
+  },
+  // partitionsSpec type: hashed
+  {
+    name: 'tuningConfig.partitionsSpec.numShards',
+    label: 'Num shards',
+    type: 'number',
+    required: true, // ToDo: this will no longer be required after https://github.com/apache/druid/pull/10419 is merged
+    defined: (t: CompactionConfig) => deepGet(t, 'tuningConfig.partitionsSpec.type') === 'hashed',
+    info: (
+      <>
+        Directly specify the number of shards to create. If this is specified and 'intervals' is
+        specified in the granularitySpec, the index task can skip the determine intervals/partitions
+        pass through the data.
+      </>
+    ),
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.partitionDimensions',
+    label: 'Partition dimensions',
+    type: 'string-array',
+    defined: (t: CompactionConfig) => deepGet(t, 'tuningConfig.partitionsSpec.type') === 'hashed',
+    info: <p>The dimensions to partition on. Leave blank to select all dimensions.</p>,
+  },
+  // partitionsSpec type: single_dim
+  {
+    name: 'tuningConfig.partitionsSpec.partitionDimension',
+    label: 'Partition dimension',
+    type: 'string',
+    defined: (t: CompactionConfig) =>
+      deepGet(t, 'tuningConfig.partitionsSpec.type') === 'single_dim',
+    required: true,
+    info: <p>The dimension to partition on.</p>,
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.targetRowsPerSegment',
+    label: 'Target rows per segment',
+    type: 'number',
+    zeroMeansUndefined: true,
+    defined: (t: CompactionConfig) =>
+      deepGet(t, 'tuningConfig.partitionsSpec.type') === 'single_dim' &&
+      !deepGet(t, 'tuningConfig.partitionsSpec.maxRowsPerSegment'),
+    required: (t: CompactionConfig) =>
+      !deepGet(t, 'tuningConfig.partitionsSpec.targetRowsPerSegment') &&
+      !deepGet(t, 'tuningConfig.partitionsSpec.maxRowsPerSegment'),
+    info: (
+      <p>
+        Target number of rows to include in a partition, should be a number that targets segments of
+        500MB~1GB.
+      </p>
+    ),
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.maxRowsPerSegment',
+    label: 'Max rows per segment',
+    type: 'number',
+    zeroMeansUndefined: true,
+    defined: (t: CompactionConfig) =>
+      deepGet(t, 'tuningConfig.partitionsSpec.type') === 'single_dim' &&
+      !deepGet(t, 'tuningConfig.partitionsSpec.targetRowsPerSegment'),
+    required: (t: CompactionConfig) =>
+      !deepGet(t, 'tuningConfig.partitionsSpec.targetRowsPerSegment') &&
+      !deepGet(t, 'tuningConfig.partitionsSpec.maxRowsPerSegment'),
+    info: <p>Maximum number of rows to include in a partition.</p>,
+  },
+  {
+    name: 'tuningConfig.partitionsSpec.assumeGrouped',
+    label: 'Assume grouped',
+    type: 'boolean',
+    defaultValue: false,
+    defined: (t: CompactionConfig) =>
+      deepGet(t, 'tuningConfig.partitionsSpec.type') === 'single_dim',
+    info: (
+      <p>
+        Assume that input data has already been grouped on time and dimensions. Ingestion will run
+        faster, but may choose sub-optimal partitions if this assumption is violated.
+      </p>
+    ),
+  },
+  {
+    name: 'tuningConfig.maxNumConcurrentSubTasks',
+    label: 'Max num concurrent sub tasks',
+    type: 'number',
+    defaultValue: 1,
+    min: 1,
+    info: (
+      <>
+        Maximum number of tasks which can be run at the same time. The supervisor task would spawn
+        worker tasks up to maxNumConcurrentSubTasks regardless of the available task slots. If this
+        value is set to 1, the supervisor task processes data ingestion on its own instead of
+        spawning worker tasks. If this value is set to too large, too many worker tasks can be
+        created which might block other ingestion.
+      </>
+    ),
+  },
   {
     name: 'inputSegmentSizeBytes',
     type: 'number',
@@ -42,75 +180,84 @@
     ),
   },
   {
-    name: 'skipOffsetFromLatest',
-    type: 'string',
-    defaultValue: 'P1D',
-    info: (
-      <p>
-        The offset for searching segments to be compacted. Strongly recommended to set for realtime
-        dataSources.
-      </p>
-    ),
-  },
-  {
-    name: 'maxRowsPerSegment',
+    name: 'tuningConfig.maxNumMergeTasks',
+    label: 'Max num merge tasks',
     type: 'number',
-    defaultValue: DEFAULT_MAX_ROWS_PER_SEGMENT,
-    info: <p>Determines how many rows are in each segment.</p>,
+    defaultValue: 1,
+    min: 1,
+    defined: (t: CompactionConfig) =>
+      ['hashed', 'single_dim'].includes(deepGet(t, 'tuningConfig.partitionsSpec.type')),
+    info: <>Maximum number of merge tasks which can be run at the same time.</>,
   },
   {
-    name: 'taskContext',
-    type: 'json',
-    info: (
-      <p>
-        <ExternalLink href={`${getLink('DOCS')}/ingestion/tasks.html#task-context`}>
-          Task context
-        </ExternalLink>{' '}
-        for compaction tasks.
-      </p>
-    ),
-  },
-  {
-    name: 'taskPriority',
+    name: 'tuningConfig.splitHintSpec.maxInputSegmentBytesPerTask',
+    label: 'Max input segment bytes per task',
     type: 'number',
-    defaultValue: 25,
-    info: <p>Priority of the compaction task.</p>,
-  },
-  {
-    name: 'tuningConfig',
-    type: 'json',
+    defaultValue: 500000000,
+    min: 1000000,
+    adjustment: (t: CompactionConfig) => deepSet(t, 'tuningConfig.splitHintSpec.type', 'segments'),
     info: (
-      <p>
-        <ExternalLink
-          href={`${getLink('DOCS')}/configuration/index.html#compact-task-tuningconfig`}
-        >
-          Tuning config
-        </ExternalLink>{' '}
-        for compaction tasks.
-      </p>
+      <>
+        Maximum number of bytes of input segments to process in a single task. If a single segment
+        is larger than this number, it will be processed by itself in a single task (input segments
+        are never split across tasks).
+      </>
     ),
   },
 ];
 
+function validCompactionConfig(compactionConfig: CompactionConfig): boolean {
+  const partitionsSpecType =
+    deepGet(compactionConfig, 'tuningConfig.partitionsSpec.type') || 'dynamic';
+  switch (partitionsSpecType) {
+    // case 'dynamic': // Nothing to check for dynamic
+    case 'hashed':
+      // ToDo: this will no longer be required after https://github.com/apache/druid/pull/10419 is merged
+      if (!deepGet(compactionConfig, 'tuningConfig.partitionsSpec.numShards')) {
+        return false;
+      }
+      break;
+
+    case 'single_dim':
+      if (!deepGet(compactionConfig, 'tuningConfig.partitionsSpec.partitionDimension')) {
+        return false;
+      }
+      const hasTargetRowsPerSegment = Boolean(
+        deepGet(compactionConfig, 'tuningConfig.partitionsSpec.targetRowsPerSegment'),
+      );
+      const hasMaxRowsPerSegment = Boolean(
+        deepGet(compactionConfig, 'tuningConfig.partitionsSpec.maxRowsPerSegment'),
+      );
+      if (hasTargetRowsPerSegment === hasMaxRowsPerSegment) {
+        return false;
+      }
+      break;
+  }
+
+  return true;
+}
+
 export interface CompactionDialogProps {
   onClose: () => void;
-  onSave: (config: Record<string, any>) => void;
+  onSave: (compactionConfig: CompactionConfig) => void;
   onDelete: () => void;
   datasource: string;
-  compactionConfig?: Record<string, any>;
+  compactionConfig: CompactionConfig | undefined;
 }
 
 export const CompactionDialog = React.memo(function CompactionDialog(props: CompactionDialogProps) {
   const { datasource, compactionConfig, onSave, onClose, onDelete } = props;
 
-  const [currentConfig, setCurrentConfig] = useState<Record<string, any>>(
+  const [currentTab, setCurrentTab] = useState<Tabs>('form');
+  const [currentConfig, setCurrentConfig] = useState<CompactionConfig>(
     compactionConfig || {
       dataSource: datasource,
+      tuningConfig: { partitionsSpec: { type: 'dynamic' } },
     },
   );
 
   function handleSubmit() {
-    if (!currentConfig) return;
+    if (!validCompactionConfig(currentConfig)) return;
     onSave(currentConfig);
   }
 
@@ -122,25 +269,40 @@
       canOutsideClickClose={false}
       title={`Compaction config: ${datasource}`}
     >
-      <AutoForm
-        fields={COMPACTION_CONFIG_FIELDS}
-        model={currentConfig}
-        onChange={m => setCurrentConfig(m)}
-      />
+      <FormGroup className="tabs">
+        <ButtonGroup fill>
+          <Button
+            text="Form"
+            active={currentTab === 'form'}
+            onClick={() => setCurrentTab('form')}
+          />
+          <Button
+            text="JSON"
+            active={currentTab === 'json'}
+            onClick={() => setCurrentTab('json')}
+          />
+        </ButtonGroup>
+      </FormGroup>
+      <div className="content">
+        {currentTab === 'form' ? (
+          <AutoForm
+            fields={COMPACTION_CONFIG_FIELDS}
+            model={currentConfig}
+            onChange={m => setCurrentConfig(m)}
+          />
+        ) : (
+          <JsonInput value={currentConfig} onChange={setCurrentConfig} height="100%" />
+        )}
+      </div>
       <div className={Classes.DIALOG_FOOTER}>
         <div className={Classes.DIALOG_FOOTER_ACTIONS}>
-          <Button
-            text="Delete"
-            intent={Intent.DANGER}
-            onClick={onDelete}
-            disabled={!compactionConfig}
-          />
+          {compactionConfig && <Button text="Delete" intent={Intent.DANGER} onClick={onDelete} />}
           <Button text="Close" onClick={onClose} />
           <Button
             text="Submit"
             intent={Intent.PRIMARY}
             onClick={handleSubmit}
-            disabled={!currentConfig}
+            disabled={!validCompactionConfig(currentConfig)}
           />
         </div>
       </div>
diff --git a/web-console/src/utils/ingestion-spec.tsx b/web-console/src/utils/ingestion-spec.tsx
index 530c269..d6d90bb 100644
--- a/web-console/src/utils/ingestion-spec.tsx
+++ b/web-console/src/utils/ingestion-spec.tsx
@@ -2120,10 +2120,13 @@
 
     case 'single_dim':
       if (!deepGet(tuningConfig, 'partitionsSpec.partitionDimension')) return true;
-      if (
-        !deepGet(tuningConfig, 'partitionsSpec.targetRowsPerSegment') &&
-        !deepGet(tuningConfig, 'partitionsSpec.maxRowsPerSegment')
-      ) {
+      const hasTargetRowsPerSegment = Boolean(
+        deepGet(tuningConfig, 'partitionsSpec.targetRowsPerSegment'),
+      );
+      const hasMaxRowsPerSegment = Boolean(
+        deepGet(tuningConfig, 'partitionsSpec.maxRowsPerSegment'),
+      );
+      if (hasTargetRowsPerSegment === hasMaxRowsPerSegment) {
         return true;
       }
   }
@@ -2160,7 +2163,7 @@
             <p>
               For perfect rollup, you should use either <Code>hashed</Code> (partitioning based on
               the hash of dimensions in each row) or <Code>single_dim</Code> (based on ranges of a
-              single dimension. For best-effort rollup, you should use dynamic.
+              single dimension). For best-effort rollup, you should use <Code>dynamic</Code>.
             </p>
           ),
         },
@@ -2192,8 +2195,7 @@
             <>
               Directly specify the number of shards to create. If this is specified and 'intervals'
               is specified in the granularitySpec, the index task can skip the determine
-              intervals/partitions pass through the data. numShards cannot be specified if
-              maxRowsPerSegment is set.
+              intervals/partitions pass through the data.
             </>
           ),
         },
@@ -2218,7 +2220,9 @@
           label: 'Target rows per segment',
           type: 'number',
           zeroMeansUndefined: true,
-          defined: (t: TuningConfig) => deepGet(t, 'partitionsSpec.type') === 'single_dim',
+          defined: (t: TuningConfig) =>
+            deepGet(t, 'partitionsSpec.type') === 'single_dim' &&
+            !deepGet(t, 'partitionsSpec.maxRowsPerSegment'),
           required: (t: TuningConfig) =>
             !deepGet(t, 'partitionsSpec.targetRowsPerSegment') &&
             !deepGet(t, 'partitionsSpec.maxRowsPerSegment'),
@@ -2234,7 +2238,9 @@
           label: 'Max rows per segment',
           type: 'number',
           zeroMeansUndefined: true,
-          defined: (t: TuningConfig) => deepGet(t, 'partitionsSpec.type') === 'single_dim',
+          defined: (t: TuningConfig) =>
+            deepGet(t, 'partitionsSpec.type') === 'single_dim' &&
+            !deepGet(t, 'partitionsSpec.targetRowsPerSegment'),
           required: (t: TuningConfig) =>
             !deepGet(t, 'partitionsSpec.targetRowsPerSegment') &&
             !deepGet(t, 'partitionsSpec.maxRowsPerSegment'),
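
Both the new `validCompactionConfig` helper and the tightened checks in `ingestion-spec.tsx` encode the same rule for `single_dim` partitioning: a partition dimension must be set, and exactly one of `targetRowsPerSegment` and `maxRowsPerSegment` may be set. A standalone sketch of that rule (hypothetical helper, not part of this change):

```ts
// Hypothetical helper mirroring the single_dim validation in this diff:
// exactly one of targetRowsPerSegment / maxRowsPerSegment must be set
// (zero counts as unset, matching zeroMeansUndefined on the form fields).
interface SingleDimPartitionsSpec {
  type: 'single_dim';
  partitionDimension?: string;
  targetRowsPerSegment?: number;
  maxRowsPerSegment?: number;
}

function isValidSingleDimSpec(spec: SingleDimPartitionsSpec): boolean {
  if (!spec.partitionDimension) return false;
  const hasTarget = Boolean(spec.targetRowsPerSegment);
  const hasMax = Boolean(spec.maxRowsPerSegment);
  return hasTarget !== hasMax; // both set or both unset -> invalid
}

// isValidSingleDimSpec({ type: 'single_dim', partitionDimension: 'dim', targetRowsPerSegment: 5000000 }) => true
// isValidSingleDimSpec({ type: 'single_dim', partitionDimension: 'dim' })                                => false
// isValidSingleDimSpec({ type: 'single_dim', partitionDimension: 'dim',
//                        targetRowsPerSegment: 5000000, maxRowsPerSegment: 5000000 })                    => false
```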