PREHOOK: query: CREATE TABLE T1_text_n5(key STRING, val STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T1_text_n5
POSTHOOK: query: CREATE TABLE T1_text_n5(key STRING, val STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1_text_n5
PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_text_n5
PREHOOK: type: LOAD
#### A masked pattern was here ####
PREHOOK: Output: default@t1_text_n5
POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_text_n5
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@t1_text_n5
PREHOOK: query: CREATE TABLE T1_n83 STORED AS ORC AS SELECT * FROM T1_text_n5
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@t1_text_n5
PREHOOK: Output: database:default
PREHOOK: Output: default@T1_n83
POSTHOOK: query: CREATE TABLE T1_n83 STORED AS ORC AS SELECT * FROM T1_text_n5
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@t1_text_n5
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1_n83
POSTHOOK: Lineage: t1_n83.key SIMPLE [(t1_text_n5)t1_text_n5.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t1_n83.val SIMPLE [(t1_text_n5)t1_text_n5.FieldSchema(name:val, type:string, comment:null), ]
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n83
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL NULL 6
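WITH ROLLUP over (key, val) expands to three grouping sets, which is why the plan above carries the extra constant 0L bigint key (the grouping-set id, later dropped where pruneGroupingSetId: true appears) and why the NULL subtotal rows show up in the result. A sketch of an equivalent spelling of the same query, against the same table:

SELECT key, val, count(1)
FROM T1_n83
GROUP BY key, val GROUPING SETS ((key, val), (key), ());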
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n83
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col2
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
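The next two EXPLAINs repeat the same two queries but show a different plan shape: map output is partitioned by rand() and the aggregation runs in two reduce phases (mode: partials, then mode: final). That shape is characteristic of skew-tolerant group-by; presumably the test re-runs after a set command along these lines (set statements are not echoed into this output):

set hive.groupby.skewindata=true;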
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n83
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n83 GROUP BY key, val with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL NULL 6
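In rollup output like the above, a NULL cell can mean either a subtotal row or a genuine NULL in the data. Recent Hive versions expose the grouping() function to tell the two apart; a minimal sketch against the same table:

SELECT key, val, count(1), grouping(key) AS g_key, grouping(val) AS g_val
FROM T1_n83
GROUP BY key, val WITH ROLLUP;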
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n83
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint)
null sort order: zz
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 12 Data size: 4416 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: bigint)
Reducer 3
Execution mode: llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: final
outputColumnNames: _col0, _col2
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n83 GROUP BY key with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
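Both DISTINCT plans above fall back to row mode (notVectorizedReason: DISTINCT with grouping sets is unsupported on the map side, and DISTINCT is unsupported in the reducers). When only that verdict matters, the ONLY modifier trims the plan down to the vectorization summary; a sketch assuming the same session settings:

EXPLAIN VECTORIZATION ONLY SUMMARY
SELECT key, count(distinct val) FROM T1_n83 GROUP BY key WITH ROLLUP;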
PREHOOK: query: CREATE TABLE T2_n52(key1 STRING, key2 STRING, val INT) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T2_n52
POSTHOOK: query: CREATE TABLE T2_n52(key1 STRING, key2 STRING, val INT) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T2_n52
PREHOOK: query: CREATE TABLE T3_n17(key1 STRING, key2 STRING, val INT) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T3_n17
POSTHOOK: query: CREATE TABLE T3_n17(key1 STRING, key2 STRING, val INT) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T3_n17
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n83
INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup
INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by rollup(key, val)
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
PREHOOK: Output: default@t2_n52
PREHOOK: Output: default@t3_n17
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n83
INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup
INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by rollup(key, val)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
POSTHOOK: Output: default@t2_n52
POSTHOOK: Output: default@t3_n17
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-2 is a root stage
Stage-3 depends on stages: Stage-2
Stage-0 depends on stages: Stage-3
Stage-4 depends on stages: Stage-0
Stage-1 depends on stages: Stage-3
Stage-5 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-2
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
Reducer 4 <- Map 1 (SIMPLE_EDGE)
Reducer 5 <- Reducer 4 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n83
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(1)
Group By Vectorization:
aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 4:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 6 Data size: 2208 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(1)
Group By Vectorization:
aggregators: VectorUDAFSumLong(ConstantVectorExpression(val 1) -> 6:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 5:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint, bigint, bigint, bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.t2_n52
Reducer 4
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFSumLong(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 18 Data size: 6624 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 5
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFSumLong(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 9 Data size: 3312 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.t3_n17
Stage: Stage-3
Dependency Collection
Stage: Stage-0
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.t2_n52
Stage: Stage-4
Stats Work
Basic Stats Work:
Stage: Stage-1
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
name: default.t3_n17
Stage: Stage-5
Stats Work
Basic Stats Work:
PREHOOK: query: FROM T1_n83
INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup
INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by key, val with rollup
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n83
PREHOOK: Output: default@t2_n52
PREHOOK: Output: default@t3_n17
POSTHOOK: query: FROM T1_n83
INSERT OVERWRITE TABLE T2_n52 SELECT key, val, count(1) group by key, val with rollup
INSERT OVERWRITE TABLE T3_n17 SELECT key, val, sum(1) group by key, val with rollup
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n83
POSTHOOK: Output: default@t2_n52
POSTHOOK: Output: default@t3_n17
POSTHOOK: Lineage: t2_n52.key1 SIMPLE [(t1_n83)t1_n83.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n52.key2 SIMPLE [(t1_n83)t1_n83.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n52.val EXPRESSION [(t1_n83)t1_n83.null, ]
POSTHOOK: Lineage: t3_n17.key1 SIMPLE [(t1_n83)t1_n83.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n17.key2 SIMPLE [(t1_n83)t1_n83.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n17.val EXPRESSION [(t1_n83)t1_n83.null, ]
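The multi-insert above populates both target tables from a single scan of T1_n83 (Map 1 feeds two reducer chains). A hypothetical spot-check of the rollup rows that landed in each table:

SELECT key1, key2, val FROM T2_n52 ORDER BY key1, key2;
SELECT key1, key2, val FROM T3_n17 ORDER BY key1, key2;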