PREHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T1_n90
POSTHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1_n90
PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
PREHOOK: type: LOAD
#### A masked pattern was here ####
PREHOOK: Output: default@t1_n90
POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@t1_n90
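Note: the contents of ../../data/files/T1.txt are not reproduced in this output, but the result sets below imply six rows. A minimal sketch of equivalent test data (an inference from the results, not the literal file):

INSERT INTO T1_n90 VALUES
  ('1', '11'), ('2', '12'), ('3', '13'),
  ('7', '17'), ('8', '18'), ('8', '28');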
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
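Note: WITH CUBE over (key, val) expands to all four grouping sets, which is why the plan above carries an extra bigint grouping-set key (the 0L constant that becomes _col2). A minimal equivalent rewrite using explicit GROUPING SETS syntax (a sketch, not part of this test):

SELECT key, val, count(1)
FROM T1_n90
GROUP BY key, val
GROUPING SETS ((key, val), (key), (val), ());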
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY CUBE(key, val)
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY CUBE(key, val)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
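Note: the plan above is identical to the one for the WITH CUBE spelling; CUBE(key, val) is simply alternative syntax. For contrast, a ROLLUP keeps only the hierarchical prefixes, producing three grouping sets instead of cube's four (a sketch, not part of this test):

SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val WITH ROLLUP;
-- equivalent to GROUPING SETS ((key, val), (key), ())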
PREHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL 11 1
NULL 12 1
NULL 13 1
NULL 17 1
NULL 18 1
NULL 28 1
NULL NULL 6
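Note: in cube output a NULL marks a rolled-up grouping column, so the all-NULL row above is the grand total. A hedged sanity check against the base table:

SELECT count(1) FROM T1_n90;
-- expected: 6, matching the NULL NULL row above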
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: _col0, _col1
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2, 3]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 0 1
1 NULL 1 1
2 12 0 1
2 NULL 1 1
3 13 0 1
3 NULL 1 1
7 17 0 1
7 NULL 1 1
8 18 0 1
8 28 0 1
8 NULL 1 2
NULL 11 2 1
NULL 12 2 1
NULL 13 2 1
NULL 17 2 1
NULL 18 2 1
NULL 28 2 1
NULL NULL 3 6
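Note: in this encoding the last GROUP BY column occupies the least-significant bit of GROUPING__ID, so 0 = (key, val), 1 = (key), 2 = (val), 3 = grand total, matching the rows above. A minimal decoding sketch (the label column name is illustrative):

SELECT key, val, GROUPING__ID,
       CASE GROUPING__ID
         WHEN 0 THEN 'key,val'
         WHEN 1 THEN 'key'
         WHEN 2 THEN 'val'
         WHEN 3 THEN 'grand total'
       END AS grouping_set,
       count(1)
FROM T1_n90
GROUP BY key, val WITH CUBE;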
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col2
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
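Note: as the notVectorizedReason fields above show, DISTINCT aggregates combined with grouping sets fall back to row-mode execution on both the map and reduce sides. A hedged rewrite that removes DISTINCT from the grouping-sets aggregation by pre-deduplicating in a subquery (not equivalent in general: its grand-total row counts distinct (key, val) pairs rather than distinct vals, which only coincides here because no val repeats across keys):

SELECT key, count(val)
FROM (SELECT DISTINCT key, val FROM T1_n90) t
GROUP BY key WITH CUBE;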
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
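Note: relative to the earlier plans for the same query, this one shuffles on rand() first and splits the aggregation into PARTIALS (Reducer 2) and FINAL (Reducer 3) stages. That shape is characteristic of Hive's skew-tolerant group-by; the q file's SET commands are not shown in this output, but the likely trigger (an assumption) is:

SET hive.groupby.skewindata=true;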
PREHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL 11 1
NULL 12 1
NULL 13 1
NULL 17 1
NULL 18 1
NULL 28 1
NULL NULL 6
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint)
null sort order: zz
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: bigint)
Reducer 3
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: final
outputColumnNames: _col0, _col2
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
PREHOOK: query: CREATE TABLE T2_n55(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T2_n55
POSTHOOK: query: CREATE TABLE T2_n55(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T2_n55
PREHOOK: query: CREATE TABLE T3_n19(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T3_n19
POSTHOOK: query: CREATE TABLE T3_n19(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T3_n19
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
PREHOOK: Output: default@t2_n55
PREHOOK: Output: default@t3_n19
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
POSTHOOK: Output: default@t2_n55
POSTHOOK: Output: default@t3_n19
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-2 is a root stage
Stage-3 depends on stages: Stage-2
Stage-0 depends on stages: Stage-3
Stage-4 depends on stages: Stage-0
Stage-1 depends on stages: Stage-3
Stage-5 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-2
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
Reducer 5 <- Map 1 (SIMPLE_EDGE)
Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
Reducer 7 <- Reducer 6 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(1)
Group By Vectorization:
aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 4:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 3:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(1)
Group By Vectorization:
aggregators: VectorUDAFSumLong(ConstantVectorExpression(val 1) -> 6:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 5:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint, bigint, bigint, bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: Aggregation Function expression for GROUPBY operator: UDF compute_bit_vector not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t2_n55
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
outputColumnNames: key1, key2, val
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: max(length(key1)), avg(COALESCE(length(key1),0)), count(1), count(key1), compute_bit_vector(key1, 'hll'), max(length(key2)), avg(COALESCE(length(key2),0)), count(key2), compute_bit_vector(key2, 'hll'), min(val), max(val), count(val), compute_bit_vector(val, 'hll')
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: int), _col11 (type: bigint), _col12 (type: binary)
Reducer 4
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: Aggregation Function expression for GROUPBY operator: UDF compute_bit_vector not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector(VALUE._col12)
mode: final
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), UDFToLong(_col9) (type: bigint), UDFToLong(_col10) (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Reducer 5
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFSumLong(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 6
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: Aggregation Function expression for GROUPBY operator: UDF compute_bit_vector not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t3_n19
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
outputColumnNames: key1, key2, val
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: max(length(key1)), avg(COALESCE(length(key1),0)), count(1), count(key1), compute_bit_vector(key1, 'hll'), max(length(key2)), avg(COALESCE(length(key2),0)), count(key2), compute_bit_vector(key2, 'hll'), min(val), max(val), count(val), compute_bit_vector(val, 'hll')
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: int), _col11 (type: bigint), _col12 (type: binary)
Reducer 7
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true
notVectorizedReason: Aggregation Function expression for GROUPBY operator: UDF compute_bit_vector not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector(VALUE._col12)
mode: final
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), UDFToLong(_col9) (type: bigint), UDFToLong(_col10) (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-3
Dependency Collection
Stage: Stage-0
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t2_n55
Stage: Stage-4
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key1, key2, val
Column Types: string, string, int
Table: default.t2_n55
Stage: Stage-1
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t3_n19
Stage: Stage-5
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key1, key2, val
Column Types: string, string, int
Table: default.t3_n19
PREHOOK: query: FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
PREHOOK: Output: default@t2_n55
PREHOOK: Output: default@t3_n19
POSTHOOK: query: FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
POSTHOOK: Output: default@t2_n55
POSTHOOK: Output: default@t3_n19
POSTHOOK: Lineage: t2_n55.key1 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n55.key2 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n55.val EXPRESSION [(t1_n90)t1_n90.null, ]
POSTHOOK: Lineage: t3_n19.key1 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n19.key2 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n19.val EXPRESSION [(t1_n90)t1_n90.null, ]
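Note: count(1) and sum(1) count the same rows, so after this multi-insert T2_n55 and T3_n19 should hold identical data. A hedged check (assuming Hive's EXCEPT set operation is available; not part of this golden file):

SELECT * FROM T2_n55
EXCEPT
SELECT * FROM T3_n19;
-- expected: no rows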