PREHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T1_n90
POSTHOOK: query: CREATE TABLE T1_n90(key STRING, val STRING) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T1_n90
PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
PREHOOK: type: LOAD
#### A masked pattern was here ####
PREHOOK: Output: default@t1_n90
POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1_n90
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@t1_n90
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 3:ROW__IS__DELETED:boolean]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY CUBE(key, val)
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY CUBE(key, val)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 3:ROW__IS__DELETED:boolean]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL 11 1
NULL 12 1
NULL 13 1
NULL 17 1
NULL 18 1
NULL 28 1
NULL NULL 6
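For reference, WITH CUBE over two keys expands to all four grouping sets, which is why the plans above list "grouping sets: 0, 1, 2, 3". A minimal equivalent formulation (standard Hive GROUPING SETS syntax, not part of the original test):
-- Equivalent to GROUP BY key, val WITH CUBE and to GROUP BY CUBE(key, val):
SELECT key, val, count(1)
FROM T1_n90
GROUP BY key, val
GROUPING SETS ((key, val), (key), (val), ());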
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 3:ROW__IS__DELETED:boolean]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: _col0, _col1
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: _col0 (type: string), _col1 (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
Reduce Sink Vectorization:
className: VectorReduceSinkMultiKeyOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: MERGEPARTIAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: MERGE_PARTIAL
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2, _col3
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2, 3]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, GROUPING__ID, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 0 1
1 NULL 1 1
2 12 0 1
2 NULL 1 1
3 13 0 1
3 NULL 1 1
7 17 0 1
7 NULL 1 1
8 18 0 1
8 28 0 1
8 NULL 1 2
NULL 11 2 1
NULL 12 2 1
NULL 13 2 1
NULL 17 2 1
NULL 18 2 1
NULL 28 2 1
NULL NULL 3 6
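In the rows above, GROUPING__ID is a bitmask over the GROUP BY keys: bit 1 is set when key is aggregated away and bit 0 when val is, giving 0 for (key, val), 1 for key-only, 2 for val-only, and 3 for the grand total. A sketch that reconstructs the same value with the GROUPING() builtin (assuming Hive 2.3+ semantics):
-- gid should match the GROUPING__ID column shown above.
SELECT key, val,
       GROUPING(key) * 2 + GROUPING(val) AS gid,
       count(1)
FROM T1_n90
GROUP BY key, val WITH CUBE;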
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
grouping sets: 0, 1
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: mergepartial
outputColumnNames: _col0, _col2
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
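The notVectorizedReason fields in the plan above show why both the map and reduce sides fell back to row mode for this query: DISTINCT aggregates combined with grouping sets are not vectorizable. To inspect just that verdict without the full operator tree, Hive's EXPLAIN supports a compact form (shown here as a usage sketch for the same statement):
EXPLAIN VECTORIZATION ONLY SUMMARY
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key WITH CUBE;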
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 3:ROW__IS__DELETED:boolean]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
Group By Vectorization:
aggregators: VectorUDAFCountStar(*) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, val, count(1) FROM T1_n90 GROUP BY key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 11 1
1 NULL 1
2 12 1
2 NULL 1
3 13 1
3 NULL 1
7 17 1
7 NULL 1
8 18 1
8 28 1
8 NULL 2
NULL 11 1
NULL 12 1
NULL 13 1
NULL 17 1
NULL 18 1
NULL 28 1
NULL NULL 6
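Note that this second plan for the same WITH CUBE query differs from the first: the map side now partitions on rand() and the aggregation runs in two reduce phases (mode: partials in Reducer 2, mode: final in Reducer 3). The intervening SET statement is not shown in this excerpt, but this plan shape is what Hive produces when skewed-data group-by handling is enabled, presumably via:
SET hive.groupby.skewindata=true;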
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(DISTINCT val)
keys: key (type: string), 0L (type: bigint), val (type: string)
grouping sets: 0, 1
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Execution mode: llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
notVectorizedReason: Aggregation Function expression for GROUPBY operator: DISTINCT with Groupingsets not supported
vectorized: false
Reducer 2
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(DISTINCT KEY._col2:0._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: bigint)
null sort order: zz
sort order: ++
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
value expressions: _col2 (type: bigint)
Reducer 3
Execution mode: llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
notVectorizedReason: GROUPBY operator: DISTINCT not supported
vectorized: false
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string), KEY._col1 (type: bigint)
mode: final
outputColumnNames: _col0, _col2
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col2 (type: bigint)
outputColumnNames: _col0, _col1
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
#### A masked pattern was here ####
POSTHOOK: query: SELECT key, count(distinct val) FROM T1_n90 GROUP BY key with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
#### A masked pattern was here ####
1 1
2 1
3 1
7 1
8 2
NULL 6
PREHOOK: query: CREATE TABLE T2_n55(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T2_n55
POSTHOOK: query: CREATE TABLE T2_n55(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T2_n55
PREHOOK: query: CREATE TABLE T3_n19(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@T3_n19
POSTHOOK: query: CREATE TABLE T3_n19(key1 STRING, key2 STRING, val INT) STORED AS TEXTFILE
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@T3_n19
PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
PREHOOK: Output: default@t2_n55
PREHOOK: Output: default@t3_n19
POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
POSTHOOK: Output: default@t2_n55
POSTHOOK: Output: default@t3_n19
PLAN VECTORIZATION:
enabled: true
enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
STAGE DEPENDENCIES:
Stage-2 is a root stage
Stage-3 depends on stages: Stage-2
Stage-0 depends on stages: Stage-3
Stage-4 depends on stages: Stage-0
Stage-1 depends on stages: Stage-3
Stage-5 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-2
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE)
Reducer 5 <- Map 1 (SIMPLE_EDGE)
Reducer 6 <- Reducer 5 (SIMPLE_EDGE)
Reducer 7 <- Reducer 6 (CUSTOM_SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: t1_n90
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
TableScan Vectorization:
native: true
vectorizationSchemaColumns: [0:key:string, 1:val:string, 2:ROW__ID:struct<writeid:bigint,bucketid:int,rowid:bigint>, 3:ROW__IS__DELETED:boolean]
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count(1)
Group By Vectorization:
aggregators: VectorUDAFCount(ConstantVectorExpression(val 1) -> 5:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 4:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Select Operator
expressions: key (type: string), val (type: string)
outputColumnNames: key, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1]
Statistics: Num rows: 1 Data size: 368 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(1)
Group By Vectorization:
aggregators: VectorUDAFSumLong(ConstantVectorExpression(val 1) -> 7:int) -> bigint
className: VectorGroupByOperator
groupByMode: HASH
keyExpressions: col 0:string, col 1:string, ConstantVectorExpression(val 0) -> 6:bigint
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0]
keys: key (type: string), val (type: string), 0L (type: bigint)
grouping sets: 0, 1, 2, 3
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: rand() (type: double)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 4:double
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map Vectorization:
enabled: true
enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
inputFormatFeatureSupport: [DECIMAL_64]
featureSupportInUse: [DECIMAL_64]
inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 2
includeColumns: [0, 1]
dataColumns: key:string, val:string
partitionColumnCount: 0
scratchColumnTypeNames: [bigint, bigint, bigint, bigint]
Reducer 2
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 3
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFCountMerge(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t2_n55
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
outputColumnNames: key1, key2, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: max(length(key1)), avg(COALESCE(length(key1),0)), count(1), count(key1), compute_bit_vector_hll(key1), max(length(key2)), avg(COALESCE(length(key2),0)), count(key2), compute_bit_vector_hll(key2), min(val), max(val), count(val), compute_bit_vector_hll(val)
Group By Vectorization:
aggregators: VectorUDAFMaxLong(StringLength(col 0:string) -> 3:int) -> int, VectorUDAFAvgLong(VectorCoalesce(columns [4, 5])(children: StringLength(col 0:string) -> 4:int, ConstantVectorExpression(val 0) -> 5:int) -> 6:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFCount(ConstantVectorExpression(val 1) -> 7:int) -> bigint, VectorUDAFCount(col 0:string) -> bigint, VectorUDAFComputeBitVectorString(col 0:string) -> binary, VectorUDAFMaxLong(StringLength(col 1:string) -> 8:int) -> int, VectorUDAFAvgLong(VectorCoalesce(columns [9, 10])(children: StringLength(col 1:string) -> 9:int, ConstantVectorExpression(val 0) -> 10:int) -> 11:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFCount(col 1:string) -> bigint, VectorUDAFComputeBitVectorString(col 1:string) -> binary, VectorUDAFMinLong(col 2:int) -> int, VectorUDAFMaxLong(col 2:int) -> int, VectorUDAFCount(col 2:int) -> bigint, VectorUDAFComputeBitVectorLong(col 2:int) -> binary
className: VectorGroupByOperator
groupByMode: HASH
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Reduce Sink Vectorization:
className: VectorReduceSinkEmptyKeyOperator
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 0:int, 1:struct<count:bigint,sum:double,input:int>, 2:bigint, 3:bigint, 4:binary, 5:int, 6:struct<count:bigint,sum:double,input:int>, 7:bigint, 8:binary, 9:int, 10:int, 11:bigint, 12:binary
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: int), _col11 (type: bigint), _col12 (type: binary)
Reducer 4
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder:
reduceColumnSortOrder:
allNative: false
usesVectorUDFAdaptor: true
vectorized: true
rowBatchContext:
dataColumnCount: 13
dataColumns: VALUE._col0:int, VALUE._col1:struct<count:bigint,sum:double,input:int>, VALUE._col2:bigint, VALUE._col3:bigint, VALUE._col4:binary, VALUE._col5:int, VALUE._col6:struct<count:bigint,sum:double,input:int>, VALUE._col7:bigint, VALUE._col8:binary, VALUE._col9:int, VALUE._col10:int, VALUE._col11:bigint, VALUE._col12:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12)
Group By Vectorization:
aggregators: VectorUDAFMaxLong(col 0:int) -> int, VectorUDAFAvgFinal(col 1:struct<count:bigint,sum:double,input:int>) -> double, VectorUDAFCountMerge(col 2:bigint) -> bigint, VectorUDAFCountMerge(col 3:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 4:binary) -> binary, VectorUDAFMaxLong(col 5:int) -> int, VectorUDAFAvgFinal(col 6:struct<count:bigint,sum:double,input:int>) -> double, VectorUDAFCountMerge(col 7:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 8:binary) -> binary, VectorUDAFMinLong(col 9:int) -> int, VectorUDAFMaxLong(col 10:int) -> int, VectorUDAFCountMerge(col 11:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 12:binary) -> binary
className: VectorGroupByOperator
groupByMode: FINAL
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
mode: final
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), UDFToLong(_col9) (type: bigint), UDFToLong(_col10) (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [13, 15, 17, 18, 21, 4, 22, 24, 26, 27, 30, 8, 31, 9, 10, 32, 35, 12]
selectExpressions: ConstantVectorExpression(val STRING) -> 13:string, VectorCoalesce(columns [0, 14])(children: col 0:int, ConstantVectorExpression(val 0) -> 14:int) -> 15:int, VectorCoalesce(columns [1, 16])(children: col 1:double, ConstantVectorExpression(val 0.0) -> 16:double) -> 17:double, LongColSubtractLongColumn(col 2:bigint, col 3:bigint) -> 18:bigint, VectorCoalesce(columns [19, 20])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col4)) -> 19:bigint, ConstantVectorExpression(val 0) -> 20:bigint) -> 21:bigint, ConstantVectorExpression(val STRING) -> 22:string, VectorCoalesce(columns [5, 23])(children: col 5:int, ConstantVectorExpression(val 0) -> 23:int) -> 24:int, VectorCoalesce(columns [6, 25])(children: col 6:double, ConstantVectorExpression(val 0.0) -> 25:double) -> 26:double, LongColSubtractLongColumn(col 2:bigint, col 7:bigint) -> 27:bigint, VectorCoalesce(columns [28, 29])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col8)) -> 28:bigint, ConstantVectorExpression(val 0) -> 29:bigint) -> 30:bigint, ConstantVectorExpression(val LONG) -> 31:string, LongColSubtractLongColumn(col 2:bigint, col 11:bigint) -> 32:bigint, VectorCoalesce(columns [33, 34])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col12)) -> 33:bigint, ConstantVectorExpression(val 0) -> 34:bigint) -> 35:bigint
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Reducer 5
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFSumLong(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: PARTIALS
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: partials
outputColumnNames: _col0, _col1, _col2, _col3
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
null sort order: zzz
sort order: +++
Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
Reduce Sink Vectorization:
className: VectorReduceSinkObjectHashOperator
keyColumns: 0:string, 1:string, 2:bigint
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
partitionColumns: 0:string, 1:string
valueColumns: 3:bigint
Statistics: Num rows: 4 Data size: 1472 Basic stats: COMPLETE Column stats: NONE
value expressions: _col3 (type: bigint)
Reducer 6
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder: zzz
reduceColumnSortOrder: +++
allNative: false
usesVectorUDFAdaptor: false
vectorized: true
rowBatchContext:
dataColumnCount: 4
dataColumns: KEY._col0:string, KEY._col1:string, KEY._col2:bigint, VALUE._col0:bigint
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: sum(VALUE._col0)
Group By Vectorization:
aggregators: VectorUDAFSumLong(col 3:bigint) -> bigint
className: VectorGroupByOperator
groupByMode: FINAL
keyExpressions: col 0:string, col 1:string, col 2:bigint
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0]
keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: bigint)
mode: final
outputColumnNames: _col0, _col1, _col3
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
pruneGroupingSetId: true
Select Operator
expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
outputColumnNames: _col0, _col1, _col2
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t3_n19
Select Operator
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
outputColumnNames: key1, key2, val
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [0, 1, 2]
Statistics: Num rows: 2 Data size: 736 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: max(length(key1)), avg(COALESCE(length(key1),0)), count(1), count(key1), compute_bit_vector_hll(key1), max(length(key2)), avg(COALESCE(length(key2),0)), count(key2), compute_bit_vector_hll(key2), min(val), max(val), count(val), compute_bit_vector_hll(val)
Group By Vectorization:
aggregators: VectorUDAFMaxLong(StringLength(col 0:string) -> 3:int) -> int, VectorUDAFAvgLong(VectorCoalesce(columns [4, 5])(children: StringLength(col 0:string) -> 4:int, ConstantVectorExpression(val 0) -> 5:int) -> 6:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFCount(ConstantVectorExpression(val 1) -> 7:int) -> bigint, VectorUDAFCount(col 0:string) -> bigint, VectorUDAFComputeBitVectorString(col 0:string) -> binary, VectorUDAFMaxLong(StringLength(col 1:string) -> 8:int) -> int, VectorUDAFAvgLong(VectorCoalesce(columns [9, 10])(children: StringLength(col 1:string) -> 9:int, ConstantVectorExpression(val 0) -> 10:int) -> 11:int) -> struct<count:bigint,sum:double,input:int>, VectorUDAFCount(col 1:string) -> bigint, VectorUDAFComputeBitVectorString(col 1:string) -> binary, VectorUDAFMinLong(col 2:int) -> int, VectorUDAFMaxLong(col 2:int) -> int, VectorUDAFCount(col 2:int) -> bigint, VectorUDAFComputeBitVectorLong(col 2:int) -> binary
className: VectorGroupByOperator
groupByMode: HASH
native: false
vectorProcessingMode: HASH
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Reduce Sink Vectorization:
className: VectorReduceSinkEmptyKeyOperator
native: true
nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
valueColumns: 0:int, 1:struct<count:bigint,sum:double,input:int>, 2:bigint, 3:bigint, 4:binary, 5:int, 6:struct<count:bigint,sum:double,input:int>, 7:bigint, 8:binary, 9:int, 10:int, 11:bigint, 12:binary
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: int), _col1 (type: struct<count:bigint,sum:double,input:int>), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: int), _col6 (type: struct<count:bigint,sum:double,input:int>), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: int), _col11 (type: bigint), _col12 (type: binary)
Reducer 7
Execution mode: vectorized, llap
Reduce Vectorization:
enabled: true
enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true
reduceColumnNullOrder:
reduceColumnSortOrder:
allNative: false
usesVectorUDFAdaptor: true
vectorized: true
rowBatchContext:
dataColumnCount: 13
dataColumns: VALUE._col0:int, VALUE._col1:struct<count:bigint,sum:double,input:int>, VALUE._col2:bigint, VALUE._col3:bigint, VALUE._col4:binary, VALUE._col5:int, VALUE._col6:struct<count:bigint,sum:double,input:int>, VALUE._col7:bigint, VALUE._col8:binary, VALUE._col9:int, VALUE._col10:int, VALUE._col11:bigint, VALUE._col12:binary
partitionColumnCount: 0
scratchColumnTypeNames: []
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12)
Group By Vectorization:
aggregators: VectorUDAFMaxLong(col 0:int) -> int, VectorUDAFAvgFinal(col 1:struct<count:bigint,sum:double,input:int>) -> double, VectorUDAFCountMerge(col 2:bigint) -> bigint, VectorUDAFCountMerge(col 3:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 4:binary) -> binary, VectorUDAFMaxLong(col 5:int) -> int, VectorUDAFAvgFinal(col 6:struct<count:bigint,sum:double,input:int>) -> double, VectorUDAFCountMerge(col 7:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 8:binary) -> binary, VectorUDAFMinLong(col 9:int) -> int, VectorUDAFMaxLong(col 10:int) -> int, VectorUDAFCountMerge(col 11:bigint) -> bigint, VectorUDAFComputeBitVectorFinal(col 12:binary) -> binary
className: VectorGroupByOperator
groupByMode: FINAL
native: false
vectorProcessingMode: STREAMING
projectedOutputColumnNums: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
mode: final
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col0,0)) (type: bigint), COALESCE(_col1,0) (type: double), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col5,0)) (type: bigint), COALESCE(_col6,0) (type: double), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), UDFToLong(_col9) (type: bigint), UDFToLong(_col10) (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17
Select Vectorization:
className: VectorSelectOperator
native: true
projectedOutputColumnNums: [13, 15, 17, 18, 21, 4, 22, 24, 26, 27, 30, 8, 31, 9, 10, 32, 35, 12]
selectExpressions: ConstantVectorExpression(val STRING) -> 13:string, VectorCoalesce(columns [0, 14])(children: col 0:int, ConstantVectorExpression(val 0) -> 14:int) -> 15:int, VectorCoalesce(columns [1, 16])(children: col 1:double, ConstantVectorExpression(val 0.0) -> 16:double) -> 17:double, LongColSubtractLongColumn(col 2:bigint, col 3:bigint) -> 18:bigint, VectorCoalesce(columns [19, 20])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col4)) -> 19:bigint, ConstantVectorExpression(val 0) -> 20:bigint) -> 21:bigint, ConstantVectorExpression(val STRING) -> 22:string, VectorCoalesce(columns [5, 23])(children: col 5:int, ConstantVectorExpression(val 0) -> 23:int) -> 24:int, VectorCoalesce(columns [6, 25])(children: col 6:double, ConstantVectorExpression(val 0.0) -> 25:double) -> 26:double, LongColSubtractLongColumn(col 2:bigint, col 7:bigint) -> 27:bigint, VectorCoalesce(columns [28, 29])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col8)) -> 28:bigint, ConstantVectorExpression(val 0) -> 29:bigint) -> 30:bigint, ConstantVectorExpression(val LONG) -> 31:string, LongColSubtractLongColumn(col 2:bigint, col 11:bigint) -> 32:bigint, VectorCoalesce(columns [33, 34])(children: VectorUDFAdaptor(ndv_compute_bit_vector(_col12)) -> 33:bigint, ConstantVectorExpression(val 0) -> 34:bigint) -> 35:bigint
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
File Sink Vectorization:
className: VectorFileSinkOperator
native: false
Statistics: Num rows: 1 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-3
Dependency Collection
Stage: Stage-0
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t2_n55
Stage: Stage-4
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key1, key2, val
Column Types: string, string, int
Table: default.t2_n55
Stage: Stage-1
Move Operator
tables:
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.t3_n19
Stage: Stage-5
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key1, key2, val
Column Types: string, string, int
Table: default.t3_n19
PREHOOK: query: FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
PREHOOK: type: QUERY
PREHOOK: Input: default@t1_n90
PREHOOK: Output: default@t2_n55
PREHOOK: Output: default@t3_n19
POSTHOOK: query: FROM T1_n90
INSERT OVERWRITE TABLE T2_n55 SELECT key, val, count(1) group by key, val with cube
INSERT OVERWRITE TABLE T3_n19 SELECT key, val, sum(1) group by key, val with cube
POSTHOOK: type: QUERY
POSTHOOK: Input: default@t1_n90
POSTHOOK: Output: default@t2_n55
POSTHOOK: Output: default@t3_n19
POSTHOOK: Lineage: t2_n55.key1 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n55.key2 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t2_n55.val EXPRESSION [(t1_n90)t1_n90.null, ]
POSTHOOK: Lineage: t3_n19.key1 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n19.key2 SIMPLE [(t1_n90)t1_n90.FieldSchema(name:val, type:string, comment:null), ]
POSTHOOK: Lineage: t3_n19.val EXPRESSION [(t1_n90)t1_n90.null, ]
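As a hedged sanity check (not part of the original test output), after the multi-insert both target tables should hold the 18 cube rows shown earlier:
SELECT count(*) FROM T2_n55;   -- expected: 18
SELECT count(*) FROM T3_n19;   -- expected: 18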