PREHOOK: query: CREATE TABLE test_table1_n14 (key STRING, value STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table1_n14
POSTHOOK: query: CREATE TABLE test_table1_n14 (key STRING, value STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table1_n14
PREHOOK: query: CREATE TABLE test_table2_n13 (key STRING, value STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table2_n13
POSTHOOK: query: CREATE TABLE test_table2_n13 (key STRING, value STRING)
CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table2_n13
PREHOOK: query: INSERT OVERWRITE TABLE test_table1_n14 SELECT key, value FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table1_n14
POSTHOOK: query: INSERT OVERWRITE TABLE test_table1_n14 SELECT key, value FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table1_n14
POSTHOOK: Lineage: test_table1_n14.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table1_n14.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: INSERT OVERWRITE TABLE test_table2_n13 SELECT key, value FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@test_table2_n13
POSTHOOK: query: INSERT OVERWRITE TABLE test_table2_n13 SELECT key, value FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@test_table2_n13
POSTHOOK: Lineage: test_table2_n13.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: test_table2_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
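Both source tables are now loaded from src and are clustered and sorted on key into two buckets. A minimal spot-check, editorial and not part of the captured run, assuming the standard 500-row src fixture:

-- Hedged spot-check (not in the original test): sample one of the two buckets
-- to confirm rows were clustered on key. TABLESAMPLE(BUCKET x OUT OF y ON col)
-- is standard HiveQL for bucketed tables.
SELECT key, value
FROM test_table1_n14 TABLESAMPLE(BUCKET 1 OUT OF 2 ON key)
LIMIT 10;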
PREHOOK: query: CREATE TABLE test_table_out_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@test_table_out_n0
POSTHOOK: query: CREATE TABLE test_table_out_n0 (key STRING, value STRING) PARTITIONED BY (part STRING)
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@test_table_out_n0
PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT key, count(*) FROM test_table1_n14 GROUP BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT key, count(*) FROM test_table1_n14 GROUP BY key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Output: default@test_table_out_n0@part=1
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
Stage-3 depends on stages: Stage-0
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: test_table1_n14
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: key
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: key (type: string)
mode: final
outputColumnNames: _col0, _col1
Statistics: Num rows: 316 Data size: 30020 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), CAST( _col1 AS STRING) (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 316 Data size: 85636 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 316 Data size: 85636 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
outputColumnNames: key, value, part
Statistics: Num rows: 316 Data size: 112496 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(length(key)), avg(COALESCE(length(key),0)), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
keys: part (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: binary), _col6 (type: int), _col7 (type: struct<count:bigint,sum:double,input:int>), _col8 (type: bigint), _col9 (type: binary)
Execution mode: llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 421 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col1,0)) (type: bigint), COALESCE(_col2,0) (type: double), (_col3 - _col4) (type: bigint), COALESCE(ndv_compute_bit_vector(_col5),0) (type: bigint), _col5 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col6,0)) (type: bigint), COALESCE(_col7,0) (type: double), (_col3 - _col8) (type: bigint), COALESCE(ndv_compute_bit_vector(_col9),0) (type: bigint), _col9 (type: binary), _col0 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
Stage: Stage-0
Move Operator
tables:
partition:
part 1
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key, value
Column Types: string, string
Table: default.test_table_out_n0
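In the plan above, Stage-1 finishes the aggregation map-side (mode: final, which the bucketed/sorted-on-key layout makes possible) while Reducer 2 only merges the automatically gathered column statistics (hll bit vectors for NDV). A hedged equivalent for collecting those stats explicitly, assuming autogather were turned off:

-- Editorial sketch, not part of the test: explicit column-stats collection for
-- the target partition, equivalent in effect to the Stats Work stage above.
ANALYZE TABLE test_table_out_n0 PARTITION (part = '1')
COMPUTE STATISTICS FOR COLUMNS;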
PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT key, count(*) FROM test_table1_n14 GROUP BY key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT key, count(*) FROM test_table1_n14 GROUP BY key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).key SIMPLE [(test_table1_n14)test_table1_n14.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).value EXPRESSION [(test_table1_n14)test_table1_n14.null, ]
PREHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@test_table_out_n0
POSTHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@test_table_out_n0
# col_name data_type comment
key string
value string
# Partition Information
# col_name data_type comment
part string
# Detailed Partition Information
Partition Value: [1]
Database: default
Table: test_table_out_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles 2
numRows 309
rawDataSize 1482
totalSize 1791
#### A masked pattern was here ####
# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
WARNING: Comparing a bigint and a string may result in a loss of precision.
PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT a.key, a.value FROM (
SELECT key, count(*) AS value FROM test_table1_n14 GROUP BY key
) a JOIN (
SELECT key, value FROM src
) b
ON (a.value = b.value)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Input: default@test_table1_n14
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT a.key, a.value FROM (
SELECT key, count(*) AS value FROM test_table1_n14 GROUP BY key
) a JOIN (
SELECT key, value FROM src
) b
ON (a.value = b.value)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Output: default@test_table_out_n0@part=1
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
Stage-3 depends on stages: Stage-0
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: test_table1_n14
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: string)
outputColumnNames: key
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: key (type: string)
mode: final
outputColumnNames: _col0, _col1
Statistics: Num rows: 316 Data size: 30020 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: UDFToDouble(_col1) is not null (type: boolean)
Statistics: Num rows: 316 Data size: 30020 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col1 (type: bigint), UDFToDouble(_col1) (type: double)
outputColumnNames: _col0, _col1, _col2
Statistics: Num rows: 316 Data size: 32548 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col2 (type: double)
null sort order: z
sort order: +
Map-reduce partition columns: _col2 (type: double)
Statistics: Num rows: 316 Data size: 32548 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: string), _col1 (type: bigint)
Execution mode: vectorized, llap
LLAP IO: all inputs
Map 4
Map Operator Tree:
TableScan
alias: src
filterExpr: UDFToDouble(value) is not null (type: boolean)
Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: UDFToDouble(value) is not null (type: boolean)
Statistics: Num rows: 500 Data size: 45500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: UDFToDouble(value) (type: double)
outputColumnNames: _col0
Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: double)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: double)
Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized, llap
LLAP IO: all inputs
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 _col2 (type: double)
1 _col0 (type: double)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), CAST( _col1 AS STRING) (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 500 Data size: 135500 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: string), '1' (type: string)
outputColumnNames: key, value, part
Statistics: Num rows: 500 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(length(key)), avg(COALESCE(length(key),0)), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
keys: part (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: binary), _col6 (type: int), _col7 (type: struct<count:bigint,sum:double,input:int>), _col8 (type: bigint), _col9 (type: binary)
Reducer 3
Execution mode: llap
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 421 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col1,0)) (type: bigint), COALESCE(_col2,0) (type: double), (_col3 - _col4) (type: bigint), COALESCE(ndv_compute_bit_vector(_col5),0) (type: bigint), _col5 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col6,0)) (type: bigint), COALESCE(_col7,0) (type: double), (_col3 - _col8) (type: bigint), COALESCE(ndv_compute_bit_vector(_col9),0) (type: bigint), _col9 (type: binary), _col0 (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
Stage: Stage-0
Move Operator
tables:
partition:
part 1
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key, value
Column Types: string, string
Table: default.test_table_out_n0
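This plan reconciles the bigint count with the string value by casting both join keys to double (the UDFToDouble expressions in Map 1 and Map 4), which is what the precision warning above refers to; the next query in the file sidesteps the implicit conversion with an explicit CAST to STRING. A hedged illustration of the implicit comparison, not from the original run:

-- Editorial sketch: Hive compares bigint to string by casting both sides to double.
SELECT CAST(309 AS DOUBLE) = CAST('309' AS DOUBLE);  -- true
SELECT CAST('val_309' AS DOUBLE);                    -- NULL: not parseable, so such
                                                     -- rows are dropped by the
                                                     -- "is not null" filters above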
PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT a.key, a.value FROM (
SELECT key, cast(count(*) AS STRING) AS value FROM test_table1_n14 GROUP BY key
) a JOIN (
SELECT key, value FROM src
) b
ON (a.value = b.value)
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Input: default@test_table1_n14
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT a.key, a.value FROM (
SELECT key, cast(count(*) AS STRING) AS value FROM test_table1_n14 GROUP BY key
) a JOIN (
SELECT key, value FROM src
) b
ON (a.value = b.value)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).key SIMPLE [(test_table1_n14)test_table1_n14.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).value EXPRESSION [(test_table1_n14)test_table1_n14.null, ]
PREHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@test_table_out_n0
POSTHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@test_table_out_n0
# col_name data_type comment
key string
value string
# Partition Information
# col_name data_type comment
part string
# Detailed Partition Information
Partition Value: [1]
Database: default
Table: test_table_out_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles 0
numRows 0
rawDataSize 0
totalSize 0
#### A masked pattern was here ####
# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
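The numRows 0 above is the expected outcome: a.value holds digit strings produced by count(*), while b.value holds 'val_...' strings from src, so the equi-join matches nothing and the INSERT OVERWRITE leaves the partition empty. A hedged verification query (editorial, not in the captured run):

-- Should return 0 for this partition, per the numRows shown above.
SELECT COUNT(*) FROM test_table_out_n0 WHERE part = '1';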
PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Input: default@test_table2_n13
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Input: default@test_table2_n13
POSTHOOK: Output: default@test_table_out_n0@part=1
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
Stage-3 depends on stages: Stage-0
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Dummy Store
Map Operator Tree:
TableScan
alias: a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: string)
1 key (type: string)
outputColumnNames: _col0, _col6
Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), _col6 (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: key, value
Statistics: Num rows: 791 Data size: 208033 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(length(key)), avg(COALESCE(length(key),0)), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
keys: '1' (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: '1' (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: '1' (type: string)
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: binary), _col6 (type: int), _col7 (type: struct<count:bigint,sum:double,input:int>), _col8 (type: bigint), _col9 (type: binary)
Execution mode: llap
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
keys: '1' (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 421 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col1,0)) (type: bigint), COALESCE(_col2,0) (type: double), (_col3 - _col4) (type: bigint), COALESCE(ndv_compute_bit_vector(_col5),0) (type: bigint), _col5 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col6,0)) (type: bigint), COALESCE(_col7,0) (type: double), (_col3 - _col8) (type: bigint), COALESCE(ndv_compute_bit_vector(_col9),0) (type: bigint), _col9 (type: binary), '1' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
Stage: Stage-0
Move Operator
tables:
partition:
part 1
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key, value
Column Types: string, string
Table: default.test_table_out_n0
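Despite the MAPJOIN hint, the plan executes the join as a sort-merge bucket join: both map operator trees sit in the single Map 1 vertex, with the hinted side parked behind a Dummy Store, which the matching CLUSTERED BY/SORTED BY (key) layout of the two tables permits. A hedged sketch of the settings that typically enable this conversion (an assumption about the harness configuration, not shown in this output):

-- Editorial sketch: typical knobs governing SMB join conversion in Hive.
SET hive.auto.convert.sortmerge.join=true;
SET hive.optimize.bucketmapjoin=true;
SET hive.optimize.bucketmapjoin.sortedmerge=true;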
PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Input: default@test_table2_n13
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Input: default@test_table2_n13
POSTHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).key SIMPLE [(test_table1_n14)a.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).value SIMPLE [(test_table2_n13)b.FieldSchema(name:value, type:string, comment:null), ]
PREHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@test_table_out_n0
POSTHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@test_table_out_n0
# col_name data_type comment
key string
value string
# Partition Information
# col_name data_type comment
part string
# Detailed Partition Information
Partition Value: [1]
Database: default
Table: test_table_out_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles 2
numRows 1028
rawDataSize 10968
totalSize 11996
#### A masked pattern was here ####
# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
GROUP BY b.value
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Input: default@test_table2_n13
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
GROUP BY b.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Input: default@test_table2_n13
POSTHOOK: Output: default@test_table_out_n0@part=1
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-0 depends on stages: Stage-2
Stage-3 depends on stages: Stage-0
STAGE PLANS:
Stage: Stage-1
Tez
#### A masked pattern was here ####
Edges:
Reducer 2 <- Map 1 (SIMPLE_EDGE)
Reducer 3 <- Reducer 2 (SIMPLE_EDGE)
#### A masked pattern was here ####
Vertices:
Map 1
Map Operator Tree:
TableScan
alias: b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
Dummy Store
Map Operator Tree:
TableScan
alias: a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE
Merge Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: string)
1 key (type: string)
outputColumnNames: _col6
Statistics: Num rows: 791 Data size: 71981 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: _col6 (type: string)
minReductionHashAggr: 0.6118837
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 307 Data size: 30393 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: string)
Statistics: Num rows: 307 Data size: 30393 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: llap
Reducer 2
Execution mode: llap
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1
Statistics: Num rows: 307 Data size: 30393 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: _col0 (type: string), CAST( _col1 AS STRING) (type: string)
outputColumnNames: _col0, _col1
Statistics: Num rows: 307 Data size: 84425 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 307 Data size: 84425 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Select Operator
expressions: _col0 (type: string), _col1 (type: string)
outputColumnNames: key, value
Statistics: Num rows: 307 Data size: 110520 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: max(length(key)), avg(COALESCE(length(key),0)), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
keys: '1' (type: string)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: '1' (type: string)
null sort order: z
sort order: +
Map-reduce partition columns: '1' (type: string)
Statistics: Num rows: 1 Data size: 557 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: int), _col2 (type: struct<count:bigint,sum:double,input:int>), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: binary), _col6 (type: int), _col7 (type: struct<count:bigint,sum:double,input:int>), _col8 (type: bigint), _col9 (type: binary)
Reducer 3
Execution mode: llap
Reduce Operator Tree:
Group By Operator
aggregations: max(VALUE._col0), avg(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
keys: '1' (type: string)
mode: mergepartial
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
Statistics: Num rows: 1 Data size: 421 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: 'STRING' (type: string), UDFToLong(COALESCE(_col1,0)) (type: bigint), COALESCE(_col2,0) (type: double), (_col3 - _col4) (type: bigint), COALESCE(ndv_compute_bit_vector(_col5),0) (type: bigint), _col5 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col6,0)) (type: bigint), COALESCE(_col7,0) (type: double), (_col3 - _col8) (type: bigint), COALESCE(ndv_compute_bit_vector(_col9),0) (type: bigint), _col9 (type: binary), '1' (type: string)
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 617 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-2
Dependency Collection
Stage: Stage-0
Move Operator
tables:
partition:
part 1
replace: true
table:
input format: org.apache.hadoop.mapred.TextInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
name: default.test_table_out_n0
Stage: Stage-3
Stats Work
Basic Stats Work:
Column Stats Desc:
Columns: key, value
Column Types: string, string
Table: default.test_table_out_n0
PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
GROUP BY b.value
PREHOOK: type: QUERY
PREHOOK: Input: default@test_table1_n14
PREHOOK: Input: default@test_table2_n13
PREHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_n0 PARTITION (part = '1')
SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1_n14 a JOIN test_table2_n13 b ON a.key = b.key
GROUP BY b.value
POSTHOOK: type: QUERY
POSTHOOK: Input: default@test_table1_n14
POSTHOOK: Input: default@test_table2_n13
POSTHOOK: Output: default@test_table_out_n0@part=1
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).key SIMPLE [(test_table2_n13)b.FieldSchema(name:value, type:string, comment:null), ]
POSTHOOK: Lineage: test_table_out_n0 PARTITION(part=1).value EXPRESSION [(test_table1_n14)a.null, (test_table2_n13)b.null, ]
PREHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
PREHOOK: type: DESCTABLE
PREHOOK: Input: default@test_table_out_n0
POSTHOOK: query: DESCRIBE FORMATTED test_table_out_n0 PARTITION (part = '1')
POSTHOOK: type: DESCTABLE
POSTHOOK: Input: default@test_table_out_n0
# col_name data_type comment
key string
value string
# Partition Information
# col_name data_type comment
part string
# Detailed Partition Information
Partition Value: [1]
Database: default
Table: test_table_out_n0
#### A masked pattern was here ####
Partition Parameters:
COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
numFiles 2
numRows 309
rawDataSize 2728
totalSize 3037
#### A masked pattern was here ####
# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1