PREHOOK: query: DROP TABLE orcfile_merge1_n0
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE orcfile_merge1_n0
POSTHOOK: type: DROPTABLE
PREHOOK: query: DROP TABLE orcfile_merge1b_n0
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE orcfile_merge1b_n0
POSTHOOK: type: DROPTABLE
PREHOOK: query: DROP TABLE orcfile_merge1c_n0
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE orcfile_merge1c_n0
POSTHOOK: type: DROPTABLE
PREHOOK: query: CREATE TABLE orcfile_merge1_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orcfile_merge1_n0
POSTHOOK: query: CREATE TABLE orcfile_merge1_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orcfile_merge1_n0
PREHOOK: query: CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orcfile_merge1b_n0
POSTHOOK: query: CREATE TABLE orcfile_merge1b_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orcfile_merge1b_n0
PREHOOK: query: CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@orcfile_merge1c_n0
POSTHOOK: query: CREATE TABLE orcfile_merge1c_n0 (key INT, value STRING)
PARTITIONED BY (ds STRING, part STRING) STORED AS ORC
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@orcfile_merge1c_n0
PREHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1_n0@ds=1
POSTHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-2 depends on stages: Stage-1
  Stage-0 depends on stages: Stage-2
  Stage-3 depends on stages: Stage-0

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
                    outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: _col2 (type: string)
                      null sort order: a
                      sort order: +
                      Map-reduce partition columns: _col2 (type: string)
                      Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col0 (type: int), _col1 (type: string)
                    Select Operator
                      expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
                      outputColumnNames: key, value, ds, part
                      Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
                      Group By Operator
                        aggregations: min(key), max(key), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
                        keys: ds (type: string), part (type: string)
                        minReductionHashAggr: 0.5
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                        Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          null sort order: zz
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                          Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                          value expressions: _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: binary), _col7 (type: int), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: bigint), _col10 (type: binary)
            Execution mode: llap
            LLAP IO: all inputs
        Reducer 2
            Execution mode: llap
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
                outputColumnNames: _col0, _col1, _col2
                File Output Operator
                  compressed: false
                  Dp Sort State: PARTITION_SORTED
                  Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                      name: default.orcfile_merge1_n0
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
              Group By Operator
                aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                Statistics: Num rows: 250 Data size: 150250 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: 'LONG' (type: string), UDFToLong(_col2) (type: bigint), UDFToLong(_col3) (type: bigint), (_col4 - _col5) (type: bigint), COALESCE(ndv_compute_bit_vector(_col6),0) (type: bigint), _col6 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col7,0)) (type: bigint), COALESCE(_col8,0) (type: double), (_col4 - _col9) (type: bigint), COALESCE(ndv_compute_bit_vector(_col10),0) (type: bigint), _col10 (type: binary), _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
                  Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-2
    Dependency Collection

  Stage: Stage-0
    Move Operator
      tables:
          partition:
            ds 1
            part
          replace: true
          table:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
              name: default.orcfile_merge1_n0

  Stage: Stage-3
    Stats Work
      Basic Stats Work:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
          Table: default.orcfile_merge1_n0

PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1_n0@ds=1
POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@orcfile_merge1_n0@ds=1/part=0
POSTHOOK: Output: default@orcfile_merge1_n0@ds=1/part=1
POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
Found 1 items
#### A masked pattern was here ####
PREHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1b_n0@ds=1
POSTHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
  Stage-5
  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
  Stage-0 depends on stages: Stage-2
  Stage-3 depends on stages: Stage-0
  Stage-4
  Stage-6
  Stage-7 depends on stages: Stage-6

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
                    outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: _col2 (type: string)
                      null sort order: a
                      sort order: +
                      Map-reduce partition columns: _col2 (type: string)
                      Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col0 (type: int), _col1 (type: string)
                    Select Operator
                      expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
                      outputColumnNames: key, value, ds, part
                      Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
                      Group By Operator
                        aggregations: min(key), max(key), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
                        keys: ds (type: string), part (type: string)
                        minReductionHashAggr: 0.5
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                        Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          null sort order: zz
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                          Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                          value expressions: _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: binary), _col7 (type: int), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: bigint), _col10 (type: binary)
            Execution mode: llap
            LLAP IO: all inputs
        Reducer 2
            Execution mode: llap
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
                outputColumnNames: _col0, _col1, _col2
                File Output Operator
                  compressed: false
                  Dp Sort State: PARTITION_SORTED
                  Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                      name: default.orcfile_merge1b_n0
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
              Group By Operator
                aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                Statistics: Num rows: 250 Data size: 150250 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: 'LONG' (type: string), UDFToLong(_col2) (type: bigint), UDFToLong(_col3) (type: bigint), (_col4 - _col5) (type: bigint), COALESCE(ndv_compute_bit_vector(_col6),0) (type: bigint), _col6 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col7,0)) (type: bigint), COALESCE(_col8,0) (type: double), (_col4 - _col9) (type: bigint), COALESCE(ndv_compute_bit_vector(_col10),0) (type: bigint), _col10 (type: binary), _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
                  Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-8
    Conditional Operator

  Stage: Stage-5
    Move Operator
      files:
          hdfs directory: true
#### A masked pattern was here ####

  Stage: Stage-2
    Dependency Collection

  Stage: Stage-0
    Move Operator
      tables:
          partition:
            ds 1
            part
          replace: true
          table:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
              name: default.orcfile_merge1b_n0

  Stage: Stage-3
    Stats Work
      Basic Stats Work:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
          Table: default.orcfile_merge1b_n0

  Stage: Stage-4
    Tez
#### A masked pattern was here ####
      Vertices:
        File Merge
            Map Operator Tree:
                TableScan
                  File Output Operator
                    compressed: false
                    table:
                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                        serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                        name: default.orcfile_merge1b_n0

  Stage: Stage-6
    Tez
#### A masked pattern was here ####
      Vertices:
        File Merge
            Map Operator Tree:
                TableScan
                  File Output Operator
                    compressed: false
                    table:
                        input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                        output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                        serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                        name: default.orcfile_merge1b_n0

  Stage: Stage-7
    Move Operator
      files:
          hdfs directory: true
#### A masked pattern was here ####

PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1b_n0@ds=1
POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1b_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@orcfile_merge1b_n0@ds=1/part=0
POSTHOOK: Output: default@orcfile_merge1b_n0@ds=1/part=1
POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1b_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
Found 1 items
#### A masked pattern was here ####
PREHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1c_n0@ds=1
POSTHOOK: query: EXPLAIN
INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
STAGE DEPENDENCIES:
  Stage-1 is a root stage
  Stage-8 depends on stages: Stage-1 , consists of Stage-5, Stage-4, Stage-6
  Stage-5
  Stage-2 depends on stages: Stage-5, Stage-4, Stage-7
  Stage-0 depends on stages: Stage-2
  Stage-3 depends on stages: Stage-0
  Stage-4
  Stage-6
  Stage-7 depends on stages: Stage-6

STAGE PLANS:
  Stage: Stage-1
    Tez
#### A masked pattern was here ####
      Edges:
        Reducer 2 <- Map 1 (SIMPLE_EDGE)
        Reducer 3 <- Map 1 (SIMPLE_EDGE)
#### A masked pattern was here ####
      Vertices:
        Map 1
            Map Operator Tree:
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE
                  Select Operator
                    expressions: UDFToInteger(key) (type: int), value (type: string), CAST( (hash(key) pmod 2) AS STRING) (type: string)
                    outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                    Reduce Output Operator
                      key expressions: _col2 (type: string)
                      null sort order: a
                      sort order: +
                      Map-reduce partition columns: _col2 (type: string)
                      Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                      value expressions: _col0 (type: int), _col1 (type: string)
                    Select Operator
                      expressions: _col0 (type: int), _col1 (type: string), '1' (type: string), _col2 (type: string)
                      outputColumnNames: key, value, ds, part
                      Statistics: Num rows: 500 Data size: 182000 Basic stats: COMPLETE Column stats: COMPLETE
                      Group By Operator
                        aggregations: min(key), max(key), count(1), count(key), compute_bit_vector(key, 'hll'), max(length(value)), avg(COALESCE(length(value),0)), count(value), compute_bit_vector(value, 'hll')
                        keys: ds (type: string), part (type: string)
                        minReductionHashAggr: 0.5
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                        Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          null sort order: zz
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
                          Statistics: Num rows: 250 Data size: 167250 Basic stats: COMPLETE Column stats: COMPLETE
                          value expressions: _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: binary), _col7 (type: int), _col8 (type: struct<count:bigint,sum:double,input:int>), _col9 (type: bigint), _col10 (type: binary)
            Execution mode: llap
            LLAP IO: all inputs
        Reducer 2
            Execution mode: llap
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), KEY._col2 (type: string)
                outputColumnNames: _col0, _col1, _col2
                File Output Operator
                  compressed: false
                  Dp Sort State: PARTITION_SORTED
                  Statistics: Num rows: 500 Data size: 139500 Basic stats: COMPLETE Column stats: COMPLETE
                  table:
                      input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
                      output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
                      serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
                      name: default.orcfile_merge1c_n0
        Reducer 3
            Execution mode: llap
            Reduce Operator Tree:
              Group By Operator
                aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector(VALUE._col4), max(VALUE._col5), avg(VALUE._col6), count(VALUE._col7), compute_bit_vector(VALUE._col8)
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10
                Statistics: Num rows: 250 Data size: 150250 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
                  expressions: 'LONG' (type: string), UDFToLong(_col2) (type: bigint), UDFToLong(_col3) (type: bigint), (_col4 - _col5) (type: bigint), COALESCE(ndv_compute_bit_vector(_col6),0) (type: bigint), _col6 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col7,0)) (type: bigint), COALESCE(_col8,0) (type: double), (_col4 - _col9) (type: bigint), COALESCE(ndv_compute_bit_vector(_col10),0) (type: bigint), _col10 (type: binary), _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
                  Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
                    Statistics: Num rows: 250 Data size: 199750 Basic stats: COMPLETE Column stats: COMPLETE
                    table:
                        input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

  Stage: Stage-8
    Conditional Operator

  Stage: Stage-5
    Move Operator
      files:
          hdfs directory: true
#### A masked pattern was here ####

  Stage: Stage-2
    Dependency Collection

  Stage: Stage-0
    Move Operator
      tables:
          partition:
            ds 1
            part
          replace: true
          table:
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
              name: default.orcfile_merge1c_n0

  Stage: Stage-3
    Stats Work
      Basic Stats Work:
      Column Stats Desc:
          Columns: key, value
          Column Types: int, string
          Table: default.orcfile_merge1c_n0

  Stage: Stage-4
    Tez
#### A masked pattern was here ####
      Vertices:
        File Merge
            Merge File Operator
              Map Operator Tree:
                  ORC File Merge Operator
              merge level: stripe
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

  Stage: Stage-6
    Tez
#### A masked pattern was here ####
      Vertices:
        File Merge
            Merge File Operator
              Map Operator Tree:
                  ORC File Merge Operator
              merge level: stripe
              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat

  Stage: Stage-7
    Move Operator
      files:
          hdfs directory: true
#### A masked pattern was here ####

PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@orcfile_merge1c_n0@ds=1
POSTHOOK: query: INSERT OVERWRITE TABLE orcfile_merge1c_n0 PARTITION (ds='1', part)
SELECT key, value, PMOD(HASH(key), 2) as part
FROM src
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@orcfile_merge1c_n0@ds=1/part=0
POSTHOOK: Output: default@orcfile_merge1c_n0@ds=1/part=1
POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=0).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=0).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: orcfile_merge1c_n0 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
Found 1 items
#### A masked pattern was here ####
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1_n0 WHERE ds='1'
) t
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1_n0
PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1_n0 WHERE ds='1'
) t
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1_n0
POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
-21975308766
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1b_n0 WHERE ds='1'
) t
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1b_n0
PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1b_n0 WHERE ds='1'
) t
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1b_n0
POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
-21975308766
PREHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1c_n0 WHERE ds='1'
) t
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1c_n0
PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: SELECT SUM(HASH(c)) FROM (
SELECT TRANSFORM(*) USING 'tr \t _' AS (c)
FROM orcfile_merge1c_n0 WHERE ds='1'
) t
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1c_n0
POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
-21975308766
PREHOOK: query: select count(*) from orcfile_merge1_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1_n0
PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: select count(*) from orcfile_merge1_n0
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1_n0
POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
500
PREHOOK: query: select count(*) from orcfile_merge1b_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1b_n0
PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: select count(*) from orcfile_merge1b_n0
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1b_n0
POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1b_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
500
PREHOOK: query: select count(*) from orcfile_merge1c_n0
PREHOOK: type: QUERY
PREHOOK: Input: default@orcfile_merge1c_n0
PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
PREHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
PREHOOK: Output: hdfs://### HDFS PATH ###
POSTHOOK: query: select count(*) from orcfile_merge1c_n0
POSTHOOK: type: QUERY
POSTHOOK: Input: default@orcfile_merge1c_n0
POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=0
POSTHOOK: Input: default@orcfile_merge1c_n0@ds=1/part=1
POSTHOOK: Output: hdfs://### HDFS PATH ###
500
PREHOOK: query: DROP TABLE orcfile_merge1_n0
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@orcfile_merge1_n0
PREHOOK: Output: default@orcfile_merge1_n0
POSTHOOK: query: DROP TABLE orcfile_merge1_n0
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@orcfile_merge1_n0
POSTHOOK: Output: default@orcfile_merge1_n0
PREHOOK: query: DROP TABLE orcfile_merge1b_n0
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@orcfile_merge1b_n0
PREHOOK: Output: default@orcfile_merge1b_n0
POSTHOOK: query: DROP TABLE orcfile_merge1b_n0
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@orcfile_merge1b_n0
POSTHOOK: Output: default@orcfile_merge1b_n0
PREHOOK: query: DROP TABLE orcfile_merge1c_n0
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@orcfile_merge1c_n0
PREHOOK: Output: default@orcfile_merge1c_n0
POSTHOOK: query: DROP TABLE orcfile_merge1c_n0
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@orcfile_merge1c_n0
POSTHOOK: Output: default@orcfile_merge1c_n0