PREHOOK: query: CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@tbl1_n13
POSTHOOK: query: CREATE TABLE tbl1_n13(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl1_n13
PREHOOK: query: CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@tbl2_n12
POSTHOOK: query: CREATE TABLE tbl2_n12(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@tbl2_n12
PREHOOK: query: insert overwrite table tbl1_n13
select * from src where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tbl1_n13
POSTHOOK: query: insert overwrite table tbl1_n13
select * from src where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@tbl1_n13
POSTHOOK: Lineage: tbl1_n13.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: tbl1_n13.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: insert overwrite table tbl2_n12
select * from src where key < 10
PREHOOK: type: QUERY
PREHOOK: Input: default@src
PREHOOK: Output: default@tbl2_n12
POSTHOOK: query: insert overwrite table tbl2_n12
select * from src where key < 10
POSTHOOK: type: QUERY
POSTHOOK: Input: default@src
POSTHOOK: Output: default@tbl2_n12
POSTHOOK: Lineage: tbl2_n12.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: tbl2_n12.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
PREHOOK: query: analyze table tbl1_n13 compute statistics
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Output: default@tbl1_n13
POSTHOOK: query: analyze table tbl1_n13 compute statistics
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Output: default@tbl1_n13
PREHOOK: query: analyze table tbl1_n13 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
PREHOOK: Input: default@tbl1_n13
PREHOOK: Output: default@tbl1_n13
#### A masked pattern was here ####
POSTHOOK: query: analyze table tbl1_n13 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Output: default@tbl1_n13
#### A masked pattern was here ####
PREHOOK: query: analyze table tbl2_n12 compute statistics
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl2_n12
PREHOOK: Output: default@tbl2_n12
POSTHOOK: query: analyze table tbl2_n12 compute statistics
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl2_n12
POSTHOOK: Output: default@tbl2_n12
PREHOOK: query: analyze table tbl2_n12 compute statistics for columns
PREHOOK: type: ANALYZE_TABLE
PREHOOK: Input: default@tbl2_n12
PREHOOK: Output: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: analyze table tbl2_n12 compute statistics for columns
POSTHOOK: type: ANALYZE_TABLE
POSTHOOK: Input: default@tbl2_n12
POSTHOOK: Output: default@tbl2_n12
#### A masked pattern was here ####
PREHOOK: query: explain
select count(*) from (
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from (
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1, Stage-4
Stage-3 depends on stages: Stage-2
Stage-4 is a root stage
Stage-0 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-2
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
Reduce Operator Tree:
Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
outputColumnNames: $f1, $f10
Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ($f1 * $f10) (type: bigint)
outputColumnNames: $f4
Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: $sum0($f4)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-3
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: $sum0(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-4
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from (
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from (
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
22
PREHOOK: query: explain
select count(*) from
(
select key, count(*) from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(
select key, count(*) from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1
Stage-3 depends on stages: Stage-2
Stage-0 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq2:subq1:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-2
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
TableScan
alias: subq2:subq1:b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Operator Tree:
Join Operator
condition map:
Left Semi Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-3
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(
select key, count(*) from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(
select key, count(*) from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1
group by key
) subq2
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
6
PREHOOK: query: explain
select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1, Stage-3, Stage-5
Stage-3 is a root stage
Stage-4 is a root stage
Stage-5 depends on stages: Stage-4, Stage-6
Stage-6 is a root stage
Stage-0 depends on stages: Stage-2
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: src2:subq2:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-2
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint), $f10 (type: bigint)
Reduce Operator Tree:
Join Operator
condition map:
Inner Join 0 to 1
Inner Join 0 to 2
keys:
0 key (type: int)
1 key (type: int)
2 key (type: int)
outputColumnNames: key, $f1, $f10, $f11, $f100
Statistics: Num rows: 6 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int), ($f11 * $f100) (type: bigint), ($f1 * $f10) (type: bigint)
outputColumnNames: key, cnt1, cnt11
Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-3
Map Reduce
Map Operator Tree:
TableScan
alias: src2:subq2:b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-4
Map Reduce
Map Operator Tree:
TableScan
alias: src1:subq1:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-5
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
Reduce Operator Tree:
Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
outputColumnNames: key, $f1, $f10
Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-6
Map Reduce
Map Operator Tree:
TableScan
alias: src1:subq1:b
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
bucketGroup: true
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq1 group by key
) src1
join
(
select key, count(*) as cnt1 from
(
select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
) subq2 group by key
) src2
on src1.key = src2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
0 9 9
2 1 1
4 1 1
5 9 9
8 1 1
9 1 1
PREHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
20
PREHOOK: query: explain
select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq2:subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
20
PREHOOK: query: explain
select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
on subq2.key = subq4.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
on subq2.key = subq4.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq2:subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
on subq2.key = subq4.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq3
where key < 6
) subq4
on subq2.key = subq4.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
#### A masked pattern was here ####
20
PREHOOK: query: explain
select count(*) from
(select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
(select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
(select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: (key < 8) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 8) (type: boolean)
Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 9 Data size: 39 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 9 Data size: 39 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
(select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(select a.key as key, concat(a.value, a.value) as value from tbl1_n13 a where key < 8) subq1
join
(select a.key as key, concat(a.value, a.value) as value from tbl2_n12 a where key < 8) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
20
PREHOOK: query: explain
select count(*) from
(select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
(select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
(select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-2 depends on stages: Stage-1, Stage-4
Stage-3 depends on stages: Stage-2
Stage-4 is a root stage
Stage-0 depends on stages: Stage-3
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: (key + 1) (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-2
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
TableScan
Reduce Output Operator
key expressions: key (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: key (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: $f1 (type: bigint)
Reduce Operator Tree:
Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
outputColumnNames: $f1, $f10
Statistics: Num rows: 6 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: ($f1 * $f10) (type: bigint)
outputColumnNames: $f4
Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: $sum0($f4)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-3
Map Reduce
Map Operator Tree:
TableScan
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col0 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: $sum0(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-4
Map Reduce
Map Operator Tree:
TableScan
alias: subq2:a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: (key + 1) (type: int)
outputColumnNames: key
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Group By Operator
aggregations: count()
keys: key (type: int)
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0, _col1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
Reduce Output Operator
key expressions: _col0 (type: int)
null sort order: z
sort order: +
Map-reduce partition columns: _col0 (type: int)
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
value expressions: _col1 (type: bigint)
Execution mode: vectorized
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
keys: KEY._col0 (type: int)
mode: mergepartial
outputColumnNames: key, $f1
Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE
File Output Operator
compressed: false
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
(select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(select a.key +1 as key, concat(a.value, a.value) as value from tbl1_n13 a) subq1
join
(select a.key +1 as key, concat(a.value, a.value) as value from tbl2_n12 a) subq2
on subq1.key = subq2.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
22
PREHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join tbl2_n12 a on subq1.key = a.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join tbl2_n12 a on subq1.key = a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join tbl2_n12 a on subq1.key = a.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join tbl2_n12 a on subq1.key = a.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
20
PREHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on (subq1.key = subq2.key)
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key)
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on (subq1.key = subq2.key)
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
Inner Join 0 to 2
keys:
0 key (type: int)
1 key (type: int)
2 key (type: int)
Statistics: Num rows: 15 Data size: 61 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 15 Data size: 61 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key)
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from
(select a.key as key, a.value as value from tbl1_n13 a where key < 6) subq1
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq2
on subq1.key = subq2.key
join
(select a.key as key, a.value as value from tbl2_n12 a where key < 6) subq3
on (subq1.key = subq3.key)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
56
PREHOOK: query: explain
select count(*) from (
select subq2.key as key, subq2.value as value1, b.value as value2 from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key) a
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain
select count(*) from (
select subq2.key as key, subq2.value as value1, b.value as value2 from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: a:subq2:subq1:a
filterExpr: (key < 6) (type: boolean)
Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: (key < 6) (type: boolean)
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int)
outputColumnNames: key
Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Select Operator
Statistics: Num rows: 7 Data size: 30 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
minReductionHashAggr: 0.99
mode: hash
outputColumnNames: _col0
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
null sort order:
sort order:
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
value expressions: _col0 (type: bigint)
Reduce Operator Tree:
Group By Operator
aggregations: count(VALUE._col0)
mode: mergepartial
outputColumnNames: $f0
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select count(*) from (
select subq2.key as key, subq2.value as value1, b.value as value2 from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key) a
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from (
select subq2.key as key, subq2.value as value1, b.value as value2 from
(
select * from
(
select a.key as key, a.value as value from tbl1_n13 a where key < 8
) subq1
where key < 6
) subq2
join tbl2_n12 b
on subq2.key = b.key) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
20
PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int), value (type: string)
outputColumnNames: key, value
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
outputColumnNames: key, value, value0
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: int), value (type: string), value0 (type: string)
outputColumnNames: key, val1, val2
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
2 val_2 val_2
4 val_4 val_4
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
8 val_8 val_8
9 val_9 val_9
PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
STAGE DEPENDENCIES:
Stage-1 is a root stage
Stage-0 depends on stages: Stage-1
STAGE PLANS:
Stage: Stage-1
Map Reduce
Map Operator Tree:
TableScan
alias: a
filterExpr: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Filter Operator
predicate: key is not null (type: boolean)
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Select Operator
expressions: key (type: int), value (type: string)
outputColumnNames: key, value
Statistics: Num rows: 10 Data size: 930 Basic stats: COMPLETE Column stats: COMPLETE
Sorted Merge Bucket Map Join Operator
condition map:
Inner Join 0 to 1
keys:
0 key (type: int)
1 key (type: int)
outputColumnNames: key, value, value0
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: key (type: int), value (type: string), value0 (type: string)
outputColumnNames: key, val1, val2
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
Statistics: Num rows: 11 Data size: 1023 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
Stage: Stage-0
Fetch Operator
limit: -1
Processor Tree:
ListSink
PREHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
PREHOOK: type: QUERY
PREHOOK: Input: default@tbl1_n13
PREHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
POSTHOOK: query: select a.key as key, a.value as val1, b.value as val2 from tbl1_n13 a join tbl2_n12 b on a.key = b.key
POSTHOOK: type: QUERY
POSTHOOK: Input: default@tbl1_n13
POSTHOOK: Input: default@tbl2_n12
#### A masked pattern was here ####
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
0 val_0 val_0
2 val_2 val_2
4 val_4 val_4
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
5 val_5 val_5
8 val_8 val_8
9 val_9 val_9