# Basic test with a single with-clause view.
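# The view t should be inlined, leaving a plain scan of functional.alltypes.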
with t as (select int_col x, bigint_col y from functional.alltypes) select x, y from t
---- PLAN
PLAN-ROOT SINK
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
====
# Basic test with a single with-clause view that references a catalog view.
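# Both the with-clause view and the catalog view should be expanded, leaving a
# single scan of the underlying functional.alltypes table.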
with t as (select int_col x, bigint_col y from functional.alltypes_view)
select x, y from t
---- PLAN
PLAN-ROOT SINK
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
01:EXCHANGE [UNPARTITIONED]
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
====
# Multiple views in with-clause. Only one view is used.
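# The unused views t1 and t3 should not appear in the plan; only the constant
# select from t2 remains as a single-operand UNION.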
with t1 as (select int_col x, bigint_col y from functional.alltypes),
t2 as (select 1 x , 10 y), t3 as (values(2 x , 20 y), (3, 30))
select x, y from t2
---- PLAN
PLAN-ROOT SINK
|
00:UNION
constant-operands=1
row-size=2B cardinality=1
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
00:UNION
constant-operands=1
row-size=2B cardinality=1
====
# Multiple views in with-clause. All views are used in a union.
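# t1 becomes an HDFS scan while t2 and t3 become constant-operand UNION nodes
# under the top-level UNION.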
with t1 as (select int_col x, bigint_col y from functional.alltypes),
t2 as (select 1 x , 10 y), t3 as (values(2 x , 20 y), (3, 30))
select * from t1 union all select * from t2 union all select * from t3
---- PLAN
PLAN-ROOT SINK
|
00:UNION
| row-size=12B cardinality=7.30K
|
|--03:UNION
| constant-operands=2
| row-size=2B cardinality=2
|
|--02:UNION
| constant-operands=1
| row-size=2B cardinality=1
|
01:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
06:EXCHANGE [UNPARTITIONED]
|
00:UNION
| row-size=12B cardinality=7.30K
|
|--05:EXCHANGE [RANDOM]
| |
| 03:UNION
| constant-operands=2
| row-size=2B cardinality=2
|
|--04:EXCHANGE [RANDOM]
| |
| 02:UNION
| constant-operands=1
| row-size=2B cardinality=1
|
01:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
row-size=12B cardinality=7.30K
====
# Multiple views in with-clause. All views are used in a join.
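# Each view resolves to a scan of its base table; the joins become hash joins
# on int_col with runtime filters.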
with t1 as (select int_col x, bigint_col y from functional.alltypes),
t2 as (select int_col x, bigint_col y from functional.alltypestiny),
t3 as (select int_col x, bigint_col y from functional.alltypessmall)
select * from t1, t2, t3 where t1.x = t2.x and t2.x = t3.x
---- PLAN
PLAN-ROOT SINK
|
04:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=58.40K
|
|--02:SCAN HDFS [functional.alltypessmall]
| partitions=4/4 files=4 size=6.32KB
| row-size=12B cardinality=100
|
03:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=5.84K
|
|--01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
row-size=12B cardinality=7.30K
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
07:EXCHANGE [UNPARTITIONED]
|
04:HASH JOIN [INNER JOIN, BROADCAST]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=58.40K
|
|--06:EXCHANGE [BROADCAST]
| |
| 02:SCAN HDFS [functional.alltypessmall]
| partitions=4/4 files=4 size=6.32KB
| row-size=12B cardinality=100
|
03:HASH JOIN [INNER JOIN, BROADCAST]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=5.84K
|
|--05:EXCHANGE [BROADCAST]
| |
| 01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypes]
partitions=24/24 files=24 size=478.45KB
runtime filters: RF000 -> functional.alltypes.int_col, RF002 -> int_col
row-size=12B cardinality=7.30K
====
# Multiple dependent views in with-clause.
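# t2 and t4 are pass-through selects, so only the aggregations, top-ns, and
# limits from t1, t3, and the outer query block should appear in the plan.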
with t1 as (
select int_col c1, tinyint_col c2, max(id) c3
from functional.alltypessmall
group by 1, 2
order by 1,2
limit 5),
t2 as (select c1, c2, c3 from t1),
t3 as (
select c1, c3, max(c2) m2
from t2
group by c1, c3
limit 10),
t4 as (select c1, c3, m2 from t3)
select * from t4
where c1 > 0
order by c3, c1 desc
limit 3
---- PLAN
PLAN-ROOT SINK
|
05:TOP-N [LIMIT=3]
| order by: c3 ASC, c1 DESC
| row-size=9B cardinality=1
|
04:SELECT
| predicates: c1 > 0
| row-size=9B cardinality=1
|
03:AGGREGATE [FINALIZE]
| output: max(tinyint_col)
| group by: int_col, max(id)
| limit: 10
| row-size=9B cardinality=5
|
02:TOP-N [LIMIT=5]
| order by: int_col ASC, tinyint_col ASC
| row-size=9B cardinality=5
|
01:AGGREGATE [FINALIZE]
| output: max(id)
| group by: int_col, tinyint_col
| row-size=9B cardinality=100
|
00:SCAN HDFS [functional.alltypessmall]
partitions=4/4 files=4 size=6.32KB
row-size=9B cardinality=100
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
05:TOP-N [LIMIT=3]
| order by: c3 ASC, c1 DESC
| row-size=9B cardinality=1
|
04:SELECT
| predicates: c1 > 0
| row-size=9B cardinality=1
|
03:AGGREGATE [FINALIZE]
| output: max(tinyint_col)
| group by: int_col, max(id)
| limit: 10
| row-size=9B cardinality=5
|
08:MERGING-EXCHANGE [UNPARTITIONED]
| order by: int_col ASC, tinyint_col ASC
| limit: 5
|
02:TOP-N [LIMIT=5]
| order by: int_col ASC, tinyint_col ASC
| row-size=9B cardinality=5
|
07:AGGREGATE [FINALIZE]
| output: max:merge(id)
| group by: int_col, tinyint_col
| row-size=9B cardinality=100
|
06:EXCHANGE [HASH(int_col,tinyint_col)]
|
01:AGGREGATE [STREAMING]
| output: max(id)
| group by: int_col, tinyint_col
| row-size=9B cardinality=100
|
00:SCAN HDFS [functional.alltypessmall]
partitions=4/4 files=4 size=6.32KB
row-size=9B cardinality=100
====
# Self-join of with-clause table to make sure the on clause is properly set
# in the cloned inline-view instances.
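# Each of the three references to t should map to its own scan of
# functional.alltypestiny, joined on int_col.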
with t as (select int_col x, bigint_col y from functional.alltypestiny)
select * from t t1 inner join t t2 on (t1.x = t2.x) inner join t t3 on (t2.x = t3.x)
---- PLAN
PLAN-ROOT SINK
|
04:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
08:EXCHANGE [UNPARTITIONED]
|
04:HASH JOIN [INNER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--07:EXCHANGE [HASH(int_col)]
| |
| 02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [INNER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--06:EXCHANGE [HASH(int_col)]
| |
| 01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
05:EXCHANGE [HASH(int_col)]
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
====
# Self-join of with-clause table to make sure the using clause is properly set
# in the cloned inline-view instances.
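# The using(x) clauses should produce the same int_col equality predicates as
# the explicit on clauses in the previous test.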
with t as (select int_col x, bigint_col y from functional.alltypestiny)
select * from t t1 inner join t t2 using(x) inner join t t3 using(x)
---- PLAN
PLAN-ROOT SINK
|
04:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
08:EXCHANGE [UNPARTITIONED]
|
04:HASH JOIN [INNER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--07:EXCHANGE [HASH(int_col)]
| |
| 02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [INNER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--06:EXCHANGE [HASH(int_col)]
| |
| 01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
05:EXCHANGE [HASH(int_col)]
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
====
# Self-join of with-clause table to make sure the join op is properly set
# in the cloned inline-view instances.
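# The LEFT OUTER and FULL OUTER join operators should be preserved in the
# cloned instances, and no runtime filters appear in the plans below.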
with t as (select int_col x, bigint_col y from functional.alltypestiny)
select * from t t1 left outer join t t2 using(x) full outer join t t3 using(x)
---- PLAN
PLAN-ROOT SINK
|
04:HASH JOIN [FULL OUTER JOIN]
| hash predicates: int_col = int_col
| row-size=36B cardinality=128
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [LEFT OUTER JOIN]
| hash predicates: int_col = int_col
| row-size=24B cardinality=32
|
|--01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=12B cardinality=8
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
08:EXCHANGE [UNPARTITIONED]
|
04:HASH JOIN [FULL OUTER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| row-size=36B cardinality=128
|
|--07:EXCHANGE [HASH(int_col)]
| |
| 02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [LEFT OUTER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| row-size=24B cardinality=32
|
|--06:EXCHANGE [HASH(int_col)]
| |
| 01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
05:EXCHANGE [HASH(int_col)]
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=12B cardinality=8
====
# Self-join of with-clause table to make sure join hints are properly set
# in the cloned inline-view instances.
# Note that in the plan above without hints both joins use the partitioned
# (shuffle) strategy; the hints below force the first join to broadcast while
# the second remains partitioned.
with t as (select int_col x, bigint_col y from functional.alltypestiny)
select * from t t1 inner join [broadcast] t t2 using(x) inner join [shuffle] t t3 using(x)
---- PLAN
PLAN-ROOT SINK
|
04:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
03:HASH JOIN [INNER JOIN]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
08:EXCHANGE [UNPARTITIONED]
|
04:HASH JOIN [INNER JOIN, PARTITIONED]
| hash predicates: int_col = int_col
| runtime filters: RF000 <- int_col
| row-size=36B cardinality=128
|
|--07:EXCHANGE [HASH(int_col)]
| |
| 02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=12B cardinality=8
|
06:EXCHANGE [HASH(int_col)]
|
03:HASH JOIN [INNER JOIN, BROADCAST]
| hash predicates: int_col = int_col
| runtime filters: RF002 <- int_col
| row-size=24B cardinality=32
|
|--05:EXCHANGE [BROADCAST]
| |
| 01:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| runtime filters: RF000 -> int_col
| row-size=12B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
runtime filters: RF000 -> functional.alltypestiny.int_col, RF002 -> int_col
row-size=12B cardinality=8
====
# Multiple with clauses. One for the UnionStmt and one for each union operand.
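# The values clauses become constant-operand UNION nodes and the order by/limit
# becomes a TOP-N over the outer UNION; the unused view t1 does not appear.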
with t1 as (values('a', 'b'))
(with t2 as (values('c', 'd')) select * from t2) union all
(with t3 as (values('e', 'f')) select * from t3) order by 1 limit 1
---- PLAN
PLAN-ROOT SINK
|
03:TOP-N [LIMIT=1]
| order by: 'c' ASC
| row-size=24B cardinality=1
|
00:UNION
| row-size=24B cardinality=2
|
|--02:UNION
| constant-operands=1
| row-size=24B cardinality=1
|
01:UNION
constant-operands=1
row-size=24B cardinality=1
---- DISTRIBUTEDPLAN
PLAN-ROOT SINK
|
03:TOP-N [LIMIT=1]
| order by: 'c' ASC
| row-size=24B cardinality=1
|
00:UNION
| row-size=24B cardinality=2
|
|--02:UNION
| constant-operands=1
| row-size=24B cardinality=1
|
01:UNION
constant-operands=1
row-size=24B cardinality=1
====
# Test with clause in an insert statement.
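# The insert plan should add a SORT on the dynamic partition keys (year, month)
# before the HDFS table sink.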
with t1 as (select * from functional.alltypestiny)
insert into functional.alltypesinsert partition(year, month) select * from t1
---- PLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=4
|
01:SORT
| order by: year ASC NULLS LAST, month ASC NULLS LAST
| row-size=89B cardinality=8
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=4
|
02:SORT
| order by: year ASC NULLS LAST, month ASC NULLS LAST
| row-size=89B cardinality=8
|
01:EXCHANGE [HASH(functional.alltypestiny.year,functional.alltypestiny.month)]
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
====
# Test with clause in an insert statement and in its query statement.
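# Both t1 and t2 resolve to scans of functional.alltypestiny; their UNION is
# sorted on the partition keys before the write.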
with t1 as (select * from functional.alltypestiny)
insert into functional.alltypesinsert partition(year, month)
with t2 as (select * from functional.alltypestiny)
select * from t1 union all select * from t2
---- PLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=16
|
03:SORT
| order by: year ASC NULLS LAST, month ASC NULLS LAST
| row-size=89B cardinality=16
|
00:UNION
| row-size=89B cardinality=16
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=89B cardinality=8
|
01:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=16
|
04:SORT
| order by: year ASC NULLS LAST, month ASC NULLS LAST
| row-size=89B cardinality=16
|
03:EXCHANGE [HASH(year,month)]
|
00:UNION
| row-size=89B cardinality=16
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=89B cardinality=8
|
01:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
====
# IMPALA-5293: Test with clause in an insert statement and in its query statement. Make
# sure that noclustered hint prevents addition of a sort node before writing to HDFS.
with t1 as (select * from functional.alltypestiny)
insert into functional.alltypesinsert partition(year, month) /* +noclustered */
with t2 as (select * from functional.alltypestiny)
select * from t1 union all select * from t2
---- PLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=16
|
00:UNION
| row-size=89B cardinality=16
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=89B cardinality=8
|
01:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
---- DISTRIBUTEDPLAN
WRITE TO HDFS [functional.alltypesinsert, OVERWRITE=false, PARTITION-KEYS=(year,month)]
| partitions=16
|
03:EXCHANGE [HASH(year,month)]
|
00:UNION
| row-size=89B cardinality=16
|
|--02:SCAN HDFS [functional.alltypestiny]
| partitions=4/4 files=4 size=460B
| row-size=89B cardinality=8
|
01:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
row-size=89B cardinality=8
====
# Test with clause with a query statement that references the same column from a
# base table multiple times (IMPALA-1412).
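# The filter between the two aliases should be pushed into the scan as the
# predicate 'bigint_col = bigint_col'.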
with t1 as (
select tinyint_col, tinyint_col as tinyint_col2, bigint_col, bigint_col as bigint_col2
from functional.alltypestiny)
select * from t1 where bigint_col = bigint_col2
---- PLAN
PLAN-ROOT SINK
|
00:SCAN HDFS [functional.alltypestiny]
partitions=4/4 files=4 size=460B
predicates: bigint_col = bigint_col
row-size=9B cardinality=1
====
# IMPALA-2414: Test basic correlated WITH clause view.
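# The correlated view becomes a SUBPLAN with an UNNEST of t.int_array_col joined
# to the parent row on pos = t.id.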
select pos from functional.allcomplextypes t inner join
(with w as (select pos from t.int_array_col)
select pos from w) v
on v.pos = t.id
---- PLAN
PLAN-ROOT SINK
|
01:SUBPLAN
| row-size=24B cardinality=0
|
|--04:NESTED LOOP JOIN [INNER JOIN]
| | join predicates: pos = t.id
| | row-size=24B cardinality=10
| |
| |--02:SINGULAR ROW SRC
| | row-size=16B cardinality=1
| |
| 03:UNNEST [t.int_array_col]
| row-size=8B cardinality=10
|
00:SCAN HDFS [functional.allcomplextypes t]
partitions=0/0 files=0 size=0B
row-size=16B cardinality=0
====
# IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
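# The nested views collapse into a single SUBPLAN that cross-joins an UNNEST of
# t.int_array_col with the parent row.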
select pos from functional.allcomplextypes t inner join
(with w1 as (with w2 as (select pos from t.int_array_col) select * from w2)
select pos from w1) v
---- PLAN
PLAN-ROOT SINK
|
01:SUBPLAN
| row-size=20B cardinality=0
|
|--04:NESTED LOOP JOIN [CROSS JOIN]
| | row-size=20B cardinality=10
| |
| |--02:SINGULAR ROW SRC
| | row-size=12B cardinality=1
| |
| 03:UNNEST [t.int_array_col]
| row-size=8B cardinality=10
|
00:SCAN HDFS [functional.allcomplextypes t]
partitions=0/0 files=0 size=0B
row-size=12B cardinality=0
====
# IMPALA-2414: Test correlated WITH clause view nested in another WITH clause.
with w1 as (select pos from functional.allcomplextypes t,
(with w2 as (select pos from t.int_array_col) select * from w2) v)
select * from w1
---- PLAN
PLAN-ROOT SINK
|
01:SUBPLAN
| row-size=20B cardinality=0
|
|--04:NESTED LOOP JOIN [CROSS JOIN]
| | row-size=20B cardinality=10
| |
| |--02:SINGULAR ROW SRC
| | row-size=12B cardinality=1
| |
| 03:UNNEST [t.int_array_col]
| row-size=8B cardinality=10
|
00:SCAN HDFS [functional.allcomplextypes t]
partitions=0/0 files=0 size=0B
row-size=12B cardinality=0
====
# IMPALA-2414: Test multiple correlated WITH clause views that are joined.
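# Each of the four view references should produce its own UNNEST inside a single
# SUBPLAN; the cross-joined result is joined back to the parent on value = t.id.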
select pos from functional.allcomplextypes t inner join
(with w1 as (select pos, item from t.int_array_col),
w2 as (select key, value from t.map_map_col.value)
select a1.*, m2.* from w1 a1, w1 a2, w2 m1, w2 m2) v on v.value = t.id
---- PLAN
PLAN-ROOT SINK
|
01:SUBPLAN
| row-size=64B cardinality=0
|
|--10:NESTED LOOP JOIN [INNER JOIN]
| | join predicates: value = t.id
| | row-size=64B cardinality=10.00K
| |
| |--02:SINGULAR ROW SRC
| | row-size=52B cardinality=1
| |
| 09:NESTED LOOP JOIN [CROSS JOIN]
| | row-size=12B cardinality=10.00K
| |
| |--06:UNNEST [t.map_map_col.value]
| | row-size=4B cardinality=10
| |
| 08:NESTED LOOP JOIN [CROSS JOIN]
| | row-size=8B cardinality=1.00K
| |
| |--05:UNNEST [t.map_map_col.value]
| | row-size=0B cardinality=10
| |
| 07:NESTED LOOP JOIN [CROSS JOIN]
| | row-size=8B cardinality=100
| |
| |--04:UNNEST [t.int_array_col]
| | row-size=0B cardinality=10
| |
| 03:UNNEST [t.int_array_col]
| row-size=8B cardinality=10
|
00:SCAN HDFS [functional.allcomplextypes t]
partitions=0/0 files=0 size=0B
row-size=52B cardinality=0
====