-- This test verifies ORCA plans when one side of a join is
-- of universal spec. Historically, we enforced the universal
-- side to be joined with a singleton to avoid duplicates.
-- This is overly conservative. Instead, we should be able to
-- join universal with any deduplicated input, as long as the
-- join doesn't return all records from the universal side.
-- start_matchsubs
-- m/Memory Usage: \d+\w?B/
-- s/Memory Usage: \d+\w?B/Memory Usage: ###B/
-- m/Memory: \d+kB/
-- s/Memory: \d+kB/Memory: ###kB/
-- m/Buckets: \d+/
-- s/Buckets: \d+/Buckets: ###/
-- m/Hash chain length \d+\.\d+ avg, \d+ max/
-- s/Hash chain length \d+\.\d+ avg, \d+ max/Hash chain length ###/
-- m/using \d+ of \d+ buckets/
-- s/using \d+ of \d+ buckets/using ## of ### buckets/
-- m/Extra Text: \(seg\d+\)/
-- s/Extra Text: \(seg\d+\)/Extra Text: \(seg#\)/
-- end_matchsubs
-- start_ignore
drop schema if exists join_universal cascade;
NOTICE: schema "join_universal" does not exist, skipping
-- end_ignore
-- greenplum
create schema join_universal;
set search_path=join_universal;
set optimizer_trace_fallback=on;
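-- (With optimizer_trace_fallback on, ORCA reports whenever it
-- falls back to the Postgres planner; none of the queries below
-- are expected to trigger such a fallback notice.)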
-- distributed
create table dist (c1 int) distributed by (c1);
insert into dist select i from generate_series(1,999) i;
-- randomly distributed
create table rand (c1 int) distributed randomly;
insert into rand select i from generate_series(1,999) i;
-- replicated
create table rep (c1 int) distributed replicated;
insert into rep select i from generate_series(1,999) i;
-- partitioned
create table part (c1 int, c2 int) partition by list(c2) (
partition part1 values (1, 2, 3, 4),
partition part2 values (5, 6, 7),
partition part3 values (8, 9, 0));
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c1' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into part select i, i%10 from generate_series(1, 999) i;
-- const tvf (universal)
-- This tvf is defined as volatile, but since it's not
-- used as a scan operator, its distribution spec is
-- still universal instead of singleton.
-- We avoid the "immutable" keyword so that the tvf
-- execution doesn't fall back due to lack of support
-- for Query Parameter.
create function const_tvf(a int) returns int as $$ select $1 $$ language sql;
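-- For illustration only (a hypothetical variant, not executed as
-- part of this test): per the comment above, marking the function
-- immutable would risk a fallback during execution:
--   create function const_tvf_imm(a int) returns int
--   as $$ select $1 $$ language sql immutable;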
-- unnested array (universal)
create view unnest_arr as (select unnest(string_to_array('-3,-2,-1,0,1,2,3',','))::int c1);
-- generate_series (universal)
create view gen_series as (select generate_series(-10,10) c1);
analyze dist;
analyze rand;
analyze rep;
analyze part;
-- Testing hash join
set optimizer_enable_hashjoin = on;
-- distributed ⋈ universal
-- We no longer enforce the outer side to be a singleton
-- when the inner side is universal. This allows us to
-- hash the much smaller universal table, instead of
-- the much larger distributed table.
explain (analyze, costs off, timing off, summary off) select * from dist join const_tvf(1) ct(c1) on dist.c1 = ct.c1;
QUERY PLAN
----------------------------------------------------------------------------------
Gather Motion 1:1 (slice1; segments: 1) (actual rows=1 loops=1)
-> Hash Join (actual rows=1 loops=1)
Hash Cond: (c1 = (1))
Extra Text: Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Seq Scan on dist (actual rows=1 loops=1)
Filter: (c1 = 1)
Rows Removed by Filter: 321
-> Hash (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (actual rows=1 loops=1)
Filter: ((1) = 1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(13 rows)
explain (analyze, costs off, timing off, summary off) select * from dist join unnest_arr on dist.c1 = unnest_arr.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
-> Hash Join (actual rows=2 loops=1)
Hash Cond: (c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
-> Seq Scan on dist (actual rows=340 loops=1)
-> Hash (actual rows=7 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
explain (analyze, costs off, timing off, summary off) select * from dist join gen_series on dist.c1 = gen_series.c1;
QUERY PLAN
--------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=10 loops=1)
-> Hash Join (actual rows=5 loops=1)
Hash Cond: (c1 = (generate_series('-10'::integer, 10)))
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
-> Seq Scan on dist (actual rows=340 loops=1)
-> Hash (actual rows=21 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(10 rows)
-- randomly distributed ⋈ universal
-- We get the same plans as above, since no motion is
-- needed when joining with a universal table
-- (We don't flag row count diffs in the following tests,
-- because the row counts of intermediate physical
-- operators are expected to fluctuate for randomly
-- distributed tables.)
explain (analyze, timing off, summary off) select * from rand join const_tvf(1) ct(c1) on rand.c1 = ct.c1;
QUERY PLAN
------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..431.02 rows=1 width=8) (actual rows=1 loops=1)
-> Hash Join (cost=0.00..431.02 rows=1 width=8) (actual rows=1 loops=1)
Hash Cond: (c1 = (1))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Seq Scan on rand (cost=0.00..431.02 rows=1 width=4) (actual rows=1 loops=1)
Filter: (c1 = 1)
Rows Removed by Filter: 322
-> Hash (cost=0.00..0.00 rows=1 width=4) (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (cost=0.00..0.00 rows=1 width=4) (actual rows=1 loops=1)
Filter: ((1) = 1)
-> Result (cost=0.00..0.00 rows=1 width=1) (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(13 rows)
explain (analyze, timing off, summary off) select * from rand join unnest_arr on rand.c1 = unnest_arr.c1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..431.11 rows=999 width=8) (actual rows=3 loops=1)
-> Hash Join (cost=0.00..431.08 rows=333 width=8) (actual rows=2 loops=1)
Hash Cond: (c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
-> Seq Scan on rand (cost=0.00..431.01 rows=333 width=4) (actual rows=338 loops=1)
-> Hash (cost=0.00..0.00 rows=1 width=4) (actual rows=7 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (cost=0.00..0.00 rows=1 width=4) (actual rows=7 loops=1)
-> ProjectSet (cost=0.00..0.00 rows=1 width=4) (actual rows=7 loops=1)
-> Result (cost=0.00..0.00 rows=1 width=1) (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
explain (analyze, timing off, summary off) select * from rand join gen_series on rand.c1 = gen_series.c1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..431.11 rows=999 width=8) (actual rows=10 loops=1)
-> Hash Join (cost=0.00..431.08 rows=333 width=8) (actual rows=7 loops=1)
Hash Cond: (c1 = (generate_series('-10'::integer, 10)))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
-> Seq Scan on rand (cost=0.00..431.01 rows=333 width=4) (actual rows=338 loops=1)
-> Hash (cost=0.00..0.00 rows=1 width=4) (actual rows=21 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> ProjectSet (cost=0.00..0.00 rows=1 width=4) (actual rows=21 loops=1)
-> Result (cost=0.00..0.00 rows=1 width=1) (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(10 rows)
-- replicated ⋈ universal
-- Replicated joined with universal needs deduplication.
-- This is achieved by a one-time segment filter
-- (duplicate-sensitive random motion) on the universal side.
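-- For illustration (a hedged sketch, not part of the captured
-- output): gp_execution_segment() reports which segment produced
-- a row, e.g.
--   select distinct gp_execution_segment() from rep;
-- The one-time filters in the plans below compare it against a
-- single segment id, so only one copy of the universal input
-- survives the filter.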
explain (analyze, costs off, timing off, summary off) select * from rep join const_tvf(1) ct(c1) on rep.c1 = ct.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=1 loops=1)
-> Hash Join (actual rows=1 loops=1)
Hash Cond: ((1) = c1)
Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Result (actual rows=1 loops=1)
One-Time Filter: (gp_execution_segment() = 1)
-> Result (actual rows=1 loops=1)
Filter: ((1) = 1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Seq Scan on rep (actual rows=1 loops=1)
Filter: (c1 = 1)
Rows Removed by Filter: 998
Optimizer: Pivotal Optimizer (GPORCA)
(15 rows)
explain (analyze, costs off, timing off, summary off) select * from rep join unnest_arr on rep.c1 = unnest_arr.c1;
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
-> Hash Join (actual rows=3 loops=1)
Hash Cond: ((((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer) = c1)
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 999 of 524288 buckets.
-> Result (actual rows=7 loops=1)
One-Time Filter: (gp_execution_segment() = 2)
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=999 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4132kB
-> Seq Scan on rep (actual rows=999 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(13 rows)
explain (analyze, costs off, timing off, summary off) select * from rep join gen_series on rep.c1 = gen_series.c1;
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=10 loops=1)
-> Hash Join (actual rows=10 loops=1)
Hash Cond: ((generate_series('-10'::integer, 10)) = c1)
Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 999 of 524288 buckets.
-> Result (actual rows=21 loops=1)
One-Time Filter: (gp_execution_segment() = 1)
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=999 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4132kB
-> Seq Scan on rep (actual rows=999 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(12 rows)
-- partitioned ⋈ universal
-- We no longer enforce the outer side to be a singleton
-- when the inner side is universal. This allows the
-- propagation of the partition selector, and enables
-- dynamic partition elimination (DPE).
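-- For comparison (a hedged sketch, not executed here): with a
-- constant predicate the elimination is static rather than
-- dynamic, e.g.
--   select * from part where c2 = 1;
-- prunes to a single partition without a Partition Selector.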
explain (analyze, costs off, timing off, summary off) select * from part join const_tvf(1) ct(c1) on part.c2 = ct.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=100 loops=1)
-> Hash Join (actual rows=43 loops=1)
Hash Cond: (c2 = (1))
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Dynamic Seq Scan on part (actual rows=43 loops=1)
Number of partitions to scan: 1 (out of 3)
Filter: (c2 = 1)
Partitions scanned: Avg 1.0 x 3 workers. Max 1 parts (seg0).
-> Hash (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Partition Selector (selector id: $0) (actual rows=1 loops=1)
-> Result (actual rows=1 loops=1)
Filter: ((1) = 1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(15 rows)
explain (analyze, costs off, timing off, summary off) select * from part join unnest_arr on part.c2 = unnest_arr.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=399 loops=1)
-> Hash Join (actual rows=150 loops=1)
Hash Cond: (c2 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
-> Dynamic Seq Scan on part (actual rows=245 loops=1)
Number of partitions to scan: 3 (out of 3)
Partitions scanned: Avg 2.0 x 3 workers. Max 2 parts (seg0).
-> Hash (actual rows=7 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Partition Selector (selector id: $0) (actual rows=7 loops=1)
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(14 rows)
explain (analyze, costs off, timing off, summary off) select * from part join gen_series on part.c2 = gen_series.c1;
QUERY PLAN
--------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
-> Hash Join (actual rows=340 loops=1)
Hash Cond: (c2 = (generate_series('-10'::integer, 10)))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
-> Dynamic Seq Scan on part (actual rows=340 loops=1)
Number of partitions to scan: 3 (out of 3)
Partitions scanned: Avg 3.0 x 3 workers. Max 3 parts (seg0).
-> Hash (actual rows=21 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Partition Selector (selector id: $0) (actual rows=21 loops=1)
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(13 rows)
-- distributed ⟕ universal
-- We get the same plans as for the inner join, since the
-- outer side is already deduplicated.
explain (analyze, costs off, timing off, summary off) select * from dist left join const_tvf(1) ct(c1) on dist.c1 = ct.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
-> Hash Left Join (actual rows=340 loops=1)
Hash Cond: (c1 = (1))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Seq Scan on dist (actual rows=340 loops=1)
-> Hash (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(9 rows)
explain (analyze, costs off, timing off, summary off) select * from dist left join unnest_arr on dist.c1 = unnest_arr.c1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
-> Hash Left Join (actual rows=340 loops=1)
Hash Cond: (c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
-> Seq Scan on dist (actual rows=340 loops=1)
-> Hash (actual rows=7 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
explain (analyze, costs off, timing off, summary off) select * from dist left join gen_series on dist.c1 = gen_series.c1;
QUERY PLAN
--------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
-> Hash Left Join (actual rows=340 loops=1)
Hash Cond: (c1 = (generate_series('-10'::integer, 10)))
Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
-> Seq Scan on dist (actual rows=340 loops=1)
-> Hash (actual rows=21 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(10 rows)
-- universal ⟕ distributed
-- Since left join returns all the records from the universal
-- side, that side needs to be deduplicated. This is achieved
-- by a hash filter (duplicate-sensitive hash motion).
-- (The test of const TVF left join distributed table is flaky
-- and is turned off: ORCA generates two alternatives, left
-- join and right join, that happen to have the same cost.)
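-- For reference (a sketch per the note above, kept commented out
-- because its plan choice is flaky):
--   select * from const_tvf(1) ct(c1) left join dist on dist.c1 = ct.c1;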
explain (analyze, costs off, timing off, summary off) select * from unnest_arr left join dist on dist.c1 = unnest_arr.c1;
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=7 loops=1)
-> Hash Left Join (actual rows=4 loops=1)
Hash Cond: ((((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer) = c1)
Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
-> Result (actual rows=4 loops=1)
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=340 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4108kB
-> Seq Scan on dist (actual rows=340 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(12 rows)
explain (analyze, costs off, timing off, summary off) select * from gen_series left join dist on dist.c1 = gen_series.c1;
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=21 loops=1)
-> Hash Left Join (actual rows=8 loops=1)
Hash Cond: ((generate_series('-10'::integer, 10)) = c1)
Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 337 of 524288 buckets.
-> Result (actual rows=8 loops=1)
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=340 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4108kB
-> Seq Scan on dist (actual rows=340 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
-- universal ▷ distributed
-- Since anti join returns all the records from the universal
-- side that have no match on the deduplicated side, the
-- universal side needs to be deduplicated. This is achieved
-- by a hash filter (duplicate-sensitive hash motion).
explain (analyze, costs off, timing off, summary off) select * from const_tvf(1) ct(c1) where not exists (select 1 from dist where dist.c1 = ct.c1);
QUERY PLAN
------------------------------------------------------------------------------------
Hash Anti Join (actual rows=0 loops=1)
Hash Cond: ((1) = c1)
Extra Text: Hash chain length 1.0 avg, 1 max, using 1 of 524288 buckets.
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=1 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4097kB
-> Result (actual rows=1 loops=1)
-> Gather Motion 3:1 (slice1; segments: 3) (actual rows=1 loops=1)
-> Seq Scan on dist (actual rows=1 loops=1)
Filter: (c1 = 1)
Rows Removed by Filter: 321
Optimizer: Pivotal Optimizer (GPORCA)
(12 rows)
explain (analyze, costs off, timing off, summary off) select * from unnest_arr where not exists (select 1 from dist where dist.c1 = unnest_arr.c1);
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=4 loops=1)
-> Hash Anti Join (actual rows=3 loops=1)
Hash Cond: ((((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer) = c1)
Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
-> Result (actual rows=4 loops=1)
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=340 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4110kB
-> Seq Scan on dist (actual rows=340 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(12 rows)
explain (analyze, costs off, timing off, summary off) select * from gen_series where not exists (select 1 from dist where dist.c1 = gen_series.c1);
QUERY PLAN
---------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=11 loops=1)
-> Hash Anti Join (actual rows=6 loops=1)
Hash Cond: ((generate_series('-10'::integer, 10)) = c1)
Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
-> Result (actual rows=8 loops=1)
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
-> Hash (actual rows=340 loops=1)
Buckets: 524288 Batches: 1 Memory Usage: 4110kB
-> Seq Scan on dist (actual rows=340 loops=1)
Optimizer: Pivotal Optimizer (GPORCA)
(11 rows)
-- Testing inner nested loop join
set optimizer_enable_hashjoin = off;
-- We no longer enforce the inner side to be a singleton
-- when the outer side is universal. It just needs to be
-- non-replicated, since the inner join output is deduplicated.
explain (analyze, costs off, timing off, summary off) select * from dist join const_tvf(1) ct(c1) on dist.c1 < ct.c1;
QUERY PLAN
------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=0 loops=1)
-> Nested Loop (actual rows=0 loops=1)
Join Filter: (c1 < (1))
Rows Removed by Join Filter: 337
-> Result (actual rows=1 loops=1)
-> Seq Scan on dist (actual rows=170 loops=2)
Optimizer: Pivotal Optimizer (GPORCA)
(7 rows)
explain (analyze, costs off, timing off, summary off) select * from dist join unnest_arr on dist.c1 < unnest_arr.c1;
QUERY PLAN
---------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
-> Nested Loop (actual rows=2 loops=1)
Join Filter: (c1 < (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
Rows Removed by Join Filter: 2252
-> Result (actual rows=7 loops=1)
-> ProjectSet (actual rows=7 loops=1)
-> Result (actual rows=1 loops=1)
-> Seq Scan on dist (actual rows=298 loops=8)
Optimizer: Pivotal Optimizer (GPORCA)
(9 rows)
explain (analyze, costs off, timing off, summary off) select * from dist join gen_series on dist.c1 < gen_series.c1;
QUERY PLAN
-------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (actual rows=45 loops=1)
-> Nested Loop (actual rows=26 loops=1)
Join Filter: (c1 < (generate_series('-10'::integer, 10)))
Rows Removed by Join Filter: 7051
-> ProjectSet (actual rows=21 loops=1)
-> Result (actual rows=1 loops=1)
-> Seq Scan on dist (actual rows=325 loops=22)
Optimizer: Pivotal Optimizer (GPORCA)
(8 rows)