-- This test verifies the plans ORCA generates when one side of a
-- join has the universal distribution spec. Historically, we
-- enforced that the universal side be joined with a singleton to
-- avoid duplicates. That is overly conservative: universal can be
-- joined with any deduplicated input, as long as the join doesn't
-- return all records from the universal side.
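-- (Illustration only, not part of this test's expected output: an
-- input has the universal spec when every segment can compute it in
-- full from constants, e.g.
--   select * from generate_series(1, 3) g(c1);
-- so it can feed a join on any segment without a motion.)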
-- start_matchsubs
-- m/Memory Usage: \d+\w?B/
-- s/Memory Usage: \d+\w?B/Memory Usage: ###B/
-- m/Memory: \d+kB/
-- s/Memory: \d+kB/Memory: ###kB/
-- m/Buckets: \d+/
-- s/Buckets: \d+/Buckets: ###/
-- m/Hash chain length \d+\.\d+ avg, \d+ max/
-- s/Hash chain length \d+\.\d+ avg, \d+ max/Hash chain length ###/
-- m/using \d+ of \d+ buckets/
-- s/using \d+ of \d+ buckets/using ## of ### buckets/
-- m/Extra Text: \(seg\d+\)/
-- s/Extra Text: \(seg\d+\)/Extra Text: \(seg#\)/
-- end_matchsubs
-- start_ignore
drop schema if exists join_universal cascade;
NOTICE:  schema "join_universal" does not exist, skipping
-- end_ignore
-- greenplum
create schema join_universal;
set search_path=join_universal;
set optimizer_trace_fallback=on;
-- distributed
create table dist (c1 int) distributed by (c1);
insert into dist select i from generate_series(1,999) i;
-- randomly distributed
create table rand (c1 int) distributed randomly;
insert into rand select i from generate_series(1,999) i;
-- replicated
create table rep (c1 int) distributed replicated;
insert into rep select i from generate_series(1,999) i;
-- partitioned
create table part (c1 int, c2 int) partition by list(c2) (
partition part1 values (1, 2, 3, 4),
partition part2 values (5, 6, 7),
partition part3 values (8, 9, 0));
NOTICE:  Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c1' as the Greenplum Database data distribution key for this table.
HINT:  The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into part select i, i%10 from generate_series(1, 999) i;
-- const tvf (universal)
-- This tvf is volatile (the default), but since it's not
-- used as a scan operator, its distribution spec is
-- still universal instead of singleton.
-- We avoid the "immutable" keyword so that the tvf
-- execution doesn't fall back due to lack of support
-- for Query Parameter.
create function const_tvf(a int) returns int as $$ select $1 $$ language sql;
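-- (Illustration only: const_tvf simply echoes its argument, e.g.
--   select const_tvf(42);
-- returns 42.)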
-- unnested array (universal)
create view unnest_arr as (select unnest(string_to_array('-3,-2,-1,0,1,2,3',','))::int c1);
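-- (Illustration only: select * from unnest_arr; yields the seven
-- integers -3 .. 3.)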
-- generate_series (universal)
create view gen_series as (select generate_series(-10,10) c1);
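-- (Illustration only: select * from gen_series; yields the 21
-- integers -10 .. 10.)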
analyze dist;
analyze rand;
analyze rep;
analyze part;
-- Testing hash join
set optimizer_enable_hashjoin = on;
-- distributed ⋈ universal
-- We no longer enforce the outer side to be a singleton
-- when the inner side is universal. This allows us to
-- hash the much smaller universal input, instead of
-- the much larger distributed table.
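-- (Illustration only, not part of the expected output: the join
-- keeps exactly one row per match, e.g.
--   select count(*) from dist join gen_series on dist.c1 = gen_series.c1;
-- returns 10, since only the keys 1..10 appear in both inputs.)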
explain (analyze, costs off, timing off, summary off) select * from dist join const_tvf(1) ct(c1) on dist.c1 = ct.c1;
                            QUERY PLAN                            
------------------------------------------------------------------
 Gather Motion 1:1 (slice1; segments: 1) (actual rows=1 loops=1)
   ->  Seq Scan on dist (actual rows=1 loops=1)
         Filter: (c1 = 1)
         Rows Removed by Filter: 321
 Optimizer: Postgres query optimizer
(5 rows)

explain (analyze, costs off, timing off, summary off) select * from dist join unnest_arr on dist.c1 = unnest_arr.c1;
                                        QUERY PLAN                                         
-------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
   ->  Hash Join (actual rows=2 loops=1)
         Hash Cond: (dist.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=7 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Result (actual rows=7 loops=1)
                     ->  ProjectSet (actual rows=7 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(11 rows)

explain (analyze, costs off, timing off, summary off) select * from dist join gen_series on dist.c1 = gen_series.c1;
                                         QUERY PLAN                                         
--------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=10 loops=1)
   ->  Hash Join (actual rows=5 loops=1)
         Hash Cond: (dist.c1 = (generate_series('-10'::integer, 10)))
         Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=21 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  ProjectSet (actual rows=21 loops=1)
                     ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(10 rows)

-- randomly distributed ⋈ universal
-- We get the same plans as above, since no motion is
-- needed when joining with a universal input.
-- (We don't flag row count diffs in the following tests,
-- because the row counts of intermediate physical
-- operators are expected to fluctuate for randomly
-- distributed tables.)
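-- (Illustration only: the per-segment row counts of a randomly
-- distributed table are nondeterministic, e.g.
--   select gp_segment_id, count(*) from rand group by 1;
-- returns roughly, but not exactly, even counts per segment.)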
explain (analyze, timing off, summary off) select * from rand join const_tvf(1) ct(c1) on rand.c1 = ct.c1;
                                             QUERY PLAN                                             
----------------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..5.18 rows=1 width=8) (actual rows=1 loops=1)
   ->  Seq Scan on rand (cost=0.00..5.16 rows=1 width=8) (actual rows=1 loops=1)
         Filter: (c1 = 1)
         Rows Removed by Filter: 346
 Optimizer: Postgres query optimizer
(5 rows)

explain (analyze, timing off, summary off) select * from rand join unnest_arr on rand.c1 = unnest_arr.c1;
                                              QUERY PLAN                                              
-----------------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (cost=0.33..5.66 rows=10 width=8) (actual rows=3 loops=1)
   ->  Hash Join (cost=0.33..5.53 rows=3 width=8) (actual rows=2 loops=1)
         Hash Cond: (rand.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
         ->  Seq Scan on rand (cost=0.00..4.33 rows=333 width=4) (actual rows=346 loops=1)
         ->  Hash (cost=0.25..0.25 rows=7 width=4) (actual rows=7 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Result (cost=0.00..0.18 rows=7 width=4) (actual rows=7 loops=1)
                     ->  ProjectSet (cost=0.00..0.05 rows=7 width=32) (actual rows=7 loops=1)
                           ->  Result (cost=0.00..0.01 rows=1 width=0) (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(11 rows)

explain (analyze, timing off, summary off) select * from rand join gen_series on rand.c1 = gen_series.c1;
                                               QUERY PLAN                                               
------------------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (cost=0.59..6.12 rows=21 width=8) (actual rows=10 loops=1)
   ->  Hash Join (cost=0.59..5.84 rows=7 width=8) (actual rows=4 loops=1)
         Hash Cond: (rand.c1 = (generate_series('-10'::integer, 10)))
         Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
         ->  Seq Scan on rand (cost=0.00..4.33 rows=333 width=4) (actual rows=346 loops=1)
         ->  Hash (cost=0.33..0.33 rows=21 width=4) (actual rows=21 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  ProjectSet (cost=0.00..0.12 rows=21 width=4) (actual rows=21 loops=1)
                     ->  Result (cost=0.00..0.01 rows=1 width=0) (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(10 rows)

-- replicated ⋈ universal
-- A replicated input joined with universal needs to be
-- deduplicated. This is achieved by a one-time segment filter
-- (a duplicate-sensitive random motion).
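-- (Illustration only: every segment stores a full copy of rep, so
-- without the filter a join against it could return each match
-- once per segment. E.g.
--   select count(*) from rep;
-- still returns 999, not 999 times the number of segments.)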
explain (analyze, costs off, timing off, summary off) select * from rep join const_tvf(1) ct(c1) on rep.c1 = ct.c1;
                            QUERY PLAN                            
------------------------------------------------------------------
 Gather Motion 1:1 (slice1; segments: 1) (actual rows=1 loops=1)
   ->  Seq Scan on rep (actual rows=1 loops=1)
         Filter: (c1 = 1)
         Rows Removed by Filter: 998
 Optimizer: Postgres query optimizer
(5 rows)

explain (analyze, costs off, timing off, summary off) select * from rep join unnest_arr on rep.c1 = unnest_arr.c1;
                                    QUERY PLAN                                     
-----------------------------------------------------------------------------------
 Gather Motion 1:1 (slice1; segments: 1) (actual rows=3 loops=1)
   ->  Hash Join (actual rows=3 loops=1)
         Hash Cond: (rep.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
         ->  Seq Scan on rep (actual rows=999 loops=1)
         ->  Hash (actual rows=7 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Result (actual rows=7 loops=1)
                     ->  ProjectSet (actual rows=7 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(11 rows)

explain (analyze, costs off, timing off, summary off) select * from rep join gen_series on rep.c1 = gen_series.c1;
                                    QUERY PLAN                                     
-----------------------------------------------------------------------------------
 Gather Motion 1:1 (slice1; segments: 1) (actual rows=10 loops=1)
   ->  Hash Join (actual rows=10 loops=1)
         Hash Cond: (rep.c1 = (generate_series('-10'::integer, 10)))
         Extra Text: Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
         ->  Seq Scan on rep (actual rows=999 loops=1)
         ->  Hash (actual rows=21 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  ProjectSet (actual rows=21 loops=1)
                     ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(10 rows)

-- partitioned ⋈ universal
-- We no longer enforce the outer side to be a singleton
-- when the inner side is universal. This allows the
-- partition selector to propagate, and enables dynamic
-- partition elimination (DPE).
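-- (For example, in the unnest_arr join below the selector prunes
-- partition part2 (list values 5, 6, 7): unnest_arr only produces
-- -3 .. 3, so the scan of part_1_prt_part2 shows "never executed".)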
explain (analyze, costs off, timing off, summary off) select * from part join const_tvf(1) ct(c1) on part.c2 = ct.c1;
                             QUERY PLAN                             
--------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=100 loops=1)
   ->  Seq Scan on part_1_prt_part1 part (actual rows=43 loops=1)
         Filter: (c2 = 1)
         Rows Removed by Filter: 107
 Optimizer: Postgres query optimizer
(5 rows)

explain (analyze, costs off, timing off, summary off) select * from part join unnest_arr on part.c2 = unnest_arr.c1;
                                        QUERY PLAN                                         
-------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=399 loops=1)
   ->  Hash Join (actual rows=150 loops=1)
         Hash Cond: (part.c2 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
         ->  Append (actual rows=245 loops=1)
               Partition Selectors: $0
               ->  Seq Scan on part_1_prt_part3 part_1 (actual rows=106 loops=1)
               ->  Seq Scan on part_1_prt_part1 part_2 (actual rows=150 loops=1)
               ->  Seq Scan on part_1_prt_part2 part_3 (never executed)
         ->  Hash (actual rows=7 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Partition Selector (selector id: $0) (actual rows=7 loops=1)
                     ->  Result (actual rows=7 loops=1)
                           ->  ProjectSet (actual rows=7 loops=1)
                                 ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(16 rows)

explain (analyze, costs off, timing off, summary off) select * from part join gen_series on part.c2 = gen_series.c1;
                                         QUERY PLAN                                         
--------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
   ->  Hash Join (actual rows=340 loops=1)
         Hash Cond: (part.c2 = (generate_series('-10'::integer, 10)))
         Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
         ->  Append (actual rows=340 loops=1)
               Partition Selectors: $0
               ->  Seq Scan on part_1_prt_part3 part_1 (actual rows=106 loops=1)
               ->  Seq Scan on part_1_prt_part1 part_2 (actual rows=150 loops=1)
               ->  Seq Scan on part_1_prt_part2 part_3 (actual rows=113 loops=1)
         ->  Hash (actual rows=21 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Partition Selector (selector id: $0) (actual rows=21 loops=1)
                     ->  ProjectSet (actual rows=21 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(15 rows)

-- distributed ⟕ universal
-- We get the same plans as in the inner-join case,
-- since the outer side is already deduplicated.
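-- (Illustration only: the left join preserves every row of dist,
-- e.g.
--   select count(*) from dist left join gen_series on dist.c1 = gen_series.c1;
-- returns 999, the row count of dist.)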
explain (analyze, costs off, timing off, summary off) select * from dist left join const_tvf(1) ct(c1) on dist.c1 = ct.c1;
                             QUERY PLAN                             
--------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
   ->  Nested Loop Left Join (actual rows=340 loops=1)
         Join Filter: (dist.c1 = 1)
         Rows Removed by Join Filter: 340
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Materialize (actual rows=1 loops=340)
               ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(8 rows)

explain (analyze, costs off, timing off, summary off) select * from dist left join unnest_arr on dist.c1 = unnest_arr.c1;
                                        QUERY PLAN                                         
-------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
   ->  Hash Left Join (actual rows=340 loops=1)
         Hash Cond: (dist.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 7 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=7 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Result (actual rows=7 loops=1)
                     ->  ProjectSet (actual rows=7 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(11 rows)

explain (analyze, costs off, timing off, summary off) select * from dist left join gen_series on dist.c1 = gen_series.c1;
                                         QUERY PLAN                                         
--------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=999 loops=1)
   ->  Hash Left Join (actual rows=340 loops=1)
         Hash Cond: (dist.c1 = (generate_series('-10'::integer, 10)))
         Extra Text: (seg2) Hash chain length 1.0 avg, 1 max, using 21 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=21 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  ProjectSet (actual rows=21 loops=1)
                     ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(10 rows)

-- universal ⟕ distributed
-- Since a left join returns all the records from the universal
-- side, that side needs to be deduplicated. This is achieved by
-- a hash filter (a duplicate-sensitive hash motion).
-- (The test of a const tvf left-joined with a distributed table
-- is flaky and has been turned off: ORCA generates two
-- alternatives, a left join and a right join, that happen to
-- have the same cost.)
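-- (Illustration only: deduplication leaves exactly one output row
-- per universal-side value, e.g.
--   select count(*) from gen_series left join dist on dist.c1 = gen_series.c1;
-- returns 21, matching the Gather Motion row count below.)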
explain (analyze, costs off, timing off, summary off) select * from unnest_arr left join dist on dist.c1 = unnest_arr.c1;
                                        QUERY PLAN                                         
-------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=7 loops=1)
   ->  Hash Right Join (actual rows=4 loops=1)
         Hash Cond: (dist.c1 = (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 4 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=4 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Redistribute Motion 1:3 (slice2; segments: 1) (actual rows=4 loops=1)
                     Hash Key: (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer)
                     ->  Result (actual rows=7 loops=1)
                           ->  ProjectSet (actual rows=7 loops=1)
                                 ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(13 rows)

explain (analyze, costs off, timing off, summary off) select * from gen_series left join dist on dist.c1 = gen_series.c1;
                                        QUERY PLAN                                         
-------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=21 loops=1)
   ->  Hash Right Join (actual rows=8 loops=1)
         Hash Cond: (dist.c1 = (generate_series('-10'::integer, 10)))
         Extra Text: (seg0) Hash chain length 1.0 avg, 1 max, using 8 of 524288 buckets.
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Hash (actual rows=8 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4097kB
               ->  Redistribute Motion 1:3 (slice2; segments: 1) (actual rows=8 loops=1)
                     Hash Key: (generate_series('-10'::integer, 10))
                     ->  ProjectSet (actual rows=21 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(12 rows)

-- universal ▷ distributed
-- Since an anti join returns all the records from the universal
-- side that have no match on the deduplicated side, the universal
-- side needs to be deduplicated. This is achieved by a hash
-- filter (a duplicate-sensitive hash motion).
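-- (Illustration only: the anti join keeps each universal value
-- that has no match in dist exactly once; e.g. for gen_series
-- these are -10 .. 0, so the plan below returns 11 rows.)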
explain (analyze, costs off, timing off, summary off) select * from const_tvf(1) ct(c1) where not exists (select 1 from dist where dist.c1 = ct.c1);
                                     QUERY PLAN                                     
------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=0 loops=1)
   ->  Hash Anti Join (actual rows=0 loops=1)
         Hash Cond: (($0) = dist.c1)
         ->  Redistribute Motion 1:3 (slice2; segments: 1) (actual rows=1 loops=1)
               Hash Key: ($0)
               ->  Result (actual rows=1 loops=1)
                     InitPlan 1 (returns $0) (slice3)
                       ->  Result (actual rows=1 loops=1)
         ->  Hash (actual rows=340 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4108kB
               ->  Seq Scan on dist (actual rows=340 loops=1)
 Optimizer: Postgres query optimizer
(12 rows)

explain (analyze, costs off, timing off, summary off) select * from unnest_arr where not exists (select 1 from dist where dist.c1 = unnest_arr.c1);
                                          QUERY PLAN                                          
----------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=4 loops=1)
   ->  Hash Anti Join (actual rows=3 loops=1)
         Hash Cond: ((((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer) = dist.c1)
         Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
         ->  Redistribute Motion 1:3 (slice2; segments: 1) (actual rows=4 loops=1)
               Hash Key: (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer)
               ->  Result (actual rows=7 loops=1)
                     ->  ProjectSet (actual rows=7 loops=1)
                           ->  Result (actual rows=1 loops=1)
         ->  Hash (actual rows=340 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4108kB
               ->  Seq Scan on dist (actual rows=340 loops=1)
 Optimizer: Postgres query optimizer
(13 rows)

explain (analyze, costs off, timing off, summary off) select * from gen_series where not exists (select 1 from dist where dist.c1 = gen_series.c1);
                                          QUERY PLAN                                          
----------------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=11 loops=1)
   ->  Hash Anti Join (actual rows=6 loops=1)
         Hash Cond: ((generate_series('-10'::integer, 10)) = dist.c1)
         Extra Text: (seg1) Hash chain length 1.0 avg, 1 max, using 322 of 524288 buckets.
         ->  Redistribute Motion 1:3 (slice2; segments: 1) (actual rows=8 loops=1)
               Hash Key: (generate_series('-10'::integer, 10))
               ->  ProjectSet (actual rows=21 loops=1)
                     ->  Result (actual rows=1 loops=1)
         ->  Hash (actual rows=340 loops=1)
               Buckets: 524288 Batches: 1 Memory Usage: 4108kB
               ->  Seq Scan on dist (actual rows=340 loops=1)
 Optimizer: Postgres query optimizer
(12 rows)

-- Testing inner nested loop join
set optimizer_enable_hashjoin = off;
-- We no longer enforce the inner side to be a singleton
-- when the outer side is universal. It just needs to
-- be non-replicated, since the inner join output is
-- deduplicated.
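-- (Illustration only: for the gen_series case below, each dist
-- value v in 1..9 matches the generated values v+1 .. 10, so the
-- join returns sum(10 - v) for v = 1..9, i.e. 45 rows, as seen at
-- the Gather Motion below.)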
explain (analyze, costs off, timing off, summary off) select * from dist join const_tvf(1) ct(c1) on dist.c1 < ct.c1;
                            QUERY PLAN                            
------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=0 loops=1)
   ->  Seq Scan on dist (actual rows=0 loops=1)
         Filter: (c1 < 1)
         Rows Removed by Filter: 337
 Optimizer: Postgres query optimizer
(5 rows)

explain (analyze, costs off, timing off, summary off) select * from dist join unnest_arr on dist.c1 < unnest_arr.c1;
                                      QUERY PLAN                                      
--------------------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=3 loops=1)
   ->  Nested Loop (actual rows=2 loops=1)
         Join Filter: (dist.c1 < (((unnest('{-3,-2,-1,0,1,2,3}'::text[])))::integer))
         Rows Removed by Join Filter: 2252
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Materialize (actual rows=7 loops=340)
               ->  Result (actual rows=7 loops=1)
                     ->  ProjectSet (actual rows=7 loops=1)
                           ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(10 rows)

explain (analyze, costs off, timing off, summary off) select * from dist join gen_series on dist.c1 < gen_series.c1;
                               QUERY PLAN                               
------------------------------------------------------------------------
 Gather Motion 3:1 (slice1; segments: 3) (actual rows=45 loops=1)
   ->  Nested Loop (actual rows=26 loops=1)
         Join Filter: (dist.c1 < (generate_series('-10'::integer, 10)))
         Rows Removed by Join Filter: 7051
         ->  Seq Scan on dist (actual rows=340 loops=1)
         ->  Materialize (actual rows=21 loops=340)
               ->  ProjectSet (actual rows=21 loops=1)
                     ->  Result (actual rows=1 loops=1)
 Optimizer: Postgres query optimizer
(9 rows)