blob: 7a838b69be59368dac4dc498dff9c7d28c0c1007 [file] [log] [blame]
create schema gpexplain;
set search_path = gpexplain;
-- Helper function, to return the EXPLAIN output of a query as a normal
-- result set, so that you can manipulate it further.
-- Runs 'EXPLAIN <explain_query>' and returns each output line as a text row,
-- so the plan can be filtered/counted with ordinary SQL (WHERE ... LIKE).
-- NOTE(review): explain_query is spliced into EXECUTE without quoting;
-- acceptable only because input is trusted, test-local SQL.
create or replace function get_explain_output(explain_query text) returns setof text as
$$
declare
explainrow text;
begin
for explainrow in execute 'EXPLAIN ' || explain_query
loop
-- Emit one result row per EXPLAIN output line.
return next explainrow;
end loop;
end;
$$ language plpgsql;
-- Same, for EXPLAIN ANALYZE VERBOSE
-- Same as get_explain_output, but executes the query under
-- EXPLAIN (ANALYZE, VERBOSE), so the rows include runtime statistics
-- (e.g. the "Vmem reserved" / "Executor Memory" lines checked below).
-- NOTE(review): ANALYZE actually runs explain_query, including any side
-- effects (INSERT/UPDATE/DELETE) — intentional for these tests.
create or replace function get_explain_analyze_output(explain_query text) returns setof text as
$$
declare
explainrow text;
begin
for explainrow in execute 'EXPLAIN (ANALYZE, VERBOSE) ' || explain_query
loop
-- Emit one result row per EXPLAIN output line.
return next explainrow;
end loop;
end;
$$ language plpgsql;
-- Same, for EXPLAIN ANALYZE WAL
-- Same helper, using EXPLAIN (ANALYZE, WAL) so the output carries
-- per-node WAL usage lines; the tests below count rows LIKE '%WAL%'.
-- NOTE(review): ANALYZE executes the DML being explained; the WAL tests
-- rely on that to generate WAL records worth reporting.
create or replace function get_explain_analyze_wal_output(explain_query text) returns setof text as
$$
declare
explainrow text;
begin
for explainrow in execute 'EXPLAIN (ANALYZE, WAL) ' || explain_query
loop
-- Emit one result row per EXPLAIN output line.
return next explainrow;
end loop;
end;
$$ language plpgsql;
-- Test explain wal option
CREATE TABLE explainwal (id int4);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'id' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
WITH query_plan (et) AS
(
select get_explain_analyze_wal_output($$
INSERT INTO explainwal SELECT generate_series(1, 10);
$$)
),
count_result AS
(
SELECT COUNT(*) FROM query_plan WHERE et LIKE '%WAL%'
)
select
(SELECT COUNT(*) FROM count_result WHERE count > 0) as wal_reserved_lines;
wal_reserved_lines
--------------------
1
(1 row)
WITH query_plan (et) AS
(
select get_explain_analyze_wal_output($$
UPDATE explainwal SET id=11 WHERE id=10;
$$)
),
count_result AS
(
SELECT COUNT(*) FROM query_plan WHERE et LIKE '%WAL%'
)
select
(SELECT COUNT(*) FROM count_result WHERE count > 0) as wal_reserved_lines;
wal_reserved_lines
--------------------
1
(1 row)
WITH query_plan (et) AS
(
select get_explain_analyze_wal_output($$
DELETE FROM explainwal
$$)
),
count_result AS
(
SELECT COUNT(*) FROM query_plan WHERE et LIKE '%WAL%'
)
select
(SELECT COUNT(*) FROM count_result WHERE count > 0) as wal_reserved_lines;
wal_reserved_lines
--------------------
1
(1 row)
--
-- Test explain_memory_verbosity option
--
CREATE TABLE explaintest (id int4);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'id' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
INSERT INTO explaintest SELECT generate_series(1, 10);
ANALYZE explaintest;
EXPLAIN ANALYZE SELECT * FROM explaintest;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..3.10 rows=10 width=4)
-> Seq Scan on explaintest (cost=0.00..3.10 rows=4 width=4)
Slice statistics:
(slice0) Executor memory: 318K bytes.
(slice1) Executor memory: 50K bytes avg x 3 workers, 50K bytes max (seg0).
Statement statistics:
Memory used: 128000K bytes
Total runtime: 0.998 ms
(8 rows)
set explain_memory_verbosity='summary';
-- The plan should include the slice table with two slices, with a
-- "Vmem reserved: ..." line on both lines.
WITH query_plan (et) AS
(
select get_explain_analyze_output($$
SELECT * FROM explaintest;
$$)
)
SELECT
(SELECT COUNT(*) FROM query_plan WHERE et like '%Vmem reserved: %') as vmem_reserved_lines,
(SELECT COUNT(*) FROM query_plan WHERE et like '%Executor Memory: %') as executor_memory_lines
;
vmem_reserved_lines | executor_memory_lines
---------------------+-----------------------
2 | 0
(1 row)
-- With 'detail' level, should have an Executor Memory on each executor node.
set explain_memory_verbosity='detail';
WITH query_plan (et) AS
(
select get_explain_analyze_output($$
SELECT * FROM explaintest;
$$)
)
SELECT
(SELECT COUNT(*) FROM query_plan WHERE et like '%Vmem reserved: %') as vmem_reserved_lines,
(SELECT COUNT(*) FROM query_plan WHERE et like '%Executor Memory: %') as executor_memory_lines
;
vmem_reserved_lines | executor_memory_lines
---------------------+-----------------------
2 | 2
(1 row)
reset explain_memory_verbosity;
EXPLAIN ANALYZE SELECT id FROM
( SELECT id
FROM explaintest
WHERE id > (
SELECT avg(id)
FROM explaintest
)
) as foo
ORDER BY id
LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------
Limit (cost=6.34..6.36 rows=1 width=4) (actual time=13.105..13.106 rows=1 loops=1)
InitPlan 1 (returns $0) (slice2)
-> Finalize Aggregate (cost=3.16..3.17 rows=1 width=32) (actual time=4.395..4.395 rows=1 loops=1)
-> Gather Motion 3:1 (slice3; segments: 3) (cost=3.12..3.15 rows=1 width=32) (actual time=1.197..4.356 rows=3 loops=1)
-> Partial Aggregate (cost=3.12..3.13 rows=1 width=32) (actual time=0.033..0.033 rows=1 loops=1)
-> Seq Scan on explaintest explaintest_1 (cost=0.00..3.10 rows=4 width=4) (actual time=0.020..0.021 rows=5 loops=1)
-> Gather Motion 3:1 (slice1; segments: 3) (cost=3.17..3.19 rows=1 width=4) (actual time=13.103..13.103 rows=1 loops=1)
Merge Key: explaintest.id
-> Limit (cost=3.17..3.17 rows=1 width=4) (actual time=0.597..0.602 rows=1 loops=1)
-> Sort (cost=3.17..3.17 rows=2 width=4) (actual time=0.594..0.594 rows=1 loops=1)
Sort Key: explaintest.id
Sort Method: top-N heapsort Memory: 51kB
-> Seq Scan on explaintest (cost=0.00..3.15 rows=2 width=4) (actual time=0.035..0.040 rows=3 loops=1)
Filter: ((id)::numeric > $0)
Planning time: 1.607 ms
(slice0) Executor memory: 156K bytes.
* (slice1) Executor memory: 94K bytes avg x 3 workers, 100K bytes max (seg1). Work_mem: 65K bytes max, 1K bytes wanted.
(slice2) Executor memory: 151K bytes.
_ (slice3) Workers: Workers: 3 not dispatched;.
Executor memory: 75K bytes avg x 3 workers, 75K bytes max (seg0).
Memory used: 128000kB
Memory wanted: 800kB
Optimizer: Postgres query optimizer
Execution time: 25.040 ms
(24 rows)
-- Verify that the column references are OK. This tests for an old ORCA bug,
-- where the Filter clause in the IndexScan of this query was incorrectly
-- printed as something like:
--
-- Filter: "outer".column2 = mpp22263.*::text
CREATE TABLE mpp22263 (
unique1 int4,
unique2 int4,
two int4,
four int4,
ten int4,
twenty int4,
hundred int4,
thousand int4,
twothousand int4,
fivethous int4,
tenthous int4,
odd int4,
even int4,
stringu1 name COLLATE pg_catalog."default",
stringu2 name COLLATE pg_catalog."default",
string4 name COLLATE pg_catalog."default"
) distributed by (unique1);
create index mpp22263_idx1 on mpp22263 using btree(unique1);
explain select * from mpp22263, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j)
WHERE mpp22263.unique1 = v.i and mpp22263.stringu1 = v.j;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.06..278.70 rows=14 width=280)
-> Hash Join (cost=0.06..278.70 rows=5 width=280)
Hash Cond: ((mpp22263.unique1 = "*VALUES*".column1) AND ((mpp22263.stringu1)::text = "*VALUES*".column2))
-> Seq Scan on mpp22263 (cost=0.00..219.00 rows=3967 width=244)
-> Hash (cost=0.03..0.03 rows=1 width=36)
-> Values Scan on "*VALUES*" (cost=0.00..0.03 rows=1 width=36)
Optimizer status: Postgres query optimizer
(7 rows)
-- atmsort.pm masks out differences in the Filter line, so just memorizing
-- the output of the above EXPLAIN isn't enough to catch a faulty Filter line.
-- Extract the Filter explicitly.
SELECT * from
get_explain_output($$
select * from mpp22263, (values(147, 'RFAAAA'), (931, 'VJAAAA')) as v (i, j)
WHERE mpp22263.unique1 = v.i and mpp22263.stringu1 = v.j;
$$) as et
WHERE et like '%Filter: %';
et
----
(0 rows)
--
-- Join condition in explain plan should represent constants with proper
-- variable name
--
create table foo (a int) distributed randomly;
-- "outer", "inner" prefix must also be prefixed to variable name as length of rtable > 1
SELECT trim(et) et from
get_explain_output($$
select * from (values (1),(2)) as f(a) join (values(1),(2)) b(b) on a = b join foo on true join foo as foo2 on true $$) as et
WHERE et like '%Join Filter:%' or et like '%Hash Cond:%';
et
--------------------------------------------------------
Hash Cond: ("*VALUES*".column1 = "*VALUES*_1".column1)
(1 row)
SELECT trim(et) et from
get_explain_output($$
select * from (values (1),(2)) as f(a) join (values(1),(2)) b(b) on a = b$$) as et
WHERE et like '%Hash Cond:%';
et
--------------------------------------------------------
Hash Cond: ("*VALUES*".column1 = "*VALUES*_1".column1)
(1 row)
--
-- Test EXPLAINing of the Partition By in a window function. (PostgreSQL
-- doesn't print it at all.)
--
explain (costs off) select count(*) over (partition by g) from generate_series(1, 10) g;
QUERY PLAN
------------------------------------------------
WindowAgg
Partition By: g
-> Sort
Sort Key: g
-> Function Scan on generate_series g
Optimizer: Postgres query optimizer
(6 rows)
--
-- Test non-text format with a few queries that contain GPDB-specific node types.
--
-- The default init_file rules contain a line to mask this out in normal
-- text-format EXPLAIN output, but it doesn't catch these alternative formats.
-- start_matchignore
-- m/Optimizer.*Pivotal Optimizer \(GPORCA\)/
-- end_matchignore
CREATE EXTERNAL WEB TABLE dummy_ext_tab (x text) EXECUTE 'echo foo' FORMAT 'text';
-- External Table Scan
explain (format json, costs off) SELECT * FROM dummy_ext_tab;
QUERY PLAN
-----------------------------------------------
[ +
{ +
"Plan": { +
"Node Type": "Gather Motion", +
"Senders": 3, +
"Receivers": 1, +
"Slice": 1, +
"Segments": 3, +
"Gang Type": "primary reader", +
"Parallel Aware": false, +
"Async Capable": false, +
"Plans": [ +
{ +
"Node Type": "Foreign Scan", +
"Operation": "Select", +
"Parent Relationship": "Outer", +
"Slice": 1, +
"Segments": 3, +
"Gang Type": "primary reader", +
"Parallel Aware": false, +
"Async Capable": false, +
"Relation Name": "dummy_ext_tab", +
"Alias": "dummy_ext_tab" +
} +
] +
}, +
"Settings": { +
"Optimizer": "Postgres query optimizer"+
} +
} +
]
(1 row)
-- Seq Scan on an append-only table
CREATE TEMP TABLE dummy_aotab (x int4) WITH (appendonly=true);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'x' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
explain (format yaml, costs off) SELECT * FROM dummy_aotab;
QUERY PLAN
-------------------------------------------
- Plan: +
Node Type: "Gather Motion" +
Senders: 3 +
Receivers: 1 +
Slice: 1 +
Segments: 3 +
Gang Type: "primary reader" +
Parallel Aware: false +
Async Capable: false +
Plans: +
- Node Type: "Seq Scan" +
Parent Relationship: "Outer" +
Slice: 1 +
Segments: 3 +
Gang Type: "primary reader" +
Parallel Aware: false +
Async Capable: false +
Relation Name: "dummy_aotab" +
Alias: "dummy_aotab" +
Settings: +
Optimizer: "Postgres query optimizer"
(1 row)
-- DML node (with ORCA)
explain (format xml, costs off) insert into dummy_aotab values (1);
QUERY PLAN
------------------------------------------------------------
<explain xmlns="http://www.postgresql.org/2009/explain"> +
<Query> +
<Plan> +
<Node-Type>ModifyTable</Node-Type> +
<Operation>Insert</Operation> +
<Slice>0</Slice> +
<Segments>1</Segments> +
<Gang-Type>primary writer</Gang-Type> +
<Parallel-Aware>false</Parallel-Aware> +
<Async-Capable>false</Async-Capable> +
<Relation-Name>dummy_aotab</Relation-Name> +
<Alias>dummy_aotab</Alias> +
<Plans> +
<Plan> +
<Node-Type>Result</Node-Type> +
<Parent-Relationship>Outer</Parent-Relationship>+
<Slice>0</Slice> +
<Segments>1</Segments> +
<Gang-Type>primary writer</Gang-Type> +
<Parallel-Aware>false</Parallel-Aware> +
<Async-Capable>false</Async-Capable> +
</Plan> +
</Plans> +
</Plan> +
<Settings> +
<Optimizer>Postgres query optimizer</Optimizer> +
</Settings> +
</Query> +
</explain>
(1 row)
-- github issues 5795. explain fails previously.
--start_ignore
explain SELECT * from information_schema.key_column_usage;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------------------------
Hash Join (cost=388.28..20612.99 rows=39 width=232)
Hash Cond: ((a.attrelid = ss.roid) AND (a.attnum = (ss.x).x))
Join Filter: (pg_has_role(ss.relowner, 'USAGE'::text) OR has_column_privilege(ss.roid, a.attnum, 'SELECT, INSERT, UPDATE, REFERENCES'::text))
-> Seq Scan on pg_attribute a (cost=0.00..583.74 rows=39974 width=70)
Filter: (NOT attisdropped)
-> Hash (cost=252.37..252.37 rows=3021 width=333)
-> Subquery Scan on ss (cost=6.50..252.37 rows=9061 width=333)
-> Result (cost=0.00..0.00 rows=0 width=0)
-> Hash Join (cost=6.50..161.76 rows=9061 width=341)
Hash Cond: (r.relnamespace = nr.oid)
-> Hash Join (cost=5.31..112.94 rows=10 width=273)
Hash Cond: (r.oid = c.conrelid)
-> Seq Scan on pg_class r (cost=0.00..105.53 rows=801 width=76)
Filter: (relkind = 'r'::"char")
-> Hash (cost=5.27..5.27 rows=2 width=201)
-> Hash Join (cost=4.12..5.27 rows=4 width=201)
Hash Cond: (nc.oid = c.connamespace)
-> Seq Scan on pg_namespace nc (cost=0.00..1.09 rows=9 width=68)
-> Hash (cost=4.11..4.11 rows=1 width=141)
-> Seq Scan on pg_constraint c (cost=0.00..4.11 rows=1 width=141)
Filter: (contype = ANY ('{p,u,f}'::"char"[]))
-> Hash (cost=1.11..1.11 rows=3 width=68)
-> Seq Scan on pg_namespace nr (cost=0.00..1.11 rows=7 width=68)
Filter: (NOT pg_is_other_temp_schema(oid))
Optimizer: Postgres query optimizer
(25 rows)
--end_ignore
-- github issue 5794.
set gp_enable_explain_allstat=on;
explain analyze SELECT * FROM explaintest;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..3.10 rows=10 width=4) (actual time=0.266..0.384 rows=5 loops=2)
-> Seq Scan on explaintest (cost=0.00..3.10 rows=4 width=4) (actual time=0.011..0.013 rows=2 loops=2)
allstat: seg_firststart_total_ntuples/seg0_0.745 ms_0.028 ms_3/seg1_0.640 ms_0.025 ms_5/seg2_0.997 ms_0.019 ms_2//end
(slice0) Executor memory: 322K bytes.
(slice1) Executor memory: 50K bytes avg x 3 workers, 50K bytes max (seg0).
Memory used: 128000kB
Optimizer: Postgres query optimizer
Total runtime: 2.600 ms
(8 rows)
set gp_enable_explain_allstat=DEFAULT;
--
-- Test GPDB-specific EXPLAIN (SLICETABLE) option.
--
explain (slicetable, costs off) SELECT * FROM explaintest;
QUERY PLAN
-----------------------------------------------------
Gather Motion 3:1 (slice1; segments: 3)
-> Seq Scan on explaintest
Slice 0: Dispatcher; root 0; parent -1; gang size 0
Slice 1: Reader; root 0; parent 0; gang size 3
Optimizer: Postgres query optimizer
(5 rows)
-- same in JSON format
explain (slicetable, costs off, format json) SELECT * FROM explaintest;
QUERY PLAN
-----------------------------------------------
[ +
{ +
"Plan": { +
"Node Type": "Gather Motion", +
"Senders": 3, +
"Receivers": 1, +
"Slice": 1, +
"Segments": 3, +
"Gang Type": "primary reader", +
"Parallel Aware": false, +
"Async Capable": false, +
"Plans": [ +
{ +
"Node Type": "Seq Scan", +
"Parent Relationship": "Outer", +
"Slice": 1, +
"Segments": 3, +
"Gang Type": "primary reader", +
"Parallel Aware": false, +
"Async Capable": false, +
"Relation Name": "explaintest", +
"Alias": "explaintest" +
} +
] +
}, +
"Settings": { +
"Optimizer": "Postgres query optimizer"+
}, +
"Slice Table": [ +
{ +
"Slice ID": 0, +
"Gang Type": "Dispatcher", +
"Root": 0, +
"Parent": -1, +
"Gang Size": 0 +
}, +
{ +
"Slice ID": 1, +
"Gang Type": "Reader", +
"Root": 0, +
"Parent": 0, +
"Gang Size": 3 +
} +
] +
} +
]
(1 row)
--
-- The same slice may have different number of plan nodes on every qExec.
-- Check if explain analyze can work in that case
--
create schema explain_subplan;
set search_path = explain_subplan;
CREATE TABLE mintab(c1 int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'c1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
INSERT into mintab VALUES (120);
CREATE TABLE range_parted (
a text,
b bigint,
c numeric
) PARTITION BY RANGE (a, b);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'a' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE part_a_1_a_10 PARTITION OF range_parted FOR VALUES FROM ('a', 1) TO ('a', 10);
NOTICE: table has parent, setting distribution columns to match parent table
CREATE TABLE part_a_10_a_20 PARTITION OF range_parted FOR VALUES FROM ('a', 10) TO ('a', 20);
NOTICE: table has parent, setting distribution columns to match parent table
CREATE TABLE part_b_1_b_10 PARTITION OF range_parted FOR VALUES FROM ('b', 1) TO ('b', 10);
NOTICE: table has parent, setting distribution columns to match parent table
CREATE TABLE part_b_10_b_20 PARTITION OF range_parted FOR VALUES FROM ('b', 10) TO ('b', 20);
NOTICE: table has parent, setting distribution columns to match parent table
ALTER TABLE range_parted ENABLE ROW LEVEL SECURITY;
INSERT INTO range_parted VALUES ('a', 1, 1), ('a', 12, 200);
CREATE USER regress_range_parted_user;
NOTICE: resource queue required -- using default resource queue "pg_default"
GRANT ALL ON SCHEMA explain_subplan TO regress_range_parted_user;
GRANT ALL ON range_parted, mintab TO regress_range_parted_user;
CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true);
CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0);
CREATE POLICY policy_range_parted_subplan on range_parted
AS RESTRICTIVE for UPDATE USING (true)
WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab));
SET SESSION AUTHORIZATION regress_range_parted_user;
EXPLAIN (analyze, costs off, timing off, summary off) UPDATE explain_subplan.range_parted set a = 'b', c = 120 WHERE a = 'a' AND c = '200';
QUERY PLAN
-------------------------------------------------------------------------------------------
Update on range_parted (actual rows=0 loops=1)
Update on part_a_1_a_10 range_parted_1
Update on part_a_10_a_20 range_parted_2
-> Explicit Redistribute Motion 1:3 (slice1; segments: 1) (actual rows=1 loops=1)
-> Split (actual rows=2 loops=1)
-> Append (actual rows=1 loops=1)
-> Seq Scan on part_a_1_a_10 range_parted_1 (actual rows=0 loops=1)
Filter: ((a = 'a'::text) AND (c = '200'::numeric))
Rows Removed by Filter: 1
-> Seq Scan on part_a_10_a_20 range_parted_2 (actual rows=1 loops=1)
Filter: ((a = 'a'::text) AND (c = '200'::numeric))
SubPlan 1 (copy 2)
-> Result (actual rows=1 loops=1)
-> Materialize (actual rows=1 loops=1)
-> Broadcast Motion 3:3 (slice2; segments: 3) (actual rows=1 loops=1)
-> Seq Scan on mintab (actual rows=1 loops=1)
SubPlan 1 (copy 3)
-> Result (never executed)
-> Materialize (never executed)
-> Broadcast Motion 3:3 (slice3; segments: 3) (never executed)
-> Seq Scan on mintab mintab_1 (actual rows=1 loops=1)
Optimizer: Postgres query optimizer
(22 rows)
RESET SESSION AUTHORIZATION;
DROP POLICY seeall ON range_parted;
DROP POLICY policy_range_parted ON range_parted;
DROP POLICY policy_range_parted_subplan ON range_parted;
DROP TABLE mintab;
DROP TABLE range_parted;
RESET search_path;
DROP SCHEMA explain_subplan;
DROP USER regress_range_parted_user;
-- Test if explain analyze will hang with materialize node
CREATE TABLE recursive_table_ic (a INT) DISTRIBUTED BY (a);
INSERT INTO recursive_table_ic SELECT * FROM generate_series(20, 30000);
explain (analyze, costs off, timing off, summary off) WITH RECURSIVE
r(i) AS (
SELECT 1
),
y(i) AS (
SELECT 1
UNION ALL
SELECT i + 1 FROM y, recursive_table_ic WHERE NOT EXISTS (SELECT * FROM r LIMIT 10)
)
SELECT * FROM y LIMIT 10;
QUERY PLAN
-----------------------------------------------------------------------------------------------
Limit (actual rows=1 loops=1)
-> Recursive Union (actual rows=1 loops=1)
-> Result (actual rows=1 loops=1)
-> Result (actual rows=0 loops=1)
One-Time Filter: (NOT $1)
InitPlan 1 (returns $1) (slice2)
-> Result (actual rows=1 loops=1)
-> Nested Loop (never executed)
-> WorkTable Scan on y (never executed)
-> Materialize (never executed)
-> Gather Motion 3:1 (slice1; segments: 3) (never executed)
-> Seq Scan on recursive_table_ic (actual rows=4061 loops=1)
Optimizer: Postgres query optimizer
(13 rows)
DROP TABLE recursive_table_ic;