blob: cd162170e13a081832d1c1f849a7b9f851dfcf5e [file] [log] [blame]
drop table if exists _tmp_table;
NOTICE: table "_tmp_table" does not exist, skipping
create table _tmp_table (i1 int, i2 int, i3 int, i4 int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into _tmp_table select i, i % 100, i % 10000, i % 75 from generate_series(0,99999) i;
-- make the sort spill to disk
set statement_mem="2MB";
set gp_cte_sharing=on;
select gp_inject_fault('execsort_sort_mergeruns', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- set QueryFinishPending=true in sort mergeruns. This will stop sort and set result_tape to NULL
select gp_inject_fault('execsort_sort_mergeruns', 'finish_pending', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- return results although sort will be interrupted in one of the segments
select DISTINCT S from (select row_number() over(partition by i1 order by i2) AS T, count(*) over (partition by i1) AS S from _tmp_table) AS TMP;
s
---
1
(1 row)
select gp_inject_fault('execsort_sort_mergeruns', 'status', 2);
gp_inject_fault
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Success: fault name:'execsort_sort_mergeruns' fault type:'finish_pending' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'1' extra arg:'0' fault injection state:'completed' num times hit:'1' +
(1 row)
select gp_inject_fault('execsort_dumptuples', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- set QueryFinishPending=true in sort dumptuples. This will stop sort.
select gp_inject_fault('execsort_dumptuples', 'finish_pending', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- return results although sort will be interrupted in one of the segments
select DISTINCT S from (select row_number() over(partition by i1 order by i2) AS T, count(*) over (partition by i1) AS S from _tmp_table) AS TMP;
s
---
1
(1 row)
select gp_inject_fault('execsort_dumptuples', 'status', 2);
gp_inject_fault
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Success: fault name:'execsort_dumptuples' fault type:'finish_pending' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'1' extra arg:'0' fault injection state:'completed' num times hit:'1' +
(1 row)
-- ORCA does not trigger sort_bounded_heap() in following queries
set optimizer=off;
select gp_inject_fault('execsort_sort_bounded_heap', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- set QueryFinishPending=true in sort_bounded_heap. This will stop sort.
select gp_inject_fault('execsort_sort_bounded_heap', 'finish_pending', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- return results although sort will be interrupted in one of the segments
select i1 from _tmp_table order by i2 limit 3;
i1
-----
0
300
400
(3 rows)
select gp_inject_fault('execsort_sort_bounded_heap', 'status', 2);
gp_inject_fault
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Success: fault name:'execsort_sort_bounded_heap' fault type:'finish_pending' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'1' extra arg:'0' fault injection state:'completed' num times hit:'1' +
(1 row)
-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node
drop table if exists testsisc;
NOTICE: table "testsisc" does not exist, skipping
create table testsisc (i1 int, i2 int, i3 int, i4 int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'i1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
insert into testsisc select i, i % 1000, i % 100000, i % 75 from
(select generate_series(1, nsegments * 50000) as i from
(select count(*) as nsegments from gp_segment_configuration where role='p' and content >= 0) foo) bar;
set gp_resqueue_print_operator_memory_limits=on;
set statement_mem='2MB';
-- ORCA does not generate SharedInputScan with a Sort node underneath it. For
-- the following query, ORCA disregards the order by inside the cte definition;
-- planner on the other hand does not.
set optimizer=off;
select gp_inject_fault('execshare_input_next', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Success:
(1 row)
with cte as (select i2 from testsisc order by i2)
select * from cte c1, cte c2 limit 2;
i2 | i2
----+----
0 | 0
0 | 0
(2 rows)
select gp_inject_fault('execshare_input_next', 'status', 2);
gp_inject_fault
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Success: fault name:'execshare_input_next' fault type:'finish_pending' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'1' extra arg:'0' fault injection state:'completed' num times hit:'1' +
(1 row)
-- test if shared input scan deletes memory correctly when QueryFinishPending and its child has been eagerly freed,
-- where the child is a Sort node and sort_mk algorithm is used
select gp_inject_fault('execshare_input_next', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- Set QueryFinishPending to true after SharedInputScan has retrieved the first tuple.
-- This will eagerly free the memory context of shared input scan's child node.
select gp_inject_fault('execshare_input_next', 'finish_pending', 2);
gp_inject_fault
-----------------
Success:
(1 row)
with cte as (select i2 from testsisc order by i2)
select * from cte c1, cte c2 limit 2;
i2 | i2
----+----
0 | 0
0 | 0
(2 rows)
select gp_inject_fault('execshare_input_next', 'status', 2);
gp_inject_fault
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Success: fault name:'execshare_input_next' fault type:'finish_pending' ddl statement:'' database name:'' table name:'' start occurrence:'1' end occurrence:'1' extra arg:'0' fault injection state:'completed' num times hit:'1' +
(1 row)
-- Disable faultinjectors
select gp_inject_fault('execsort_sort_mergeruns', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
select gp_inject_fault('execsort_dumptuples', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
select gp_inject_fault('execsort_sort_bounded_heap', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
select gp_inject_fault('execshare_input_next', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- test if a query can be canceled when the cancel signal arrives faster than the query is dispatched.
create table _tmp_table1 as select i as c1, i as c2 from generate_series(1, 10) i;
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'c1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
create table _tmp_table2 as select i as c1, 0 as c2 from generate_series(0, 10) i;
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column(s) named 'c1' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
-- Prevent FTS from probing, as the "before_read_command" fault interferes
-- with FTS probes. Request a scan so that the skip fault is
-- triggered immediately, rather than waiting until the next probe
-- interval.
select gp_inject_fault_infinite('fts_probe', 'skip', 1);
gp_inject_fault_infinite
--------------------------
Success:
(1 row)
select gp_request_fts_probe_scan();
gp_request_fts_probe_scan
---------------------------
t
(1 row)
-- make one QE sleep before reading command
select gp_inject_fault('before_read_command', 'sleep', '', '', '', 1, 1, 50, 2::smallint);
gp_inject_fault
-----------------
Success:
(1 row)
select count(*) from _tmp_table1, _tmp_table2 where 100 / _tmp_table2.c2 > 1;
ERROR: division by zero (seg0 slice1 127.0.1.1:25432 pid=15478)
select gp_inject_fault('before_read_command', 'reset', 2);
gp_inject_fault
-----------------
Success:
(1 row)
-- Resume FTS probes starting from the next probe interval.
select gp_inject_fault('fts_probe', 'reset', 1);
gp_inject_fault
-----------------
Success:
(1 row)
drop table _tmp_table1;
drop table _tmp_table2;
drop table testsisc;