-- start_ignore
create extension if not exists gp_debug_numsegments;
create language plpython3u;
-- end_ignore
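-- the gp_debug_numsegments extension provides the gp_debug_*_numsegments
-- helpers used below; plpython3u is needed for the trig345 trigger function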
drop schema if exists test_expand_table_regression cascade;
create schema test_expand_table_regression;
set search_path=test_expand_table_regression,public;
--
-- derived from src/pl/plpython/sql/plpython_trigger.sql
--
-- with some hacks we could insert data into incorrect segments; data expansion
-- should tolerate this.
--
-- with this trigger the inserted data is always hacked to '345'
create function trig345() returns trigger language plpython3u as $$
TD["new"]["data"] = '345'
return 'modify'
$$;
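-- returning 'modify' from a PL/Python trigger makes the executor use the
-- modified TD["new"] row, so every row inserted into the table is stored as '345'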
select gp_debug_set_create_table_default_numsegments(2);
gp_debug_set_create_table_default_numsegments
------------------------------------------------
2
(1 row)

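-- with the default numsegments lowered to 2, table b is created as a partially
-- distributed table that lives on only two of the cluster's segments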
create table b(data int);
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'data' as the Apache Cloudberry data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
select gp_debug_reset_create_table_default_numsegments();
gp_debug_reset_create_table_default_numsegments
-------------------------------------------------

(1 row)

-- by default '345' should be inserted on seg0
insert into b values ('345');
select gp_segment_id, * from b;
gp_segment_id | data
---------------+------
0 | 345
(1 row)

truncate b;
create trigger b_t before insert on b for each row execute procedure trig345();
-- however with the trigger some rows end up on seg1, the wrong segment for '345'
insert into b select i from generate_series(1, 10) i;
select gp_segment_id, * from b;
gp_segment_id | data
---------------+------
1 | 345
1 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
(10 rows)

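-- (rows are routed to a segment based on the values supplied by the INSERT,
-- and only then does the BEFORE trigger rewrite them to '345', so table b now
-- holds rows on a segment where '345' does not hash)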
-- data expansion should tolerate it
alter table b expand table;
select gp_segment_id, * from b;
gp_segment_id | data
---------------+------
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
0 | 345
(10 rows)
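-- expand table redistributes the rows across the full cluster according to the
-- table's hash distribution; since every row is now '345', they all land on seg0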

-- start_ignore
-- We need to do a cluster expansion later, which checks for partial tables;
-- drop the partial tables here so that the cluster expansion runs correctly.
reset search_path;
drop schema test_expand_table_regression cascade;
-- end_ignore