| -- |
| -- A concurrent scan on a bitmap index while an insert is running in another backend |
| -- may cause the bitmap scan to read the wrong tid. |
| -- If a LOV has multiple bitmap pages and the index insert tries to insert a tid into a |
| -- compressed word on a full bitmap page (call it `PAGE_FULL`), the insert looks for free |
| -- space on the next bitmap page (call it `PAGE_NEXT`), rearranges the words, and copies |
| -- the words that no longer fit into `PAGE_NEXT`. |
| -- When such an insertion happens, imagine the following interleaving: |
| -- 1. Query on bitmap: a query starts and reads all bitmap pages up to `PAGE_FULL`, |
| --    advancing the next tid to fetch and releasing the lock after reading each page. |
| -- 2. Concurrent insert: inserting a tid into `PAGE_FULL` expands a compressed word into |
| --    new words, and the overflow words are rearranged into `PAGE_NEXT`. |
| -- 3. Query on bitmap: the scan fetches `PAGE_NEXT` and expects its first tid to equal the |
| --    saved next tid. But `PAGE_NEXT` now contains words that used to belong to `PAGE_FULL`, |
| --    so the real next tid is less than the expected next tid. The scan keeps advancing from |
| --    the wrong tid, which leads to a wrong result. |
| -- This is related to issue: https://github.com/greenplum-db/gpdb/issues/11308 |
| -- |
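| -- For illustration only (made-up numbers, not asserted anywhere in this test): suppose |
| -- the words on `PAGE_FULL` cover tids 1..65536 and the scan therefore saves 65537 as the |
| -- next tid to fetch. After the concurrent insert rearranges words, the first word on |
| -- `PAGE_NEXT` may now map to, say, tid 60001 (a word pushed out of `PAGE_FULL`). The scan |
| -- nevertheless keeps counting from 65537, so the bits it reads from `PAGE_NEXT` are |
| -- attributed to tids that are too large, and the result is wrong. |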
| |
| -- Here we use an append-optimized table to speed up creating full bitmap pages, since |
| -- each concurrent transaction uses a different seg file number and an AO table's |
| -- AOTupleId is composed of (seg file number, row number). This leads to lots of |
| -- compressed words in the first bitmap page. |
| -- With the transactions below, one per session, the bitmap for id=97 on seg0 will |
| -- span two bitmap pages, and the first page is a full page. |
| -- A heap table (delete tuples and then vacuum) would behave the same way, but it would |
| -- need a huge number of tuples. |
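| -- A minimal, commented-out sketch (not executed by this test) of how one could confirm |
| -- that the concurrent sessions below really land in different AO segment files once |
| -- their inserts commit, assuming the gp_toolkit.__gp_aoseg helper is available in this |
| -- build: |
| -- |
| --   SELECT * FROM gp_toolkit.__gp_aoseg('bmupdate'); |
| -- |
| -- The per-segment spread of the key used by the test can be checked with plain SQL: |
| -- |
| --   SELECT gp_segment_id, count(*) FROM bmupdate WHERE id = 97 GROUP BY 1; |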
| CREATE TABLE bmupdate (id int) with(appendonly = true) DISTRIBUTED BY (id); |
| CREATE |
| |
| 1: begin; |
| BEGIN |
| 2: begin; |
| BEGIN |
| 3: begin; |
| BEGIN |
| 4: begin; |
| BEGIN |
| 5: begin; |
| BEGIN |
| 6: begin; |
| BEGIN |
| 7: begin; |
| BEGIN |
| 8: begin; |
| BEGIN |
| 9: begin; |
| BEGIN |
| 10: begin; |
| BEGIN |
| 11: begin; |
| BEGIN |
| 12: begin; |
| BEGIN |
| 13: begin; |
| BEGIN |
| 14: begin; |
| BEGIN |
| 15: begin; |
| BEGIN |
| 16: begin; |
| BEGIN |
| 17: begin; |
| BEGIN |
| 18: begin; |
| BEGIN |
| 19: begin; |
| BEGIN |
| 20: begin; |
| BEGIN |
| 21: begin; |
| BEGIN |
| 22: begin; |
| BEGIN |
| |
| 1: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 2: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 3: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 4: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 5: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 6: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 7: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 8: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 9: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 10: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 11: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 12: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 13: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 14: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 15: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 16: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 17: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 18: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 19: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 20: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 21: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| 22: INSERT INTO bmupdate SELECT i%10000 FROM generate_series(1, 1000000) AS i; |
| INSERT 1000000 |
| |
| 1: commit; |
| COMMIT |
| 2: commit; |
| COMMIT |
| 3: commit; |
| COMMIT |
| 4: commit; |
| COMMIT |
| 5: commit; |
| COMMIT |
| 6: commit; |
| COMMIT |
| 7: commit; |
| COMMIT |
| 8: commit; |
| COMMIT |
| 9: commit; |
| COMMIT |
| 10: commit; |
| COMMIT |
| 11: commit; |
| COMMIT |
| 12: commit; |
| COMMIT |
| 13: commit; |
| COMMIT |
| 14: commit; |
| COMMIT |
| 15: commit; |
| COMMIT |
| 16: commit; |
| COMMIT |
| 17: commit; |
| COMMIT |
| 18: commit; |
| COMMIT |
| 19: commit; |
| COMMIT |
| 20: commit; |
| COMMIT |
| 21: commit; |
| COMMIT |
| 22: commit; |
| COMMIT |
| |
| -- Let's check the total tuple count for id=97 before creating the bitmap index. |
| SELECT count(*) FROM bmupdate WHERE id = 97; |
| count |
| ------- |
| 2200 |
| (1 row) |
| |
| CREATE INDEX idx_bmupdate__id ON bmupdate USING bitmap (id); |
| CREATE |
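| -- A hedged, commented-out sketch (not part of the test) for eyeballing how large the new |
| -- bitmap index is on disk; the exact block count depends on data volume and compression, |
| -- so it is not asserted here: |
| -- |
| --   SELECT pg_relation_size('idx_bmupdate__id') / current_setting('block_size')::int |
| --       AS index_blocks; |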
| |
| -- |
| -- Test 1, run a Bitmap Heap Scan on the bitmap index while a concurrent |
| -- insert is running in another backend. |
| -- |
| -- Inject a fault after reading the first bitmap page while querying the table. |
| SELECT gp_inject_fault_infinite('after_read_one_bitmap_idx_page', 'suspend', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| -- Inject a fault when inserting a new tid causes words to be rearranged from the |
| -- current bitmap page to the next bitmap page. |
| SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| 1: set optimizer = off; |
| SET |
| 1: set enable_seqscan=off; |
| SET |
| -- Should generate a Bitmap Heap Scan on the bitmap index. |
| 1: EXPLAIN (COSTS OFF) SELECT * FROM bmupdate WHERE id = 97; |
| QUERY PLAN |
| --------------------------------------------------- |
| Gather Motion 1:1 (slice1; segments: 1) |
| -> Bitmap Heap Scan on bmupdate |
| Recheck Cond: (id = 97) |
| -> Bitmap Index Scan on idx_bmupdate__id |
| Index Cond: (id = 97) |
| Optimizer: Postgres query optimizer |
| (6 rows) |
| -- The query should suspend on the first injected fault, right after it finishes reading the first bitmap page. |
| 1&: SELECT count(*) FROM bmupdate WHERE id = 97; <waiting ...> |
| |
| -- The insert adds a new tid to the first bitmap page, causing a compressed word to |
| -- expand and the overflow words to be rearranged onto the next bitmap page. |
| -- It does not insert at the end of the bitmap LOV because only one transaction is |
| -- inserting right now, so the row gets a small seg file number, whose tids map to the first page. |
| 2: INSERT INTO bmupdate VALUES (97); |
| INSERT 1 |
| |
| -- The query has read the first page (buffer lock released), and then the INSERT writes |
| -- into the first page, which triggers the word rearrangement. |
| SELECT gp_wait_until_triggered_fault('rearrange_word_to_next_bitmap_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('rearrange_word_to_next_bitmap_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- The insert has triggered the rearrangement; resume the suspended query. |
| SELECT gp_wait_until_triggered_fault('after_read_one_bitmap_idx_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('after_read_one_bitmap_idx_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- Should return the correct tuple count for id=97. This used to raise an assertion failure |
| -- for AO tables because the wrong tid was transformed into an invalid AOTupleId. |
| 1<: <... completed> |
| count |
| ------- |
| 2200 |
| (1 row) |
| |
| -- Let's check the total tuple count after the test. |
| SELECT count(*) FROM bmupdate WHERE id = 97; |
| count |
| ------- |
| 2201 |
| (1 row) |
| |
| -- |
| -- Test 2, run an Index Scan on the bitmap index while a concurrent |
| -- insert is running in another backend. |
| -- |
| -- Inject a fault after reading the first bitmap page while querying the table. |
| SELECT gp_inject_fault_infinite('after_read_one_bitmap_idx_page', 'suspend', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| -- Inject a fault when inserting a new tid causes words to be rearranged from the |
| -- current bitmap page to the next bitmap page. |
| SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| 1: set enable_bitmapscan=off; |
| SET |
| -- Should generate an Index Scan on the bitmap index. |
| 1: EXPLAIN (COSTS OFF) SELECT * FROM bmupdate WHERE id = 97; |
| QUERY PLAN |
| ----------------------------------------------------- |
| Gather Motion 1:1 (slice1; segments: 1) |
| -> Index Scan using idx_bmupdate__id on bmupdate |
| Index Cond: (id = 97) |
| Optimizer: Postgres query optimizer |
| (4 rows) |
| -- The query should suspend on the first injected fault, right after it finishes reading the first bitmap page. |
| 1&: SELECT count(*) FROM bmupdate WHERE id = 97; <waiting ...> |
| |
| -- The insert adds a new tid to the first bitmap page, causing a compressed word to |
| -- expand and the overflow words to be rearranged onto the next bitmap page. |
| -- It does not insert at the end of the bitmap LOV because only one transaction is |
| -- inserting right now, so the row gets a small seg file number, whose tids map to the first page. |
| 2: INSERT INTO bmupdate VALUES (97); |
| INSERT 1 |
| |
| -- The query has read the first page (buffer lock released), and then the INSERT writes |
| -- into the first page, which triggers the word rearrangement. |
| SELECT gp_wait_until_triggered_fault('rearrange_word_to_next_bitmap_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('rearrange_word_to_next_bitmap_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- The insert has triggered the rearrangement; resume the suspended query. |
| SELECT gp_wait_until_triggered_fault('after_read_one_bitmap_idx_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('after_read_one_bitmap_idx_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- Should return the correct tuple count for id=97. This used to raise an assertion failure |
| -- for AO tables because the wrong tid was transformed into an invalid AOTupleId. |
| 1<: <... completed> |
| count |
| ------- |
| 2201 |
| (1 row) |
| |
| -- |
| -- Test 3, run an Index Scan on the bitmap index matching multiple keys while a |
| -- concurrent insert is running in another backend. |
| -- |
| -- Let's check the total tuple count before the test. |
| SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; |
| count |
| ------- |
| 4402 |
| (1 row) |
| |
| -- Inject a fault after reading the first bitmap page while querying the table. |
| SELECT gp_inject_fault_infinite('after_read_one_bitmap_idx_page', 'suspend', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| -- Inject a fault when inserting a new tid causes words to be rearranged from the |
| -- current bitmap page to the next bitmap page. |
| SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| -- Should generate an Index Scan on the bitmap index that matches multiple keys. |
| 1: EXPLAIN (COSTS OFF) SELECT * FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; |
| QUERY PLAN |
| ----------------------------------------------------- |
| Gather Motion 3:1 (slice1; segments: 3) |
| -> Index Scan using idx_bmupdate__id on bmupdate |
| Index Cond: ((id >= 97) AND (id <= 99)) |
| Filter: (gp_segment_id = 0) |
| Optimizer: Postgres query optimizer |
| (5 rows) |
| -- The query should suspend on the first injected fault, right after it finishes reading the first bitmap page. |
| 1&: SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; <waiting ...> |
| |
| -- The insert adds a new tid to the first bitmap page, causing a compressed word to |
| -- expand and the overflow words to be rearranged onto the next bitmap page. |
| -- It does not insert at the end of the bitmap LOV because only one transaction is |
| -- inserting right now, so the row gets a small seg file number, whose tids map to the first page. |
| -- Insert both values to make sure the full-page update happens for at least one LOV. |
| 2: INSERT INTO bmupdate VALUES (97); |
| INSERT 1 |
| 2: INSERT INTO bmupdate VALUES (99); |
| INSERT 1 |
| |
| -- The query has read the first page (buffer lock released), and then the INSERT writes |
| -- into the first page, which triggers the word rearrangement. |
| SELECT gp_wait_until_triggered_fault('rearrange_word_to_next_bitmap_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('rearrange_word_to_next_bitmap_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- The insert has triggered the rearrangement; resume the suspended query. |
| SELECT gp_wait_until_triggered_fault('after_read_one_bitmap_idx_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('after_read_one_bitmap_idx_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- Should return the correct tuple count for the queried ids. This used to raise an assertion |
| -- failure for AO tables because the wrong tid was transformed into an invalid AOTupleId. |
| 1<: <... completed> |
| count |
| ------- |
| 4402 |
| (1 row) |
| |
| -- Let's check the total tuple count after the test. |
| SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; |
| count |
| ------- |
| 4404 |
| (1 row) |
| |
| -- |
| -- Test 4, run a Bitmap Heap Scan on the bitmap index matching multiple keys while a |
| -- concurrent insert is running in another backend. |
| -- |
| |
| -- Inject a fault after reading the first bitmap page while querying the table. |
| SELECT gp_inject_fault_infinite('after_read_one_bitmap_idx_page', 'suspend', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| -- Inject a fault when inserting a new tid causes words to be rearranged from the |
| -- current bitmap page to the next bitmap page. |
| SELECT gp_inject_fault_infinite('rearrange_word_to_next_bitmap_page', 'skip', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault_infinite |
| -------------------------- |
| Success: |
| (1 row) |
| |
| 1: set enable_bitmapscan=on; |
| SET |
| -- Should generate a Bitmap Heap Scan on the bitmap index that matches multiple keys. |
| 1: EXPLAIN (COSTS OFF) SELECT * FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; |
| QUERY PLAN |
| ------------------------------------------------------- |
| Gather Motion 3:1 (slice1; segments: 3) |
| -> Bitmap Heap Scan on bmupdate |
| Recheck Cond: ((id >= 97) AND (id <= 99)) |
| Filter: (gp_segment_id = 0) |
| -> Bitmap Index Scan on idx_bmupdate__id |
| Index Cond: ((id >= 97) AND (id <= 99)) |
| Optimizer: Postgres query optimizer |
| (7 rows) |
| -- The query should suspend on the first injected fault, right after it finishes reading the first bitmap page. |
| 1&: SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; <waiting ...> |
| |
| -- The insert adds a new tid to the first bitmap page, causing a compressed word to |
| -- expand and the overflow words to be rearranged onto the next bitmap page. |
| -- It does not insert at the end of the bitmap LOV because only one transaction is |
| -- inserting right now, so the row gets a small seg file number, whose tids map to the first page. |
| -- Insert both values to make sure the full-page update happens for at least one LOV. |
| 2: INSERT INTO bmupdate SELECT 97 FROM generate_series(1, 1000); |
| INSERT 1000 |
| 2: INSERT INTO bmupdate SELECT 99 FROM generate_series(1, 1000); |
| INSERT 1000 |
| |
| -- The query has read the first page (buffer lock released), and then the INSERT writes |
| -- into the first page, which triggers the word rearrangement. |
| SELECT gp_wait_until_triggered_fault('rearrange_word_to_next_bitmap_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('rearrange_word_to_next_bitmap_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- The insert has triggered the rearrangement; resume the suspended query. |
| SELECT gp_wait_until_triggered_fault('after_read_one_bitmap_idx_page', 1, dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_wait_until_triggered_fault |
| ------------------------------- |
| Success: |
| (1 row) |
| SELECT gp_inject_fault('after_read_one_bitmap_idx_page', 'reset', dbid) FROM gp_segment_configuration where role = 'p' and content = 0; |
| gp_inject_fault |
| ----------------- |
| Success: |
| (1 row) |
| |
| -- Should return the correct tuple count for the queried ids. This used to raise an assertion |
| -- failure for AO tables because the wrong tid was transformed into an invalid AOTupleId. |
| 1<: <... completed> |
| count |
| ------- |
| 4404 |
| (1 row) |
| |
| -- Let's check the total tuple count after the test. |
| SELECT count(*) FROM bmupdate WHERE id >= 97 and id <= 99 and gp_segment_id = 0; |
| count |
| ------- |
| 6404 |
| (1 row) |
| |
| DROP TABLE bmupdate; |
| DROP |
| |
| |
| -- Regression test: when a large number of concurrent inserts have happened, |
| -- querying the table shouldn't take a long time. |
| -- This test is from https://github.com/greenplum-db/gpdb/issues/15389. |
| -- Although the Postgres planner is slower, this regression does not |
| -- happen with the planner. |
| set optimizer = on; |
| SET |
| DROP TABLE IF EXISTS bug.let_me_out; |
| DROP |
| DROP SCHEMA IF EXISTS bug; |
| DROP |
| CREATE SCHEMA bug; |
| CREATE |
| CREATE TABLE bug.let_me_out ( date_column date NULL, int_column int4 NULL ) WITH (appendonly = true, orientation = column) distributed randomly; |
| CREATE |
| |
| 1&: INSERT INTO bug.let_me_out(date_column, int_column) SELECT ('2017-01-01'::timestamp + random() * ('2023-08-10'::timestamp - '2017-01-01'::timestamp))::date AS date_column, id / 50000 AS int_column FROM generate_series(1, 30000000) s(id); <waiting ...> |
| |
| 2&: INSERT INTO bug.let_me_out(date_column, int_column) SELECT ('2017-01-01'::timestamp + random() * ('2023-08-10'::timestamp - '2017-01-01'::timestamp))::date AS date_column, id / 50000 AS int_column FROM generate_series(30000000, 50000000) s(id); <waiting ...> |
| |
| 1<: <... completed> |
| INSERT 30000000 |
| 2<: <... completed> |
| INSERT 20000001 |
| |
| CREATE INDEX idx_let_me_out__date_column ON bug.let_me_out USING bitmap (date_column); |
| CREATE |
| CREATE INDEX idx_let_me_out__int_column ON bug.let_me_out USING bitmap (int_column); |
| CREATE |
| VACUUM FULL ANALYZE bug.let_me_out; |
| VACUUM |
| |
| SET random_page_cost = 1; |
| SET |
| -- Expected to finish in well under 250ms; if it exceeds 60000ms (the timeout below), something has gone really wrong. |
| SET statement_timeout=60000; |
| SET |
| EXPLAIN ANALYZE SELECT date_column, int_column FROM bug.let_me_out WHERE date_column in ('2023-03-19', '2023-03-08', '2023-03-13', '2023-03-29', '2023-03-20', '2023-03-28', '2023-03-23', '2023-03-04', '2023-03-05', '2023-03-18', '2023-03-14', '2023-03-06', '2023-03-15', '2023-03-31', '2023-03-11', '2023-03-21', '2023-03-24', '2023-03-30', '2023-03-26', '2023-03-03', '2023-03-22', '2023-03-01', '2023-03-12', '2023-03-17', '2023-03-27', '2023-03-07', '2023-03-16', '2023-03-10', '2023-03-25', '2023-03-09', '2023-03-02') AND int_column IN (1003,1025,1026,1033,1034,1216,1221,160,161,1780,3049,305,3051,3052,3069,3077,3083,3084,3092,3121,3122,3123,3124,3180,3182,3183,3184,3193,3225,3226,3227,3228,3234,3267,3269,3270,3271,3272,3277,3301,3302,3303,3305,3307,3308,3310,3314,3317,3318,3319,3320,3321,3343,3344,3345,3347,3348,3388,339,341,345,346,347,349,3522,3565,3606,3607,3610,3612,3613,3637,3695,3738,3739,3740,3741,3742,3764,3829,3859,3861,3864,3865,3866,3867,3870,3871,3948,3967,3969,3971,3974,3975,3976,4043,4059,4061,4062,4064,4065,4069,4070,4145,42,423,4269,43,4300,4303,4308,4311,4312,4313,4361,4449,445,446,4475,4476,4479,4480,4483,4485,4486,450,4581,4609,4610,4611,4613,4614,4685,4707,4708,4709,4710,4799,4800,4825,4831,4832,4905,4940,4941,4942,4945,4947,4948,4953,4954,4957,540,572,627,743,762,763,77,787,80,81,84,871,899,901,902,905,906); |
| QUERY PLAN |
| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| Gather Motion 3:1 (slice1; segments: 3) (cost=0.00..448.82 rows=37406 width=8) (actual time=39.186..142.060 rows=20169 loops=1) |
| -> Bitmap Heap Scan on let_me_out (cost=0.00..447.52 rows=12469 width=8) (actual time=36.948..131.712 rows=6817 loops=1) |
| Recheck Cond: ((date_column = ANY ('{2023-03-19,2023-03-08,2023-03-13,2023-03-29,2023-03-20,2023-03-28,2023-03-23,2023-03-04,2023-03-05,2023-03-18,2023-03-14,2023-03-06,2023-03-15,2023-03-31,2023-03-11,2023-03-21,2023-03-24,2023-03-30,2023-03-26,2023-03-03,2023-03-22,2023-03-01,2023-03-12,2023-03-17,2023-03-27,2023-03-07,2023-03-16,2023-03-10,2023-03-25,2023-03-09,2023-03-02}'::date[])) AND (int_column = ANY ('{1003,1025,1026,1033,1034,1216,1221,160,161,1780,3049,305,3051,3052,3069,3077,3083,3084,3092,3121,3122,3123,3124,3180,3182,3183,3184,3193,3225,3226,3227,3228,3234,3267,3269,3270,3271,3272,3277,3301,3302,3303,3305,3307,3308,3310,3314,3317,3318,3319,3320,3321,3343,3344,3345,3347,3348,3388,339,341,345,346,347,349,3522,3565,3606,3607,3610,3612,3613,3637,3695,3738,3739,3740,3741,3742,3764,3829,3859,3861,3864,3865,3866,3867,3870,3871,3948,3967,3969,3971,3974,3975,3976,4043,4059,4061,4062,4064,4065,4069,4070,4145,42,423,4269,43,4300,4303,4308,4311,4312,4313,4361,4449,445,446,4475,4476,4479,4480,4483,4485,4486,450,4581,4609,4610,4611,4613,4614,4685,4707,4708,4709,4710,4799,4800,4825,4831,4832,4905,4940,4941,4942,4945,4947,4948,4953,4954,4957,540,572,627,743,762,763,77,787,80,81,84,871,899,901,902,905,906}'::integer[]))) |
| -> BitmapAnd (cost=0.00..0.00 rows=0 width=0) (actual time=23.072..23.073 rows=1 loops=1) |
| -> Bitmap Index Scan on idx_let_me_out__date_column (cost=0.00..0.00 rows=0 width=0) (actual time=12.033..12.033 rows=31 loops=1) |
| Index Cond: (date_column = ANY ('{2023-03-19,2023-03-08,2023-03-13,2023-03-29,2023-03-20,2023-03-28,2023-03-23,2023-03-04,2023-03-05,2023-03-18,2023-03-14,2023-03-06,2023-03-15,2023-03-31,2023-03-11,2023-03-21,2023-03-24,2023-03-30,2023-03-26,2023-03-03,2023-03-22,2023-03-01,2023-03-12,2023-03-17,2023-03-27,2023-03-07,2023-03-16,2023-03-10,2023-03-25,2023-03-09,2023-03-02}'::date[])) |
| -> Bitmap Index Scan on idx_let_me_out__int_column (cost=0.00..0.00 rows=0 width=0) (actual time=11.035..11.035 rows=169 loops=1) |
| Index Cond: (int_column = ANY ('{1003,1025,1026,1033,1034,1216,1221,160,161,1780,3049,305,3051,3052,3069,3077,3083,3084,3092,3121,3122,3123,3124,3180,3182,3183,3184,3193,3225,3226,3227,3228,3234,3267,3269,3270,3271,3272,3277,3301,3302,3303,3305,3307,3308,3310,3314,3317,3318,3319,3320,3321,3343,3344,3345,3347,3348,3388,339,341,345,346,347,349,3522,3565,3606,3607,3610,3612,3613,3637,3695,3738,3739,3740,3741,3742,3764,3829,3859,3861,3864,3865,3866,3867,3870,3871,3948,3967,3969,3971,3974,3975,3976,4043,4059,4061,4062,4064,4065,4069,4070,4145,42,423,4269,43,4300,4303,4308,4311,4312,4313,4361,4449,445,446,4475,4476,4479,4480,4483,4485,4486,450,4581,4609,4610,4611,4613,4614,4685,4707,4708,4709,4710,4799,4800,4825,4831,4832,4905,4940,4941,4942,4945,4947,4948,4953,4954,4957,540,572,627,743,762,763,77,787,80,81,84,871,899,901,902,905,906}'::integer[])) |
| Optimizer: GPORCA |
| Planning Time: 79.198 ms |
| (slice0) Executor memory: 47K bytes. |
| (slice1) Executor memory: 49133K bytes avg x 3 workers, 49133K bytes max (seg0). |
| Memory used: 128000kB |
| Execution Time: 148.774 ms |
| (14 rows) |