| -- start_ignore |
| CREATE LANGUAGE plpython3u; |
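| -- recreate the gpdb cgroup directories for the cpu, cpuacct and cpuset controllers |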
| ! rmdir @cgroup_mnt_point@/cpu/gpdb; |
| ! rmdir @cgroup_mnt_point@/cpuacct/gpdb; |
| ! rmdir @cgroup_mnt_point@/cpuset/gpdb; |
| ! mkdir @cgroup_mnt_point@/cpu/gpdb; |
| ! mkdir @cgroup_mnt_point@/cpuacct/gpdb; |
| ! mkdir @cgroup_mnt_point@/cpuset/gpdb; |
| -- end_ignore |
| |
| -- we want to simulate a cluster with 3 segments (master and primaries) per |
| -- host, 2GB of memory and gp_resource_group_memory_limit=100%; suppose: |
| -- |
| -- - total: the total memory on the system; |
| -- - nsegs: the max per-host segment count (including both master and primaries); |
| -- - limit: the gp_resource_group_memory_limit used for the simulation; |
| -- |
| -- then we have: total * limit / nsegs = 2GB * 1.0 / 3 |
| -- so: limit = 2GB * 1.0 / 3 * nsegs / total |
| -- |
| -- with this simulation each primary segment should manage about 682MB of memory. |
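| -- |
| -- purely illustrative (assumed numbers, not measured): on a host with |
| -- total = 12GB and nsegs = 3, the formula gives |
| -- limit = 2GB * 1.0 / 3 * 3 / 12GB ~= 0.167 |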
| DO LANGUAGE plpython3u $$ |
| import os |
| import psutil |
| |
| mem = psutil.virtual_memory().total |
| swap = psutil.swap_memory().total |
| overcommit = int(open('/proc/sys/vm/overcommit_ratio').readline()) |
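| # effective total memory for the calculation: swap + RAM * overcommit_ratio% |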
| total = swap + mem * overcommit / 100. |
| |
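| # nsegs: the largest number of segments (master or primary) on any one host |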
| nsegs = int(plpy.execute(''' |
| SELECT count(hostname) as nsegs |
| FROM gp_segment_configuration |
| WHERE preferred_role = 'p' |
| GROUP BY hostname |
| ORDER BY count(hostname) DESC |
| LIMIT 1 |
| ''')[0]['nsegs']) |
| |
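| # apply the formula derived above: limit = 2GB * 1.0 / 3 * nsegs / total |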
| limit = (2 << 30) * 1.0 * nsegs / 3 / total |
| os.system('gpconfig -c gp_resource_group_memory_limit -v {:f}'.format(limit)) |
| $$; |
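| |
| -- note: after the restart below, the configured value could be inspected with |
| -- "gpconfig -s gp_resource_group_memory_limit"; it is left as a comment here |
| -- so the expected test output stays unchanged. |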
| |
| -- enable resource groups and restart the cluster. |
| -- start_ignore |
| ! gpconfig -c gp_resource_manager -v group; |
| ! gpconfig -c gp_resource_group_cpu_limit -v 0.9; |
| ! gpconfig -c max_connections -v 250 -m 25; |
| ! gpconfig -c runaway_detector_activation_percent -v 100; |
| ! gpstop -rai; |
| -- end_ignore |
| |
| -- after the restart we need a new connection to run the queries |
| |
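| -- it should now report 'group', as configured above |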
| 0: SHOW gp_resource_manager; |
| |
| -- queries against the resource queue status views should not crash |
| 0: SELECT * FROM pg_resqueue_status; |
| 0: SELECT * FROM gp_toolkit.gp_resqueue_status; |
| 0: SELECT * FROM gp_toolkit.gp_resq_priority_backend; |
| |
| -- verify the default settings |
| 0: SELECT * FROM gp_toolkit.gp_resgroup_config; |
| |
| -- by default admin_group has concurrency set to -1, which leads to a |
| -- very small memory quota for each resgroup slot; correct it. |
| 0: ALTER RESOURCE GROUP admin_group SET concurrency 2; |
| |
| -- explicitly set memory settings |
| 0: ALTER RESOURCE GROUP admin_group SET memory_limit 10; |
| 0: ALTER RESOURCE GROUP default_group SET memory_limit 30; |
| 0: ALTER RESOURCE GROUP admin_group SET memory_shared_quota 80; |
| 0: ALTER RESOURCE GROUP default_group SET memory_shared_quota 80; |
| -- in later cases we will SHOW memory_spill_ratio as the first command |
| -- to verify that it can be loaded correctly even for bypassed commands |
| 0: ALTER RESOURCE GROUP admin_group SET memory_spill_ratio 10; |
| 0: ALTER RESOURCE GROUP default_group SET memory_spill_ratio 10; |
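| |
| -- as a rough, illustrative check (assuming the 682MB per-segment budget from |
| -- the simulation above and the usual resource group memory model): admin_group |
| -- now gets about 682MB * 10% ~= 68MB per segment; 80% of that is the shared |
| -- pool, so each of its 2 concurrency slots gets roughly 68MB * 20% / 2 ~= 6.8MB |
| -- as its fixed quota, instead of the tiny quota implied by the defaults. |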