author     Peter Zhu <peter@peterzhu.ca>   2023-08-30 15:28:22 -0400
committer  Peter Zhu <peter@peterzhu.ca>   2023-08-31 09:28:31 -0400
commit     4f0d58260a7f506ead198064d12a967edd11fe5e (patch)
tree       060ea8fef61b6b4240ee74dbe3cff410fe44e6b8 /test/ruby
parent     eb3d94f4baff70d2e120c9472a3851a4aa9c90d9 (diff)
Correctly calculate initial pages
The old algorithm could calculate an undercount for the initial pages due to two issues:

1. It did not take into account that some heap pages will have one less slot due to alignment. It assumed that every heap page could be completely filled with slots, but pages that are unaligned with the slot size lose one slot. The new algorithm assumes that every page will be unaligned.

2. It performed integer division, which truncates down, so the computed number of pages might not actually satisfy the requested number of slots.

This could cause the heap to grow in `gc_sweep_finish_size_pool` after allocating all of the allocatable pages, because the total number of slots would be less than the initially configured number of slots.
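A rough sketch of the fix in plain Ruby (not the actual gc.c code; BASE_SLOT_SIZE and HEAP_PAGE_OBJ_LIMIT are assumed example values here, since the real constants are platform-dependent):

    BASE_SLOT_SIZE      = 40   # assumed base slot size for this example
    HEAP_PAGE_OBJ_LIMIT = 409  # assumed slots per page at the base size

    # Old calculation: assumes fully packed pages and truncates down.
    def old_minimum_pages(init_slots, slot_size)
      multiple = slot_size / BASE_SLOT_SIZE
      init_slots / (HEAP_PAGE_OBJ_LIMIT / multiple)
    end

    # New calculation: assumes every page loses 1 slot to alignment and
    # rounds up so the pages always satisfy init_slots.
    def new_minimum_pages(init_slots, slot_size)
      multiple = slot_size / BASE_SLOT_SIZE
      slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1
      (init_slots + slots_per_page - 1) / slots_per_page  # ceiling division
    end

    old_minimum_pages(10_000, 40) # => 24 pages; 24 * 409 = 9,816 slots (undercount)
    new_minimum_pages(10_000, 40) # => 25 pages; 25 * 408 = 10,200 slots (>= 10,000)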
Notes:
    Merged: https://github.com/ruby/ruby/pull/8333
Diffstat (limited to 'test/ruby')
-rw-r--r--  test/ruby/test_gc.rb  46
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/test/ruby/test_gc.rb b/test/ruby/test_gc.rb
index 3967ed54bc..0b4062e99f 100644
--- a/test/ruby/test_gc.rb
+++ b/test/ruby/test_gc.rb
@@ -444,12 +444,12 @@ class TestGc < Test::Unit::TestCase
# Constant from gc.c.
GC_HEAP_INIT_SLOTS = 10_000
GC.stat_heap.each do |_, s|
- # Sometimes pages will have 1 less slot due to alignment, so always increase slots_per_page by 1.
- slots_per_page = (s[:heap_eden_slots] / s[:heap_eden_pages]) + 1
+ multiple = s[:slot_size] / (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD])
+ # Allocatable pages are assumed to have lost 1 slot due to alignment.
+ slots_per_page = (GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT] / multiple) - 1
+
total_slots = s[:heap_eden_slots] + s[:heap_allocatable_pages] * slots_per_page
- # Give a 0.9x delta because integer division in minimum_pages_for_size_pool can sometimes cause number to be
- # less than GC_HEAP_INIT_SLOTS.
- assert_operator(total_slots, :>=, GC_HEAP_INIT_SLOTS * 0.9, s)
+ assert_operator(total_slots, :>=, GC_HEAP_INIT_SLOTS, s)
end
RUBY
@@ -462,10 +462,16 @@ class TestGc < Test::Unit::TestCase
assert_separately([env, "-W0"], __FILE__, __LINE__, <<~RUBY)
SIZES = #{sizes}
GC.stat_heap.each do |i, s|
- # Sometimes pages will have 1 less slot due to alignment, so always increase slots_per_page by 1.
- slots_per_page = (s[:heap_eden_slots] / s[:heap_eden_pages]) + 1
+ multiple = s[:slot_size] / (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD])
+ # Allocatable pages are assumed to have lost 1 slot due to alignment.
+ slots_per_page = (GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT] / multiple) - 1
+
total_slots = s[:heap_eden_slots] + s[:heap_allocatable_pages] * slots_per_page
- assert_in_epsilon(SIZES[i], total_slots, 0.01, s)
+
+ # The delta is calculated as follows:
+ # - For allocated pages, each page can vary by 1 slot due to alignment.
+ # - For allocatable pages, we can end up with at most 1 extra page of slots.
+ assert_in_delta(SIZES[i], total_slots, s[:heap_eden_pages] + slots_per_page, s)
end
RUBY
@@ -486,10 +492,16 @@ class TestGc < Test::Unit::TestCase
# Check that we still have the same number of slots as initially configured.
GC.stat_heap.each do |i, s|
- # Sometimes pages will have 1 less slot due to alignment, so always increase slots_per_page by 1.
- slots_per_page = (s[:heap_eden_slots] / s[:heap_eden_pages]) + 1
+ multiple = s[:slot_size] / (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD])
+ # Allocatable pages are assumed to have lost 1 slot due to alignment.
+ slots_per_page = (GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT] / multiple) - 1
+
total_slots = s[:heap_eden_slots] + s[:heap_allocatable_pages] * slots_per_page
- assert_in_epsilon(SIZES[i], total_slots, 0.01, s)
+
+ # The delta is calculated as follows:
+ # - For allocated pages, each page can vary by 1 slot due to alignment.
+ # - For allocatable pages, we can end up with at most 1 extra page of slots.
+ assert_in_delta(SIZES[i], total_slots, s[:heap_eden_pages] + slots_per_page, s)
end
RUBY
@@ -525,10 +537,16 @@ class TestGc < Test::Unit::TestCase
# Check that we still have the same number of slots as initially configured.
GC.stat_heap.each do |i, s|
- # Sometimes pages will have 1 less slot due to alignment, so always increase slots_per_page by 1.
- slots_per_page = (s[:heap_eden_slots] / s[:heap_eden_pages]) + 1
+ multiple = s[:slot_size] / (GC::INTERNAL_CONSTANTS[:BASE_SLOT_SIZE] + GC::INTERNAL_CONSTANTS[:RVALUE_OVERHEAD])
+ # Allocatable pages are assumed to have lost 1 slot due to alignment.
+ slots_per_page = (GC::INTERNAL_CONSTANTS[:HEAP_PAGE_OBJ_LIMIT] / multiple) - 1
+
total_slots = s[:heap_eden_slots] + s[:heap_allocatable_pages] * slots_per_page
- assert_in_epsilon(SIZES[i], total_slots, 0.01, s)
+
+ # The delta is calculated as follows:
+ # - For allocated pages, each page can vary by 1 slot due to alignment.
+ # - For allocatable pages, we can end up with at most 1 extra page of slots.
+ assert_in_delta(SIZES[i], total_slots, s[:heap_eden_pages] + slots_per_page, s)
end
RUBY
end
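For reference, a worked example of the tolerance used by the assert_in_delta calls above, with assumed constants (the actual GC::INTERNAL_CONSTANTS values vary by platform and build):

    BASE_SLOT_SIZE      = 40   # assumed
    RVALUE_OVERHEAD     = 0    # assumed
    HEAP_PAGE_OBJ_LIMIT = 409  # assumed

    slot_size = 80                                              # a hypothetical 2x size pool
    multiple  = slot_size / (BASE_SLOT_SIZE + RVALUE_OVERHEAD)  # => 2
    slots_per_page = (HEAP_PAGE_OBJ_LIMIT / multiple) - 1       # => 203

    heap_eden_pages = 50  # hypothetical stat for the example

    # Each eden page may keep the 1 slot assumed lost to alignment
    # (+heap_eden_pages in total), and rounding the allocatable page
    # count up can add at most 1 extra page of slots (+slots_per_page):
    delta = heap_eden_pages + slots_per_page  # => 253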