author     KJ Tsanaktsidis <kj@kjtsanaktsidis.id.au>   2023-11-27 16:50:05 +1100
committer  Peter Zhu <peter@peterzhu.ca>                2023-12-07 10:19:35 -0500
commit     cbc0e0bef08f9389f5bbe76de016795f01c3bc76 (patch)
tree       415d9399dc5db15e641e4f0c52dfe02f8b27389b /gc.c
parent     5d832d16d9878766dfe527934ef1ad64698b9bf8 (diff)
Fix GC.verify_compaction_references not moving every object
The intention of GC.verify_compaction_references is, I believe, to force every single movable object to be moved, so that it's possible to debug native extensions which do not correctly update their references to objects they mark as movable. To do this, it doubles the number of allocated pages for each size pool and sorts the heap pages so that the free ones are swept first; thus, every object in an old page should be moved into a free slot in one of the new pages.

This worked fine until movement of objects _between_ size pools during compaction was implemented. That causes some problems for verify_compaction_references:

* We were doubling the number of pages in each size pool, but if some objects need to move into a _different_ pool, there's no guarantee that there'll be enough room in that one.
* It's possible for the sweep & compact cursors to meet in one size pool before all the objects that want to move into that size pool from another one have been processed by the compaction.

You can see these problems by changing some of the movement tests in test_gc_compact.rb to try to move e.g. 50,000 objects instead of 500; the test is not able to move all of the objects in a single compaction run.

To fix this, we do two things in verify_compaction_references:

* Firstly, we add enough pages to every size pool to make them all the same size. This ensures that their compact cursors will all have space to move during compaction (even if that means empty pages are pointlessly compacted).
* Then, we examine every object and determine which size pool it _wants_ to be compacted into. We use this information to add additional pages to each size pool to hold all objects which should live there.

With these two changes, we can move an arbitrary number of objects into the correct size pool in a single call to verify_compaction_references.

My _motivation_ for this work was to fix some test stability issues in test_gc_compact.rb. I no longer think that we actually see this particular bug on rubyci.org, but I also think verify_compaction_references should do what it says on the tin, so it's worth keeping.

[Bug #20022]
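For illustration, here is a minimal sketch of how this path can be exercised from Ruby. The keyword arguments correspond to the double_heap/expand_heap/toward_empty parameters of the C function changed below; the snippet itself is an assumed example, not part of this commit or of test_gc_compact.rb:

    # Sketch: allocate enough objects that some of them will want to move
    # between size pools during compaction.
    objs = Array.new(50_000) { Object.new }

    # expand_heap: true takes the new sizing path added here; toward: :empty sorts
    # the heap so free pages are swept first and every movable object is evacuated.
    GC.verify_compaction_references(expand_heap: true, toward: :empty)

    # A native extension that failed to update a moved reference will typically
    # crash or fail reference verification during or after the call above.
    objs.each(&:itself)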
Diffstat (limited to 'gc.c')
-rw-r--r--    gc.c    84
1 file changed, 77 insertions(+), 7 deletions(-)
diff --git a/gc.c b/gc.c
index f97f1cbbae..9ef2b4a48a 100644
--- a/gc.c
+++ b/gc.c
@@ -11054,6 +11054,39 @@ gc_compact(VALUE self)
 #endif
 
 #if GC_CAN_COMPILE_COMPACTION
+
+struct desired_compaction_pages_i_data {
+    rb_objspace_t *objspace;
+    size_t required_slots[SIZE_POOL_COUNT];
+};
+
+static int
+desired_compaction_pages_i(struct heap_page *page, void *data)
+{
+    struct desired_compaction_pages_i_data *tdata = data;
+    rb_objspace_t *objspace = tdata->objspace;
+    VALUE vstart = (VALUE)page->start;
+    VALUE vend = vstart + (VALUE)(page->total_slots * page->size_pool->slot_size);
+
+
+    for (VALUE v = vstart; v != vend; v += page->size_pool->slot_size) {
+        /* skip T_NONEs; they won't be moved */
+        void *poisoned = asan_unpoison_object_temporary(v);
+        if (BUILTIN_TYPE(v) == T_NONE) {
+            if (poisoned) {
+                asan_poison_object(v);
+            }
+            continue;
+        }
+
+        rb_size_pool_t *dest_pool = gc_compact_destination_pool(objspace, page->size_pool, v);
+        size_t dest_pool_idx = dest_pool - size_pools;
+        tdata->required_slots[dest_pool_idx]++;
+    }
+
+    return 0;
+}
+
 static VALUE
 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE expand_heap, VALUE toward_empty)
 {
@@ -11071,18 +11104,55 @@ gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE do
         gc_rest(objspace);
 
         /* if both double_heap and expand_heap are set, expand_heap takes precedence */
-        if (RTEST(double_heap) || RTEST(expand_heap)) {
+        if (RTEST(expand_heap)) {
+            struct desired_compaction_pages_i_data desired_compaction = {
+                .objspace = objspace,
+                .required_slots = {0},
+            };
+            /* Work out how many objects want to be in each size pool, taking account of moves */
+            objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
+
+            /* Find out which pool has the most pages */
+            size_t max_existing_pages = 0;
+            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+                rb_size_pool_t *size_pool = &size_pools[i];
+                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+                max_existing_pages = MAX(max_existing_pages, heap->total_pages);
+            }
+            /* Add pages to each size pool so that compaction is guaranteed to move every object */
             for (int i = 0; i < SIZE_POOL_COUNT; i++) {
                 rb_size_pool_t *size_pool = &size_pools[i];
                 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
-                size_t minimum_pages = 0;
-                if (RTEST(expand_heap)) {
-                    minimum_pages = minimum_pages_for_size_pool(objspace, size_pool);
-                }
-
-                heap_add_pages(objspace, size_pool, heap, MAX(minimum_pages, heap->total_pages));
+                size_t pages_to_add = 0;
+                /*
+                 * Step 1: Make sure every pool has the same number of pages, by adding empty pages
+                 * to smaller pools. This is required to make sure the compact cursor can advance
+                 * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
+                 * compact cursors met" condition on some pools before fully compacting others
+                 */
+                pages_to_add += max_existing_pages - heap->total_pages;
+                /*
+                 * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
+                 * that want to be in that size pool, whether moved into it or moved within it
+                 */
+                pages_to_add += slots_to_pages_for_size_pool(objspace, size_pool, desired_compaction.required_slots[i]);
+                /*
+                 * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
+                 * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
+                 */
+                pages_to_add += 2;
+
+                heap_add_pages(objspace, size_pool, heap, pages_to_add);
+            }
+        }
+        else if (RTEST(double_heap)) {
+            for (int i = 0; i < SIZE_POOL_COUNT; i++) {
+                rb_size_pool_t *size_pool = &size_pools[i];
+                rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
+                heap_add_pages(objspace, size_pool, heap, heap->total_pages);
             }
+
         }
 
         if (RTEST(toward_empty)) {