summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
authorPeter Zhu <peter@peterzhu.ca>2023-08-03 10:35:44 -0400
committerPeter Zhu <peter@peterzhu.ca>2023-08-04 09:13:57 -0400
commit4b45b2764b293af45c159d3772692b52752e662a (patch)
tree88fe1bceb0a9b916342a4832e4d11d1ff46e02d7 /gc.c
parent6b570ff2c0f3383f22a456eee6049715c9276383 (diff)
Don't check stack for moved after compaction
We don't need to check the stack for moved objects after compaction because the mutator cannot run between marking the stack and the end of compaction. However, the stack may still contain leftover moved objects from the marking and sweeping phases. Checking for these would cause their pages to be invalidated and all of their objects moved back, which is unnecessary — we don't need to move these objects back. This also fixes the issue on Windows where some compaction tests sometimes fail due to the page of the object being invalidated.
Notes
Notes: Merged: https://github.com/ruby/ruby/pull/8166
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c44
1 file changed, 0 insertions, 44 deletions
diff --git a/gc.c b/gc.c
index 0b5f792839..55b6a7b5d4 100644
--- a/gc.c
+++ b/gc.c
@@ -5376,43 +5376,6 @@ install_handlers(void)
#endif
static void
-revert_stack_objects(VALUE stack_obj, void *ctx)
-{
- rb_objspace_t * objspace = (rb_objspace_t*)ctx;
-
- if (BUILTIN_TYPE(stack_obj) == T_MOVED) {
- /* For now we'll revert the whole page if the object made it to the
- * stack. I think we can change this to move just the one object
- * back though */
- invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
- }
-}
-
-static void
-revert_machine_stack_references(rb_objspace_t *objspace, VALUE v)
-{
- if (is_pointer_to_heap(objspace, (void *)v)) {
- if (BUILTIN_TYPE(v) == T_MOVED) {
- /* For now we'll revert the whole page if the object made it to the
- * stack. I think we can change this to move just the one object
- * back though */
- invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
- }
- }
-}
-
-static void each_machine_stack_value(const rb_execution_context_t *ec, void (*cb)(rb_objspace_t *, VALUE));
-
-static void
-check_stack_for_moved(rb_objspace_t *objspace)
-{
- rb_execution_context_t *ec = GET_EC();
- rb_vm_t *vm = rb_ec_vm_ptr(ec);
- rb_vm_each_stack_value(vm, revert_stack_objects, (void*)objspace);
- each_machine_stack_value(ec, revert_machine_stack_references);
-}
-
-static void
gc_compact_finish(rb_objspace_t *objspace)
{
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
@@ -5423,13 +5386,6 @@ gc_compact_finish(rb_objspace_t *objspace)
uninstall_handlers();
- /* The mutator is allowed to run during incremental sweeping. T_MOVED
- * objects can get pushed on the stack and when the compaction process
- * finishes up, it may remove the read barrier before anything has a
- * chance to read from the T_MOVED address. To fix this, we scan the stack
- * then revert any moved objects that made it to the stack. */
- check_stack_for_moved(objspace);
-
gc_update_references(objspace);
objspace->profile.compact_count++;