author     Aaron Patterson <tenderlove@ruby-lang.org>  2020-11-25 09:24:50 -0800
committer  Aaron Patterson <tenderlove@ruby-lang.org>  2020-11-25 11:29:14 -0800
commit     c32218de1ba094223420a4ea017707f48d0009c5 (patch)
tree       acee8f29e3d3fdd53f1c0d386eed8203cc532ff9
parent     7aaf6676c416e773b0b3053de2e250a73c2b2e77 (diff)
Disable auto compaction on platforms that can't support it
Both explicit compaction routines (gc_compact and the verify references form) need to clear the heap before executing compaction. Otherwise some objects may not be alive, and we'll need the read barrier. The heap must only contain *live* objects if we want to disable the read barrier during explicit compaction.

The previous commit was missing the "clear the heap" phase from the "verify references" explicit compaction function.

Fixes [Bug #17306]
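For illustration only (not part of the patch), here is a minimal Ruby sketch of the user-visible behavior after this change: GC.compact first performs a full GC so the heap contains only live objects, then compacts without installing the read barrier, and returns the compaction statistics. It assumes a platform where compaction is supported.

    # Illustrative usage sketch; GC.compact and GC.stat are the real APIs.
    before = GC.stat(:compact_count)

    stats = GC.compact                    # full GC to clear the heap, then compact
    p GC.stat(:compact_count) > before    # => true, gc_compact_finish bumped the counter
    p stats[:considered].class            # => Hash, per-type counts considered for moving
    p stats[:moved].class                 # => Hash, per-type counts of moved objects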
-rw-r--r--  gc.c                           69
-rw-r--r--  gc.rb                           3
-rw-r--r--  test/ruby/test_gc_compact.rb  112
3 files changed, 134 insertions(+), 50 deletions(-)
diff --git a/gc.c b/gc.c
index 5573ee5dbe..195a753823 100644
--- a/gc.c
+++ b/gc.c
@@ -682,7 +682,7 @@ typedef struct rb_objspace {
unsigned int dont_gc : 1;
unsigned int dont_incremental : 1;
unsigned int during_gc : 1;
- unsigned int during_compacting : 1;
+ unsigned int during_compacting : 2;
unsigned int gc_stressful: 1;
unsigned int has_hook: 1;
unsigned int during_minor_gc : 1;
@@ -3090,6 +3090,17 @@ Init_heap(void)
{
rb_objspace_t *objspace = &rb_objspace;
+#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
+ /* If Ruby's heap pages are not a multiple of the system page size, we
+ * cannot use mprotect for the read barrier, so we must disable automatic
+ * compaction. */
+ int pagesize;
+ pagesize = (int)sysconf(_SC_PAGE_SIZE);
+ if ((HEAP_PAGE_SIZE % pagesize) != 0) {
+ ruby_enable_autocompact = 0;
+ }
+#endif
+
objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
objspace->obj_to_id_tbl = st_init_numtable();
@@ -4389,6 +4400,11 @@ static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free);
static void
lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
+ /* If this is an explicit compaction (GC.compact), we don't need a read
+ * barrier, so just return early. */
+ if (objspace->flags.during_compacting >> 1) {
+ return;
+ }
#if defined(_WIN32)
DWORD old_protect;
@@ -4405,6 +4421,11 @@ lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
static void
unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
+ /* If this is an explicit compaction (GC.compact), we don't need a read
+ * barrier, so just return early. */
+ if (objspace->flags.during_compacting >> 1) {
+ return;
+ }
#if defined(_WIN32)
DWORD old_protect;
@@ -4624,8 +4645,12 @@ gc_compact_finish(rb_objspace_t *objspace, rb_heap_t *heap)
{
GC_ASSERT(heap->sweeping_page == heap->compact_cursor);
- gc_unprotect_pages(objspace, heap);
- uninstall_handlers();
+ /* If this is an explicit compaction (GC.compact), no read barrier was set
+ * so we don't need to unprotect pages or uninstall the SEGV handler */
+ if (!(objspace->flags.during_compacting >> 1)) {
+ gc_unprotect_pages(objspace, heap);
+ uninstall_handlers();
+ }
/* The mutator is allowed to run during incremental sweeping. T_MOVED
* objects can get pushed on the stack and when the compaction process
@@ -5134,6 +5159,12 @@ gc_compact_start(rb_objspace_t *objspace, rb_heap_t *heap)
memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
+ /* If this is an explicit compaction (GC.compact), we don't need a read
+ * barrier, so just return early. */
+ if (objspace->flags.during_compacting >> 1) {
+ return;
+ }
+
/* Set up read barrier for pages containing MOVED objects */
install_handlers();
}
@@ -7030,7 +7061,7 @@ gc_marks_start(rb_objspace_t *objspace, int full_mark)
#endif
objspace->flags.during_minor_gc = FALSE;
if (ruby_enable_autocompact) {
- objspace->flags.during_compacting = TRUE;
+ objspace->flags.during_compacting |= TRUE;
}
objspace->profile.major_gc_count++;
objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
@@ -8057,7 +8088,9 @@ gc_start(rb_objspace_t *objspace, int reason)
/* reason may be clobbered, later, so keep set immediate_sweep here */
objspace->flags.immediate_sweep = !!((unsigned)reason & GPR_FLAG_IMMEDIATE_SWEEP);
- objspace->flags.during_compacting = !!((unsigned)reason & GPR_FLAG_COMPACT);
+
+ /* Explicitly enable compaction (GC.compact) */
+ objspace->flags.during_compacting = (!!((unsigned)reason & GPR_FLAG_COMPACT) << 1);
if (!heap_allocated_pages) return FALSE; /* heap is not ready */
if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
@@ -9248,10 +9281,26 @@ heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
}
static VALUE
+gc_compact(rb_execution_context_t *ec, VALUE self)
+{
+ /* Clear the heap. */
+ gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
+ /* At this point, all references are live and the mutator is not allowed
+ * to run, so we don't need a read barrier. */
+ gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qtrue);
+
+ return gc_compact_stats(ec, self);
+}
+
+static VALUE
gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
{
rb_objspace_t *objspace = &rb_objspace;
+ /* Clear the heap. */
+ gc_start_internal(ec, self, Qtrue, Qtrue, Qtrue, Qfalse);
+
RB_VM_LOCK_ENTER();
{
gc_rest(objspace);
@@ -9865,6 +9914,16 @@ gc_disable(rb_execution_context_t *ec, VALUE _)
static VALUE
gc_set_auto_compact(rb_execution_context_t *ec, VALUE _, VALUE v)
{
+#if defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
+ /* If Ruby's heap pages are not a multiple of the system page size, we
+ * cannot use mprotect for the read barrier, so we must disable automatic
+ * compaction. */
+ int pagesize;
+ pagesize = (int)sysconf(_SC_PAGE_SIZE);
+ if ((HEAP_PAGE_SIZE % pagesize) != 0) {
+ rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
+ }
+#endif
ruby_enable_autocompact = RTEST(v);
return v;
}
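The page-size constraint enforced above in C can also be checked from Ruby, which is how the new tests decide whether to skip. A hedged sketch (mirroring the supports_auto_compact? helper added in the test file below; the helper additionally rescues NotImplementedError and ArgumentError from Etc.sysconf):

    require 'etc'

    # Automatic compaction relies on mprotect-based read barriers, which only
    # work when Ruby's heap page size is a multiple of the OS page size.
    heap_page = GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]
    os_page   = defined?(Etc::SC_PAGE_SIZE) ? Etc.sysconf(Etc::SC_PAGE_SIZE) : nil
    p os_page.nil? || (heap_page % os_page).zero?   # => true when supported

    begin
      GC.auto_compact = true   # raises on unsupported platforms after this patch
    rescue NotImplementedError => e
      warn e.message           # "Automatic compaction isn't available on this platform"
    end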
diff --git a/gc.rb b/gc.rb
index d2b0d8e86e..4e0faaf00d 100644
--- a/gc.rb
+++ b/gc.rb
@@ -199,8 +199,7 @@ module GC
end
def self.compact
- Primitive.gc_start_internal true, true, true, true
- Primitive.gc_compact_stats
+ Primitive.gc_compact
end
# call-seq:
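As a usage note (illustrative, not from the patch): folding the two primitives into Primitive.gc_compact does not change GC.compact's return value; it is still the statistics hash that GC.latest_compact_info reports.

    stats = GC.compact
    # Both hashes are built from the same per-type counters, so they carry
    # the same :considered and :moved information.
    p stats.keys & [:considered, :moved]                    # => [:considered, :moved]
    p GC.latest_compact_info.keys & [:considered, :moved]   # => [:considered, :moved]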
diff --git a/test/ruby/test_gc_compact.rb b/test/ruby/test_gc_compact.rb
index 3aad9e6d5f..4a8cff33f4 100644
--- a/test/ruby/test_gc_compact.rb
+++ b/test/ruby/test_gc_compact.rb
@@ -1,56 +1,82 @@
# frozen_string_literal: true
require 'test/unit'
require 'fiddle'
+require 'etc'
class TestGCCompact < Test::Unit::TestCase
- def test_enable_autocompact
- before = GC.auto_compact
- GC.auto_compact = true
- assert GC.auto_compact
- ensure
- GC.auto_compact = before
- end
+ class AutoCompact < Test::Unit::TestCase
+ def setup
+ skip "autocompact not supported on this platform" unless supports_auto_compact?
+ super
+ end
- def test_disable_autocompact
- before = GC.auto_compact
- GC.auto_compact = false
- refute GC.auto_compact
- ensure
- GC.auto_compact = before
- end
+ def test_enable_autocompact
+ before = GC.auto_compact
+ GC.auto_compact = true
+ assert GC.auto_compact
+ ensure
+ GC.auto_compact = before
+ end
- def test_major_compacts
- before = GC.auto_compact
- GC.auto_compact = true
- compact = GC.stat :compact_count
- GC.start
- assert_operator GC.stat(:compact_count), :>, compact
- ensure
- GC.auto_compact = before
- end
+ def test_disable_autocompact
+ before = GC.auto_compact
+ GC.auto_compact = false
+ refute GC.auto_compact
+ ensure
+ GC.auto_compact = before
+ end
- def test_implicit_compaction_does_something
- before = GC.auto_compact
- list = []
- list2 = []
+ def test_major_compacts
+ before = GC.auto_compact
+ GC.auto_compact = true
+ compact = GC.stat :compact_count
+ GC.start
+ assert_operator GC.stat(:compact_count), :>, compact
+ ensure
+ GC.auto_compact = before
+ end
- # Try to make some fragmentation
- 500.times {
- list << Object.new
- Object.new
- Object.new
- }
- count = GC.stat :compact_count
- GC.auto_compact = true
- loop do
- break if count < GC.stat(:compact_count)
- list2 << Object.new
+ def test_implicit_compaction_does_something
+ before = GC.auto_compact
+ list = []
+ list2 = []
+
+ # Try to make some fragmentation
+ 500.times {
+ list << Object.new
+ Object.new
+ Object.new
+ }
+ count = GC.stat :compact_count
+ GC.auto_compact = true
+ loop do
+ break if count < GC.stat(:compact_count)
+ list2 << Object.new
+ end
+ compact_stats = GC.latest_compact_info
+ refute_predicate compact_stats[:considered], :empty?
+ refute_predicate compact_stats[:moved], :empty?
+ ensure
+ GC.auto_compact = before
end
- compact_stats = GC.latest_compact_info
- refute_predicate compact_stats[:considered], :empty?
- refute_predicate compact_stats[:moved], :empty?
- ensure
- GC.auto_compact = before
+
+ private
+
+ def supports_auto_compact?
+ return true unless defined?(Etc::SC_PAGE_SIZE)
+
+ begin
+ return GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE] % Etc.sysconf(Etc::SC_PAGE_SIZE) == 0
+ rescue NotImplementedError
+ rescue ArgumentError
+ end
+
+ true
+ end
+ end
+
+ def os_page_size
+ return true unless defined?(Etc::SC_PAGE_SIZE)
end
def test_gc_compact_stats