| author | John Hawthorn <john@hawthorn.email> | 2025-07-23 12:12:58 -0700 |
|---|---|---|
| committer | John Hawthorn <john@hawthorn.email> | 2025-07-23 14:33:55 -0700 |
| commit | 9256442615db227ab8ccd18b0ca65da980de7eaf (patch) | |
| tree | 841588299e24c6a6cb5604159f29e98a4870de26 /internal | |
| parent | d67eb07f7549508da09e6f3aa2dbe55ad0ba2da1 (diff) | |
Cleanup M_TBL workarounds and comments
Previously we had an assertion that the method table was only set on
young objects, and a comment stating that this was how it needed to be used.
I think that obscured which write barriers are actually needed here:
* Setting an empty M_TBL never needs a write barrier
* T_CLASS and T_MODULE should always fire a write barrier to newly added
methods
* T_ICLASS only needs a write barrier to methods when
RCLASSEXT_ICLASS_IS_ORIGIN(x) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(x)
We shouldn't assume that the object being young is sufficient: write
barriers are also needed during incremental marking, so relying on the
object's age alone is unreliable.
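To make those rules concrete, here is a hedged sketch (not part of this commit) of a setter that applies them. The helper names `example_set_m_tbl_with_wb` and `example_fire_method_wb` are invented for illustration; `RCLASS_WRITE_M_TBL`, `RCLASS_EXT_WRITABLE`, the `RCLASSEXT_ICLASS_*` predicates, `RB_OBJ_WRITTEN`, and the `rb_id_table_*` functions are assumed to behave as CRuby's internal headers define them.

```c
/* Hypothetical sketch only; relies on CRuby internal headers. */
#include "internal/class.h"   /* RCLASS_WRITE_M_TBL, RCLASS_EXT_WRITABLE, ... */
#include "id_table.h"         /* struct rb_id_table, rb_id_table_foreach, ... */

static enum rb_id_table_iterator_result
example_fire_method_wb(ID mid, VALUE me, void *data)
{
    VALUE klass = (VALUE)data;
    (void)mid;
    /* Tell the GC that klass now references this method entry, regardless
     * of whether klass has already been promoted to the old generation. */
    RB_OBJ_WRITTEN(klass, Qundef, me);
    return ID_TABLE_CONTINUE;
}

static void
example_set_m_tbl_with_wb(VALUE klass, struct rb_id_table *table)
{
    bool needs_wb = false;

    if (table && rb_id_table_size(table) > 0) {
        switch (BUILTIN_TYPE(klass)) {
          case T_CLASS:
          case T_MODULE:
            /* Classes and modules always own their method entries. */
            needs_wb = true;
            break;
          case T_ICLASS:
            /* An ICLASS only owns the table when it is the origin and the
             * origin's m_tbl is not shared with the module it wraps. */
            needs_wb = RCLASSEXT_ICLASS_IS_ORIGIN(RCLASS_EXT_WRITABLE(klass)) &&
                       !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(RCLASS_EXT_WRITABLE(klass));
            break;
          default:
            break;
        }
    }

    /* The plain setter is a bare assignment; barriers are handled here. */
    RCLASS_WRITE_M_TBL(klass, table);

    if (needs_wb) {
        /* An empty table never reaches this point; a populated one fires a
         * barrier per entry instead of relying on klass still being young. */
        rb_id_table_foreach(table, example_fire_method_wb, (void *)klass);
    }
}
```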
Diffstat (limited to 'internal')
| -rw-r--r-- | internal/class.h | 17 |
1 files changed, 2 insertions, 15 deletions
diff --git a/internal/class.h b/internal/class.h
index f8cfba3fd9..520994170f 100644
--- a/internal/class.h
+++ b/internal/class.h
@@ -259,9 +259,6 @@ static inline void RCLASSEXT_SET_INCLUDER(rb_classext_t *ext, VALUE klass, VALUE
 static inline void RCLASS_SET_SUPER(VALUE klass, VALUE super);
 static inline void RCLASS_WRITE_SUPER(VALUE klass, VALUE super);
-// TODO: rename RCLASS_SET_M_TBL_WORKAROUND (and _WRITE_) to RCLASS_SET_M_TBL with write barrier
-static inline void RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
-static inline void RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted);
 static inline void RCLASS_SET_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CONST_TBL(VALUE klass, struct rb_id_table *table, bool shared);
 static inline void RCLASS_WRITE_CALLABLE_M_TBL(VALUE klass, struct rb_id_table *table);
@@ -594,25 +591,15 @@ RCLASS_FIELDS_COUNT(VALUE obj)
     return 0;
 }
 
-#define RCLASS_SET_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_SET_M_TBL(klass, table) RCLASS_SET_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_SET_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_SET_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
     RCLASSEXT_M_TBL(RCLASS_EXT_PRIME(klass)) = table;
 }
 
-#define RCLASS_WRITE_M_TBL_EVEN_WHEN_PROMOTED(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, false)
-#define RCLASS_WRITE_M_TBL(klass, table) RCLASS_WRITE_M_TBL_WORKAROUND(klass, table, true)
-
 static inline void
-RCLASS_WRITE_M_TBL_WORKAROUND(VALUE klass, struct rb_id_table *table, bool check_promoted)
+RCLASS_WRITE_M_TBL(VALUE klass, struct rb_id_table *table)
 {
-    RUBY_ASSERT(!check_promoted || !RB_OBJ_PROMOTED(klass));
-    // TODO: add write barrier here to guard assigning m_tbl
-    // see commit 28a6e4ea9d9379a654a8f7c4b37fa33aa3ccd0b7
     RCLASSEXT_M_TBL(RCLASS_EXT_WRITABLE(klass)) = table;
 }
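With the `_WORKAROUND` variants and the `!RB_OBJ_PROMOTED` assertion gone, both setters are plain assignments and any needed barrier is left to the call site. A minimal, hypothetical caller sketch follows (the function name and the single-entry table are inventions for illustration; `rb_id_table_create`, `rb_id_table_insert`, and `RB_OBJ_WRITTEN` are existing CRuby internals, and `rb_method_entry_t` comes from method.h):

```c
/* Hypothetical caller sketch, not from this commit. */
static void
example_install_single_method(VALUE klass, ID mid, const rb_method_entry_t *me)
{
    struct rb_id_table *tbl = rb_id_table_create(1);

    rb_id_table_insert(tbl, mid, (VALUE)me);
    RCLASS_SET_M_TBL(klass, tbl);

    /* Per the commit message, T_CLASS/T_MODULE always fire a write barrier
     * for newly added methods, whether or not klass is still young. */
    RB_OBJ_WRITTEN(klass, Qundef, (VALUE)me);
}
```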
