summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c249
1 files changed, 126 insertions, 123 deletions
diff --git a/gc.c b/gc.c
index 1f5d0fe365..474454e487 100644
--- a/gc.c
+++ b/gc.c
@@ -21,8 +21,6 @@
#include <signal.h>
-#define sighandler_t ruby_sighandler_t
-
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
@@ -226,6 +224,9 @@ size_add_overflow(size_t x, size_t y)
bool p;
#if 0
+#elif defined(ckd_add)
+ p = ckd_add(&z, x, y);
+
#elif __has_builtin(__builtin_add_overflow)
p = __builtin_add_overflow(x, y, &z);
@@ -418,7 +419,6 @@ rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
#endif
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
-#define TICK_TYPE 1
typedef struct {
size_t size_pool_init_slots[SIZE_POOL_COUNT];
@@ -689,29 +689,33 @@ typedef struct RVALUE {
VALUE v3;
} values;
} as;
+} RVALUE;
- /* Start of RVALUE_OVERHEAD.
- * Do not directly read these members from the RVALUE as they're located
- * at the end of the slot (which may differ in size depending on the size
- * pool). */
-#if RACTOR_CHECK_MODE
+/* These members are located at the end of the slot that the object is in. */
+#if RACTOR_CHECK_MODE || GC_DEBUG
+struct rvalue_overhead {
+# if RACTOR_CHECK_MODE
uint32_t _ractor_belonging_id;
-#endif
-#if GC_DEBUG
+# endif
+# if GC_DEBUG
const char *file;
int line;
-#endif
-} RVALUE;
+# endif
+};
-#if RACTOR_CHECK_MODE
-# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
-#elif GC_DEBUG
-# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
+// Make sure that RVALUE_OVERHEAD aligns to sizeof(VALUE)
+# define RVALUE_OVERHEAD (sizeof(struct { \
+ union { \
+ struct rvalue_overhead overhead; \
+ VALUE value; \
+ }; \
+}))
+# define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_obj_slot_size(obj)))
#else
# define RVALUE_OVERHEAD 0
#endif
-STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
+STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5));
STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
typedef uintptr_t bits_t;
@@ -719,7 +723,6 @@ enum {
BITS_SIZE = sizeof(bits_t),
BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};
-#define popcount_bits rb_popcount_intptr
struct heap_page_header {
struct heap_page *page;
@@ -818,7 +821,7 @@ typedef struct rb_objspace {
} flags;
rb_event_flag_t hook_events;
- VALUE next_object_id;
+ unsigned long long next_object_id;
rb_size_pool_t size_pools[SIZE_POOL_COUNT];
@@ -956,7 +959,7 @@ typedef struct rb_objspace {
#define HEAP_PAGE_ALIGN_LOG 16
#endif
-#define BASE_SLOT_SIZE sizeof(RVALUE)
+#define BASE_SLOT_SIZE (sizeof(RVALUE) + RVALUE_OVERHEAD)
#define CEILDIV(i, mod) roomof(i, mod)
enum {
@@ -1294,6 +1297,7 @@ total_freed_objects(rb_objspace_t *objspace)
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
+#define gc_needs_major_flags objspace->rgengc.need_major_gc
#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
@@ -1418,17 +1422,7 @@ static const char *obj_type_name(VALUE obj);
static void gc_finalize_deferred(void *dmy);
-/*
- * 1 - TSC (H/W Time Stamp Counter)
- * 2 - getrusage
- */
-#ifndef TICK_TYPE
-#define TICK_TYPE 1
-#endif
-
#if USE_TICK_T
-
-#if TICK_TYPE == 1
/* the following code is only for internal tuning. */
/* Source code to use RDTSC is quoted and modified from
@@ -1525,28 +1519,6 @@ tick(void)
return clock();
}
#endif /* TSC */
-
-#elif TICK_TYPE == 2
-typedef double tick_t;
-#define PRItick "4.9f"
-
-static inline tick_t
-tick(void)
-{
- return getrusage_time();
-}
-#else /* TICK_TYPE */
-#error "choose tick type"
-#endif /* TICK_TYPE */
-
-#define MEASURE_LINE(expr) do { \
- volatile tick_t start_time = tick(); \
- volatile tick_t end_time; \
- expr; \
- end_time = tick(); \
- fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
-} while (0)
-
#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */
@@ -1884,44 +1856,45 @@ rb_gc_initial_stress_set(VALUE flag)
initial_stress = flag;
}
-static void * Alloc_GC_impl(void);
+static void *rb_gc_impl_objspace_alloc(void);
#if USE_SHARED_GC
# include "dln.h"
-# define Alloc_GC rb_gc_functions->init
+
+# define RUBY_GC_LIBRARY_PATH "RUBY_GC_LIBRARY_PATH"
void
-ruby_external_gc_init()
+ruby_external_gc_init(void)
{
- rb_gc_function_map_t *map = malloc(sizeof(rb_gc_function_map_t));
- rb_gc_functions = map;
-
- char *gc_so_path = getenv("RUBY_GC_LIBRARY_PATH");
- if (!gc_so_path) {
- map->init = Alloc_GC_impl;
- return;
+ char *gc_so_path = getenv(RUBY_GC_LIBRARY_PATH);
+ void *handle = NULL;
+ if (gc_so_path && dln_supported_p()) {
+ char error[1024];
+ handle = dln_open(gc_so_path, error, sizeof(error));
+ if (!handle) {
+ fprintf(stderr, "%s", error);
+ rb_bug("ruby_external_gc_init: Shared library %s cannot be opened", gc_so_path);
+ }
}
- void *h = dln_open(gc_so_path);
- if (!h) {
- rb_bug(
- "ruby_external_gc_init: Shared library %s cannot be opened.",
- gc_so_path
- );
- }
+# define load_external_gc_func(name) do { \
+ if (handle) { \
+ rb_gc_functions->name = dln_symbol(handle, "rb_gc_impl_" #name); \
+ if (!rb_gc_functions->name) { \
+ rb_bug("ruby_external_gc_init: " #name " func not exported by library %s", gc_so_path); \
+ } \
+ } \
+ else { \
+ rb_gc_functions->name = rb_gc_impl_##name; \
+ } \
+} while (0)
- void *gc_init_func = dln_symbol(h, "Init_GC");
- if (!gc_init_func) {
- rb_bug(
- "ruby_external_gc_init: Init_GC func not exported by library %s",
- gc_so_path
- );
- }
+ load_external_gc_func(objspace_alloc);
- map->init = gc_init_func;
+# undef load_external_gc_func
}
-#else
-# define Alloc_GC Alloc_GC_impl
+
+# define rb_gc_impl_objspace_alloc rb_gc_functions->objspace_alloc
#endif
rb_objspace_t *
@@ -1930,9 +1903,13 @@ rb_objspace_alloc(void)
#if USE_SHARED_GC
ruby_external_gc_init();
#endif
- return (rb_objspace_t *)Alloc_GC();
+ return (rb_objspace_t *)rb_gc_impl_objspace_alloc();
}
+#if USE_SHARED_GC
+# undef rb_gc_impl_objspace_alloc
+#endif
+
static void free_stack_chunks(mark_stack_t *);
static void mark_stack_free_cache(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
@@ -2552,7 +2529,7 @@ heap_prepare(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *heap
* sweeping and still don't have a free page, then
* gc_sweep_finish_size_pool should allow us to create a new page. */
if (heap->free_pages == NULL && !heap_increment(objspace, size_pool, heap)) {
- if (objspace->rgengc.need_major_gc == GPR_FLAG_NONE) {
+ if (gc_needs_major_flags == GPR_FLAG_NONE) {
rb_bug("cannot create a new page after GC");
}
else { // Major GC is required, which will allow us to create new page
@@ -2661,7 +2638,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
#endif
#if GC_DEBUG
- RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
+ GET_RVALUE_OVERHEAD(obj)->file = rb_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif
@@ -2694,12 +2671,6 @@ size_pool_slot_size(unsigned char pool_id)
return slot_size;
}
-size_t
-rb_size_pool_slot_size(unsigned char pool_id)
-{
- return size_pool_slot_size(pool_id);
-}
-
bool
rb_gc_size_allocatable_p(size_t size)
{
@@ -2713,7 +2684,7 @@ rb_gc_size_pool_sizes(void)
{
if (size_pool_sizes[0] == 0) {
for (unsigned char i = 0; i < SIZE_POOL_COUNT; i++) {
- size_pool_sizes[i] = rb_size_pool_slot_size(i);
+ size_pool_sizes[i] = size_pool_slot_size(i);
}
}
@@ -3509,7 +3480,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
}
-#define OBJ_ID_INCREMENT (sizeof(RVALUE))
+#define OBJ_ID_INCREMENT (BASE_SLOT_SIZE)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
static int
@@ -3539,7 +3510,7 @@ static const struct st_hash_type object_id_hash_type = {
};
static void *
-Alloc_GC_impl(void)
+rb_gc_impl_objspace_alloc(void)
{
rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
ruby_current_vm_ptr->objspace = objspace;
@@ -3575,7 +3546,7 @@ Alloc_GC_impl(void)
heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
#endif
- objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
+ objspace->next_object_id = OBJ_ID_INITIAL;
objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
objspace->obj_to_id_tbl = st_init_numtable();
@@ -4553,7 +4524,7 @@ id2ref(VALUE objid)
}
}
- if (rb_int_ge(objid, objspace->next_object_id)) {
+ if (rb_int_ge(objid, ULL2NUM(objspace->next_object_id))) {
rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
}
else {
@@ -4595,8 +4566,8 @@ cached_object_id(VALUE obj)
else {
GC_ASSERT(!FL_TEST(obj, FL_SEEN_OBJ_ID));
- id = objspace->next_object_id;
- objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));
+ id = ULL2NUM(objspace->next_object_id);
+ objspace->next_object_id += OBJ_ID_INCREMENT;
VALUE already_disabled = rb_gc_disable_no_rest();
st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
@@ -5634,7 +5605,7 @@ gc_sweep_finish_size_pool(rb_objspace_t *objspace, rb_size_pool_t *size_pool)
grow_heap = TRUE;
}
else if (is_growth_heap) { /* Only growth heaps are allowed to start a major GC. */
- objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
+ gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
size_pool->force_major_gc_count++;
}
}
@@ -7228,7 +7199,6 @@ gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
rb_gc_mark_global_tbl();
MARK_CHECKPOINT("object_id");
- rb_gc_mark(objspace->next_object_id);
mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl); /* Only mark ids */
if (stress_to_class) rb_gc_mark(stress_to_class);
@@ -8074,7 +8044,7 @@ gc_marks_finish(rb_objspace_t *objspace)
}
else {
gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
- objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
+ gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
}
}
}
@@ -8090,20 +8060,20 @@ gc_marks_finish(rb_objspace_t *objspace)
}
if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
- objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
+ gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
}
if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
- objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
+ gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
}
if (RGENGC_FORCE_MAJOR_GC) {
- objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
+ gc_needs_major_flags = GPR_FLAG_MAJOR_BY_FORCE;
}
gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
"old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
"sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
- objspace->rgengc.need_major_gc ? "major" : "minor");
+ gc_needs_major_flags ? "major" : "minor");
}
rb_ractor_finish_marking();
@@ -8228,7 +8198,7 @@ gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *
do {
VALUE vp = (VALUE)p;
- GC_ASSERT(vp % sizeof(RVALUE) == 0);
+ GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
if (bitset & 1) {
objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
@@ -8945,7 +8915,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
#if RGENGC_ESTIMATE_OLDMALLOC
if (!full_mark) {
if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
- objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
+ gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
objspace->rgengc.oldmalloc_increase_limit =
(size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
@@ -8956,7 +8926,7 @@ gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
rb_gc_count(),
- objspace->rgengc.need_major_gc,
+ gc_needs_major_flags,
objspace->rgengc.oldmalloc_increase,
objspace->rgengc.oldmalloc_increase_limit,
gc_params.oldmalloc_limit_max);
@@ -9032,8 +9002,8 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
}
- if (objspace->rgengc.need_major_gc) {
- reason |= objspace->rgengc.need_major_gc;
+ if (gc_needs_major_flags) {
+ reason |= gc_needs_major_flags;
do_full_mark = TRUE;
}
else if (RGENGC_FORCE_MAJOR_GC) {
@@ -9041,7 +9011,7 @@ gc_start(rb_objspace_t *objspace, unsigned int reason)
do_full_mark = TRUE;
}
- objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
+ gc_needs_major_flags = GPR_FLAG_NONE;
if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
@@ -9299,7 +9269,7 @@ gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_
gc_enter_count(event);
if (UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
- if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
+ if (RGENGC_CHECK_MODE >= 3 && (dont_gc_val() == 0)) gc_verify_internal_consistency(objspace);
during_gc = TRUE;
RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
@@ -9511,7 +9481,6 @@ gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
switch (BUILTIN_TYPE(obj)) {
case T_NONE:
- case T_NIL:
case T_MOVED:
case T_ZOMBIE:
return FALSE;
@@ -9987,9 +9956,6 @@ update_cc_tbl_i(VALUE ccs_ptr, void *data)
}
for (int i=0; i<ccs->len; i++) {
- if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
- ccs->entries[i].ci = (struct rb_callinfo *)rb_gc_location((VALUE)ccs->entries[i].ci);
- }
if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
}
@@ -10770,7 +10736,7 @@ gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned
SET(major_by, major_by);
if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
- unsigned int need_major_flags = objspace->rgengc.need_major_gc;
+ unsigned int need_major_flags = gc_needs_major_flags;
need_major_by =
(need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
(need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
@@ -11899,7 +11865,7 @@ static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
size = objspace_malloc_size(objspace, mem, size);
- objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
+ objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
#if CALC_EXACT_MALLOC_SIZE
{
@@ -12373,6 +12339,38 @@ ruby_mimmalloc(size_t size)
return mem;
}
+void *
+ruby_mimcalloc(size_t num, size_t size)
+{
+ void *mem;
+#if CALC_EXACT_MALLOC_SIZE
+ struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
+ if (UNLIKELY(t.left)) {
+ return NULL;
+ }
+ size = t.right + sizeof(struct malloc_obj_info);
+ mem = calloc1(size);
+ if (!mem) {
+ return NULL;
+ }
+ else
+ /* set 0 for consistency of allocated_size/allocations */
+ {
+ struct malloc_obj_info *info = mem;
+ info->size = 0;
+#if USE_GC_MALLOC_OBJ_INFO_DETAILS
+ info->gen = 0;
+ info->file = NULL;
+ info->line = 0;
+#endif
+ mem = info + 1;
+ }
+#else
+ mem = calloc(num, size);
+#endif
+ return mem;
+}
+
void
ruby_mimfree(void *ptr)
{
@@ -13186,7 +13184,7 @@ rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj
}
#if GC_DEBUG
- APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
+ APPEND_F("@%s:%d", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);
#endif
}
end:
@@ -13495,7 +13493,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
- fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
+ fprintf(stderr, "created at: %s:%d\n", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);
if (BUILTIN_TYPE(obj) == T_MOVED) {
fprintf(stderr, "moved?: true\n");
@@ -13520,7 +13518,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
if (is_lazy_sweeping(objspace)) {
fprintf(stderr, "lazy sweeping?: true\n");
- fprintf(stderr, "page swept?: %s\n", GET_HEAP_PAGE(ptr)->flags.before_sweep ? "false" : "true");
+ fprintf(stderr, "page swept?: %s\n", GET_HEAP_PAGE(obj)->flags.before_sweep ? "false" : "true");
}
else {
fprintf(stderr, "lazy sweeping?: false\n");
@@ -13596,10 +13594,9 @@ rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
* traverse all living objects with an iterator.
*
* ObjectSpace also provides support for object finalizers, procs that will be
- * called when a specific object is about to be destroyed by garbage
- * collection. See the documentation for
- * <code>ObjectSpace.define_finalizer</code> for important information on
- * how to use this method correctly.
+ * called after a specific object was destroyed by garbage collection. See
+ * the documentation for +ObjectSpace.define_finalizer+ for important
+ * information on how to use this method correctly.
*
* a = "A"
* b = "B"
@@ -13639,6 +13636,12 @@ rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
void
Init_GC(void)
{
+#if USE_SHARED_GC
+ if (getenv(RUBY_GC_LIBRARY_PATH) != NULL && !dln_supported_p()) {
+ rb_warn(RUBY_GC_LIBRARY_PATH " is ignored because this executable file can't load extension libraries");
+ }
+#endif
+
#undef rb_intern
malloc_offset = gc_compute_malloc_offset();
@@ -13652,7 +13655,7 @@ Init_GC(void)
rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
- rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
+ rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(BASE_SLOT_SIZE));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));