Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  365
1 file changed, 191 insertions(+), 174 deletions(-)
diff --git a/gc.c b/gc.c
index 279841fd7f..d1e0fcd042 100644
--- a/gc.c
+++ b/gc.c
@@ -224,6 +224,9 @@ size_add_overflow(size_t x, size_t y)
bool p;
#if 0
+#elif defined(ckd_add)
+ p = ckd_add(&z, x, y);
+
#elif __has_builtin(__builtin_add_overflow)
p = __builtin_add_overflow(x, y, &z);
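The hunk above teaches size_add_overflow to prefer C23's ckd_add() from <stdckdint.h> before the GCC/Clang builtin. A minimal sketch of the same selection chain, with a portable fallback added for illustration (the function name is not from this patch):

    #include <stdbool.h>
    #include <stddef.h>
    #if defined(__has_include)
    # if __has_include(<stdckdint.h>)
    #  include <stdckdint.h>   /* C23: provides ckd_add */
    # endif
    #endif
    #ifndef __has_builtin
    # define __has_builtin(x) 0
    #endif

    static inline bool
    checked_size_add(size_t x, size_t y, size_t *z)
    {
    #if defined(ckd_add)
        return ckd_add(z, x, y);                 /* true on overflow */
    #elif __has_builtin(__builtin_add_overflow)
        return __builtin_add_overflow(x, y, z);  /* true on overflow */
    #else
        *z = x + y;                              /* size_t wraps on overflow */
        return *z < x;                           /* detect the wrap */
    #endif
    }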
@@ -416,7 +419,6 @@ rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
#endif
#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
-#define TICK_TYPE 1
typedef struct {
size_t size_pool_init_slots[SIZE_POOL_COUNT];
@@ -687,29 +689,33 @@ typedef struct RVALUE {
VALUE v3;
} values;
} as;
+} RVALUE;
- /* Start of RVALUE_OVERHEAD.
- * Do not directly read these members from the RVALUE as they're located
- * at the end of the slot (which may differ in size depending on the size
- * pool). */
-#if RACTOR_CHECK_MODE
+/* These members are located at the end of the slot that the object is in. */
+#if RACTOR_CHECK_MODE || GC_DEBUG
+struct rvalue_overhead {
+# if RACTOR_CHECK_MODE
uint32_t _ractor_belonging_id;
-#endif
-#if GC_DEBUG
+# endif
+# if GC_DEBUG
const char *file;
int line;
-#endif
-} RVALUE;
+# endif
+};
-#if RACTOR_CHECK_MODE
-# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, _ractor_belonging_id))
-#elif GC_DEBUG
-# define RVALUE_OVERHEAD (sizeof(RVALUE) - offsetof(RVALUE, file))
+// Make sure that RVALUE_OVERHEAD rounds up to a multiple of sizeof(VALUE)
+# define RVALUE_OVERHEAD (sizeof(struct { \
+ union { \
+ struct rvalue_overhead overhead; \
+ VALUE value; \
+ }; \
+}))
+# define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_obj_slot_size(obj)))
#else
# define RVALUE_OVERHEAD 0
#endif
-STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5) + RVALUE_OVERHEAD);
+STATIC_ASSERT(sizeof_rvalue, sizeof(RVALUE) == (SIZEOF_VALUE * 5));
STATIC_ASSERT(alignof_rvalue, RUBY_ALIGNOF(RVALUE) == SIZEOF_VALUE);
typedef uintptr_t bits_t;
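Two things in the hunk above deserve unpacking: the overhead struct moves out of RVALUE to the end of each slot (reachable via GET_RVALUE_OVERHEAD plus the slot size), and the RVALUE_OVERHEAD macro pads it to a multiple of sizeof(VALUE) by wrapping it in an anonymous union with a VALUE, whose alignment forces sizeof to round up. A standalone sketch of the padding trick (the RACTOR_CHECK_MODE-only layout is used here for illustration):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t VALUE;
    struct rvalue_overhead { uint32_t _ractor_belonging_id; };  /* RACTOR_CHECK_MODE-only layout */

    /* sizeof(struct rvalue_overhead) is 4, but the union member's alignment is
     * at least that of VALUE, and a struct's size is always a multiple of its
     * alignment, so this rounds up to 8 on a typical 64-bit target. */
    #define RVALUE_OVERHEAD \
        (sizeof(struct { union { struct rvalue_overhead overhead; VALUE value; }; }))

    int
    main(void)
    {
        printf("raw=%zu padded=%zu\n",
               sizeof(struct rvalue_overhead), RVALUE_OVERHEAD);
        return 0;
    }

With the overhead out of RVALUE, sizeof(RVALUE) stays at five VALUEs (the adjusted STATIC_ASSERT) and BASE_SLOT_SIZE below grows to sizeof(RVALUE) + RVALUE_OVERHEAD instead.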
@@ -717,7 +723,6 @@ enum {
BITS_SIZE = sizeof(bits_t),
BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};
-#define popcount_bits rb_popcount_intptr
struct heap_page_header {
struct heap_page *page;
@@ -954,7 +959,7 @@ typedef struct rb_objspace {
#define HEAP_PAGE_ALIGN_LOG 16
#endif
-#define BASE_SLOT_SIZE sizeof(RVALUE)
+#define BASE_SLOT_SIZE (sizeof(RVALUE) + RVALUE_OVERHEAD)
#define CEILDIV(i, mod) roomof(i, mod)
enum {
@@ -1417,17 +1422,7 @@ static const char *obj_type_name(VALUE obj);
static void gc_finalize_deferred(void *dmy);
-/*
- * 1 - TSC (H/W Time Stamp Counter)
- * 2 - getrusage
- */
-#ifndef TICK_TYPE
-#define TICK_TYPE 1
-#endif
-
#if USE_TICK_T
-
-#if TICK_TYPE == 1
/* the following code is only for internal tuning. */
/* Source code to use RDTSC is quoted and modified from
@@ -1524,28 +1519,6 @@ tick(void)
return clock();
}
#endif /* TSC */
-
-#elif TICK_TYPE == 2
-typedef double tick_t;
-#define PRItick "4.9f"
-
-static inline tick_t
-tick(void)
-{
- return getrusage_time();
-}
-#else /* TICK_TYPE */
-#error "choose tick type"
-#endif /* TICK_TYPE */
-
-#define MEASURE_LINE(expr) do { \
- volatile tick_t start_time = tick(); \
- volatile tick_t end_time; \
- expr; \
- end_time = tick(); \
- fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
-} while (0)
-
#else /* USE_TICK_T */
#define MEASURE_LINE(expr) expr
#endif /* USE_TICK_T */
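Only the TSC/clock() tick path survives the deletions above; the getrusage-based TICK_TYPE 2 variant and its selector go away. For reference, the deleted MEASURE_LINE wrapper is the classic sample-before/sample-after pattern, with volatile keeping the compiler from eliding or reordering the tick() reads around the measured expression. A compilable stand-in (the clock()-based tick() is a placeholder):

    #include <stdio.h>
    #include <time.h>

    typedef unsigned long long tick_t;

    static inline tick_t
    tick(void)
    {
        return (tick_t)clock();  /* placeholder; the real code prefers RDTSC */
    }

    #define MEASURE_LINE(expr) do { \
        volatile tick_t start_time = tick(); \
        volatile tick_t end_time; \
        expr; \
        end_time = tick(); \
        fprintf(stderr, "0\t%llu\t%s\n", \
                (unsigned long long)(end_time - start_time), #expr); \
    } while (0)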
@@ -1563,14 +1536,57 @@ tick(void)
#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
-#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
-#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
-#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
+static inline VALUE check_rvalue_consistency(const VALUE obj);
+#define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
-#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
+#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
+#define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
+
+static inline int
+RVALUE_MARKED(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return RVALUE_MARKED_BITMAP(obj) != 0;
+}
+
+static inline int
+RVALUE_PINNED(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return RVALUE_PINNED_BITMAP(obj) != 0;
+}
+
+static inline int
+RVALUE_WB_UNPROTECTED(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
+}
+
+static inline int
+RVALUE_MARKING(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return RVALUE_MARKING_BITMAP(obj) != 0;
+}
+
+static inline int
+RVALUE_REMEMBERED(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
+}
+
+static inline int
+RVALUE_UNCOLLECTIBLE(VALUE obj)
+{
+ check_rvalue_consistency(obj);
+ return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
+}
+
+#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
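The predicates above wrap each bitmap test in check_rvalue_consistency before reading the bit. The lookup itself relies on heap pages being 2^HEAP_PAGE_ALIGN_LOG aligned, so an object's slot index falls out of its low address bits. A hedged sketch of that arithmetic (constants and names are illustrative; the real macros are defined elsewhere in gc.c):

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t bits_t;
    #define BITS_BITLENGTH   (sizeof(bits_t) * 8)       /* CHAR_BIT assumed 8 */
    #define PAGE_ALIGN_MASK  ((uintptr_t)(1 << 16) - 1) /* HEAP_PAGE_ALIGN_LOG 16 */

    static inline int
    marked_in_bitmap(const bits_t *bits, uintptr_t obj, size_t slot_base)
    {
        size_t idx = (obj & PAGE_ALIGN_MASK) / slot_base; /* slot number in page */
        return (bits[idx / BITS_BITLENGTH] >> (idx % BITS_BITLENGTH)) & 1;
    }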
@@ -1615,7 +1631,7 @@ check_rvalue_consistency_force(const VALUE obj, int terminate)
else {
const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
- const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
+ const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
const int age = RVALUE_AGE_GET((VALUE)obj);
@@ -1720,48 +1736,6 @@ gc_object_moved_p(rb_objspace_t * objspace, VALUE obj)
}
static inline int
-RVALUE_MARKED(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return RVALUE_MARK_BITMAP(obj) != 0;
-}
-
-static inline int
-RVALUE_PINNED(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return RVALUE_PIN_BITMAP(obj) != 0;
-}
-
-static inline int
-RVALUE_WB_UNPROTECTED(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
-}
-
-static inline int
-RVALUE_MARKING(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return RVALUE_MARKING_BITMAP(obj) != 0;
-}
-
-static inline int
-RVALUE_REMEMBERED(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
-}
-
-static inline int
-RVALUE_UNCOLLECTIBLE(VALUE obj)
-{
- check_rvalue_consistency(obj);
- return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
-}
-
-static inline int
RVALUE_OLD_P(VALUE obj)
{
GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
@@ -1883,44 +1857,45 @@ rb_gc_initial_stress_set(VALUE flag)
initial_stress = flag;
}
-static void * Alloc_GC_impl(void);
+static void *rb_gc_impl_objspace_alloc(void);
#if USE_SHARED_GC
# include "dln.h"
-# define Alloc_GC rb_gc_functions->init
+
+# define RUBY_GC_LIBRARY_PATH "RUBY_GC_LIBRARY_PATH"
void
-ruby_external_gc_init()
+ruby_external_gc_init(void)
{
- rb_gc_function_map_t *map = malloc(sizeof(rb_gc_function_map_t));
- rb_gc_functions = map;
-
- char *gc_so_path = getenv("RUBY_GC_LIBRARY_PATH");
- if (!gc_so_path) {
- map->init = Alloc_GC_impl;
- return;
+ char *gc_so_path = getenv(RUBY_GC_LIBRARY_PATH);
+ void *handle = NULL;
+ if (gc_so_path && dln_supported_p()) {
+ char error[1024];
+ handle = dln_open(gc_so_path, error, sizeof(error));
+ if (!handle) {
+ fprintf(stderr, "%s", error);
+ rb_bug("ruby_external_gc_init: Shared library %s cannot be opened", gc_so_path);
+ }
}
- void *h = dln_open(gc_so_path);
- if (!h) {
- rb_bug(
- "ruby_external_gc_init: Shared library %s cannot be opened.",
- gc_so_path
- );
- }
+# define load_external_gc_func(name) do { \
+ if (handle) { \
+ rb_gc_functions->name = dln_symbol(handle, "rb_gc_impl_" #name); \
+ if (!rb_gc_functions->name) { \
+ rb_bug("ruby_external_gc_init: " #name " func not exported by library %s", gc_so_path); \
+ } \
+ } \
+ else { \
+ rb_gc_functions->name = rb_gc_impl_##name; \
+ } \
+} while (0)
- void *gc_init_func = dln_symbol(h, "Init_GC");
- if (!gc_init_func) {
- rb_bug(
- "ruby_external_gc_init: Init_GC func not exported by library %s",
- gc_so_path
- );
- }
+ load_external_gc_func(objspace_alloc);
- map->init = gc_init_func;
+# undef load_external_gc_func
}
-#else
-# define Alloc_GC Alloc_GC_impl
+
+# define rb_gc_impl_objspace_alloc rb_gc_functions->objspace_alloc
#endif
rb_objspace_t *
@@ -1929,9 +1904,13 @@ rb_objspace_alloc(void)
#if USE_SHARED_GC
ruby_external_gc_init();
#endif
- return (rb_objspace_t *)Alloc_GC();
+ return (rb_objspace_t *)rb_gc_impl_objspace_alloc();
}
+#if USE_SHARED_GC
+# undef rb_gc_impl_objspace_alloc
+#endif
+
static void free_stack_chunks(mark_stack_t *);
static void mark_stack_free_cache(mark_stack_t *);
static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
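Together, the two hunks above replace the single Init_GC hook with per-function symbol resolution: each rb_gc_impl_* function either comes from the shared object named by RUBY_GC_LIBRARY_PATH or falls back to the built-in implementation, and a missing symbol is a fatal rb_bug(). A hedged sketch of the smallest library this loader would currently accept, exporting only the one symbol resolved so far (the returned objspace is opaque to the loader, so the body is purely illustrative):

    /* mygc.c: build with   cc -shared -fPIC -o mygc.so mygc.c
     * then run with        RUBY_GC_LIBRARY_PATH=./mygc.so ruby ... */
    #include <stdlib.h>

    void *
    rb_gc_impl_objspace_alloc(void)
    {
        /* a real GC returns its own objspace state; a zeroed block stands in */
        return calloc(1, 4096);
    }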
@@ -2660,7 +2639,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
#endif
#if GC_DEBUG
- RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
+ GET_RVALUE_OVERHEAD(obj)->file = rb_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
#endif
@@ -2862,14 +2841,16 @@ newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t si
// Retry allocation after moving to new page
obj = ractor_cache_allocate_slot(objspace, cache, size_pool_idx);
-
- GC_ASSERT(obj != Qfalse);
}
}
if (unlock_vm) {
RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
}
+
+ if (UNLIKELY(obj == Qfalse)) {
+ rb_memerror();
+ }
}
size_pool->total_allocated_objects++;
@@ -3502,7 +3483,7 @@ obj_free(rb_objspace_t *objspace, VALUE obj)
}
-#define OBJ_ID_INCREMENT (sizeof(RVALUE))
+#define OBJ_ID_INCREMENT (BASE_SLOT_SIZE)
#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
static int
@@ -3532,7 +3513,7 @@ static const struct st_hash_type object_id_hash_type = {
};
static void *
-Alloc_GC_impl(void)
+rb_gc_impl_objspace_alloc(void)
{
rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
ruby_current_vm_ptr->objspace = objspace;
@@ -3547,6 +3528,11 @@ Alloc_GC_impl(void)
rb_bug("Could not preregister postponed job for GC");
}
+ // TODO: debug why on Windows Ruby crashes on boot when GC is on.
+#ifdef _WIN32
+ dont_gc_on();
+#endif
+
for (int i = 0; i < SIZE_POOL_COUNT; i++) {
rb_size_pool_t *size_pool = &size_pools[i];
@@ -3554,15 +3540,14 @@ Alloc_GC_impl(void)
ccan_list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
ccan_list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
+
+ gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
+
+ size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
}
rb_darray_make(&objspace->weak_references, 0);
- // TODO: debug why on Windows Ruby crashes on boot when GC is on.
-#ifdef _WIN32
- dont_gc_on();
-#endif
-
#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
/* Need to determine if we can use mmap at runtime. */
heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
@@ -3576,15 +3561,6 @@ Alloc_GC_impl(void)
objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
#endif
- /* Set size pools allocatable pages. */
- for (int i = 0; i < SIZE_POOL_COUNT; i++) {
- rb_size_pool_t *size_pool = &size_pools[i];
-
- /* Set the default value of size_pool_init_slots. */
- gc_params.size_pool_init_slots[i] = GC_HEAP_INIT_SLOTS;
-
- size_pool->allocatable_pages = minimum_pages_for_size_pool(objspace, size_pool);
- }
heap_pages_expand_sorted(objspace);
init_mark_stack(&objspace->mark_stack);
@@ -4336,13 +4312,8 @@ rb_objspace_free_objects_i(VALUE obj, void *data)
{
rb_objspace_t *objspace = (rb_objspace_t *)data;
- switch (BUILTIN_TYPE(obj)) {
- case T_NONE:
- case T_SYMBOL:
- break;
- default:
+ if (BUILTIN_TYPE(obj) != T_NONE) {
obj_free(objspace, obj);
- break;
}
}
@@ -4371,7 +4342,16 @@ rb_objspace_call_finalizer_i(VALUE obj, void *data)
obj_free(objspace, obj);
break;
case T_SYMBOL:
- case T_ARRAY:
+ if (rb_free_at_exit) {
+ if (RSYMBOL(obj)->fstr &&
+ (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
+ BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
+ RSYMBOL(obj)->fstr = 0;
+ }
+
+ obj_free(objspace, obj);
+ }
+ break;
case T_NONE:
break;
default:
@@ -4441,7 +4421,7 @@ static inline bool
is_garbage_object(rb_objspace_t *objspace, VALUE ptr)
{
return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
- !MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr);
+ !RVALUE_MARKED(ptr);
}
static inline bool
@@ -5055,7 +5035,7 @@ try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page,
/* We should return true if either src is successfully moved, or src is
* unmoveable. A false return will cause the sweeping cursor to be
* incremented to the next page, and src will attempt to move again */
- GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(src), src));
+ GC_ASSERT(RVALUE_MARKED(src));
asan_unlock_freelist(free_page);
VALUE dest = (VALUE)free_page->freelist;
@@ -5810,8 +5790,8 @@ invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_
VALUE object;
if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
- GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
- GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
+ GC_ASSERT(RVALUE_PINNED(forwarding_object));
+ GC_ASSERT(!RVALUE_MARKED(forwarding_object));
CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
@@ -5834,7 +5814,7 @@ invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_
orig_page->free_slots++;
heap_page_add_freeobj(objspace, orig_page, object);
- GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
+ GC_ASSERT(RVALUE_MARKED(forwarding_object));
GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
}
@@ -6727,7 +6707,7 @@ gc_pin(rb_objspace_t *objspace, VALUE obj)
GC_ASSERT(is_markable_object(obj));
if (UNLIKELY(objspace->flags.during_compacting)) {
if (LIKELY(during_gc)) {
- if (!MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj)) {
+ if (!RVALUE_PINNED(obj)) {
GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
GET_HEAP_PAGE(obj)->pinned_slots++;
MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
@@ -7433,7 +7413,7 @@ gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
rb_objspace_t *objspace = (rb_objspace_t *)ptr;
/* object should be marked or oldgen */
- if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
+ if (!RVALUE_MARKED(obj)) {
fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
reflist_dump(refs);
@@ -8220,7 +8200,7 @@ gc_compact_plane(rb_objspace_t *objspace, rb_size_pool_t *size_pool, rb_heap_t *
do {
VALUE vp = (VALUE)p;
- GC_ASSERT(vp % sizeof(RVALUE) == 0);
+ GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
if (bitset & 1) {
objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
@@ -8775,9 +8755,9 @@ rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
- if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
- if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
- if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
+ if (RVALUE_MARKING(obj) && n<max) flags[n++] = ID_marking;
+ if (RVALUE_MARKED(obj) && n<max) flags[n++] = ID_marked;
+ if (RVALUE_PINNED(obj) && n<max) flags[n++] = ID_pinned;
return n;
}
@@ -11887,7 +11867,7 @@ static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
{
size = objspace_malloc_size(objspace, mem, size);
- objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
+ objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
#if CALC_EXACT_MALLOC_SIZE
{
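The added {} is not stray: it suggests objspace_malloc_increase is defined as a block-style macro, a for loop that runs its accounting around a caller-supplied body, so a statement-position call needs an empty block. A self-contained sketch of that macro pattern (all names here are illustrative):

    #include <stdio.h>

    static long total;
    static void account_add(long d)  { total += d; }
    static void account_flush(void) { fprintf(stderr, "total=%ld\n", total); }

    /* runs account_add() before the body and account_flush() after it */
    #define WITH_ACCOUNTING(delta) \
        for (int _once = (account_add(delta), 1); _once; \
             _once = (account_flush(), 0))

    int
    main(void)
    {
        WITH_ACCOUNTING(32) {
            /* body runs inside the accounting scope */
        }
        WITH_ACCOUNTING(16) {}  /* no body, the same shape as the call above */
        return 0;
    }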
@@ -12361,6 +12341,38 @@ ruby_mimmalloc(size_t size)
return mem;
}
+void *
+ruby_mimcalloc(size_t num, size_t size)
+{
+ void *mem;
+#if CALC_EXACT_MALLOC_SIZE
+ struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
+ if (UNLIKELY(t.left)) {
+ return NULL;
+ }
+ size = t.right + sizeof(struct malloc_obj_info);
+ mem = calloc1(size);
+ if (!mem) {
+ return NULL;
+ }
+ else
+ /* set 0 for consistency of allocated_size/allocations */
+ {
+ struct malloc_obj_info *info = mem;
+ info->size = 0;
+#if USE_GC_MALLOC_OBJ_INFO_DETAILS
+ info->gen = 0;
+ info->file = NULL;
+ info->line = 0;
+#endif
+ mem = info + 1;
+ }
+#else
+ mem = calloc(num, size);
+#endif
+ return mem;
+}
+
void
ruby_mimfree(void *ptr)
{
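ruby_mimcalloc guards the num * size multiplication before adding header space: rbimpl_size_mul_overflow returns a tag whose .left flags overflow and whose .right carries the product, as the code above uses. A portable stand-in for the same check (the function name is illustrative):

    #include <stdbool.h>
    #include <stddef.h>

    static inline bool
    size_mul_overflows(size_t a, size_t b, size_t *product)
    {
    #if defined(__GNUC__) || defined(__clang__)
        return __builtin_mul_overflow(a, b, product);  /* true on overflow */
    #else
        if (b != 0 && a > (size_t)-1 / b) return true; /* would overflow */
        *product = a * b;
        return false;
    #endif
    }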
@@ -13146,8 +13158,8 @@ rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj
APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
(void *)obj, age,
C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
- C(RVALUE_MARK_BITMAP(obj), "M"),
- C(RVALUE_PIN_BITMAP(obj), "P"),
+ C(RVALUE_MARKED_BITMAP(obj), "M"),
+ C(RVALUE_PINNED_BITMAP(obj), "P"),
C(RVALUE_MARKING_BITMAP(obj), "R"),
C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
C(rb_objspace_garbage_object_p(obj), "G"),
@@ -13174,7 +13186,7 @@ rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj
}
#if GC_DEBUG
- APPEND_F("@%s:%d", RANY(obj)->file, RANY(obj)->line);
+ APPEND_F("@%s:%d", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);
#endif
}
end:
@@ -13483,7 +13495,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
{
rb_objspace_t *objspace = &rb_objspace;
- fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);
+ fprintf(stderr, "created at: %s:%d\n", GET_RVALUE_OVERHEAD(obj)->file, GET_RVALUE_OVERHEAD(obj)->line);
if (BUILTIN_TYPE(obj) == T_MOVED) {
fprintf(stderr, "moved?: true\n");
@@ -13499,8 +13511,8 @@ rb_gcdebug_print_obj_condition(VALUE obj)
return;
}
- fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
- fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
+ fprintf(stderr, "marked? : %s\n", RVALUE_MARKED(obj) ? "true" : "false");
+ fprintf(stderr, "pinned? : %s\n", RVALUE_PINNED(obj) ? "true" : "false");
fprintf(stderr, "age? : %d\n", RVALUE_AGE_GET(obj));
fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
@@ -13508,7 +13520,7 @@ rb_gcdebug_print_obj_condition(VALUE obj)
if (is_lazy_sweeping(objspace)) {
fprintf(stderr, "lazy sweeping?: true\n");
- fprintf(stderr, "page swept?: %s\n", GET_HEAP_PAGE(ptr)->flags.before_sweep ? "false" : "true");
+ fprintf(stderr, "page swept?: %s\n", GET_HEAP_PAGE(obj)->flags.before_sweep ? "false" : "true");
}
else {
fprintf(stderr, "lazy sweeping?: false\n");
@@ -13584,10 +13596,9 @@ rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
* traverse all living objects with an iterator.
*
* ObjectSpace also provides support for object finalizers, procs that will be
- * called when a specific object is about to be destroyed by garbage
- * collection. See the documentation for
- * <code>ObjectSpace.define_finalizer</code> for important information on
- * how to use this method correctly.
+ * called after a specific object was destroyed by garbage collection. See
+ * the documentation for +ObjectSpace.define_finalizer+ for important
+ * information on how to use this method correctly.
*
* a = "A"
* b = "B"
@@ -13627,6 +13638,12 @@ rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
void
Init_GC(void)
{
+#if USE_SHARED_GC
+ if (getenv(RUBY_GC_LIBRARY_PATH) != NULL && !dln_supported_p()) {
+ rb_warn(RUBY_GC_LIBRARY_PATH " is ignored because this executable file can't load extension libraries");
+ }
+#endif
+
#undef rb_intern
malloc_offset = gc_compute_malloc_offset();
@@ -13640,7 +13657,7 @@ Init_GC(void)
rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), RBOOL(GC_DEBUG));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
- rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(sizeof(RVALUE)));
+ rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(BASE_SLOT_SIZE));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));