author     ko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>    2018-10-30 21:53:56 +0000
committer  ko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>    2018-10-30 21:53:56 +0000
commit     312b105d0e263a36c129a14f2681fe16905c0244 (patch)
tree       00bf11825e6f8a8cb12ba1f2c02c722f35090e0c /gc.c
parent     69b8ffcd5b8b28c56212368a3fe66f076b90e4c4 (diff)
introduce TransientHeap. [Bug #14858]
* transient_heap.c, transient_heap.h: implement TransientHeap (theap).
  theap is designed for Ruby's object system. theap is like the Eden heap
  in generational GC terminology. theap allocation is very fast because it
  only needs to bump up a pointer, and deallocation is also fast because we
  don't do anything. However, we need to evacuate (in Copy GC terminology)
  if theap memory is long-lived. Evacuation logic is needed for each type.
  See [Bug #14858] for details.

* array.c: now theap for T_ARRAY is supported. ary_heap_alloc() tries to
  allocate a memory area from theap. If this trial succeeds, the array has
  a theap ptr and RARRAY_TRANSIENT_FLAG is turned on. We don't need to free
  the theap ptr.

* ruby.h: RARRAY_CONST_PTR() returns a malloc'ed memory area. It means that
  if the array is allocated on theap, it is forcibly evacuated to malloc'ed
  memory. This makes programs slower, but it is very compatible with current
  code because theap memory can be evacuated (theap memory will be recycled).
  If you want the transient heap ptr, use RARRAY_CONST_PTR_TRANSIENT()
  instead of RARRAY_CONST_PTR(). If you can't be sure when evacuation will
  occur, use RARRAY_CONST_PTR().

(re-commit of r65444)

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65449 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
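To illustrate why theap allocation is "bump up a pointer" fast, here is a minimal
sketch of an Eden-style bump allocator. It is not the transient_heap.c
implementation; the names (toy_theap, toy_theap_alloc) and the size constant are
illustrative only, and the real theap additionally manages blocks, marking, and
per-type evacuation.

/* Illustrative only: a toy bump-pointer allocator. */
#include <stddef.h>
#include <stdint.h>

#define TOY_THEAP_SIZE (32 * 1024)

struct toy_theap {
    uint8_t buf[TOY_THEAP_SIZE];
    size_t  used;                /* bump pointer: offset of the next free byte */
};

static void *
toy_theap_alloc(struct toy_theap *theap, size_t size)
{
    /* round up to pointer alignment */
    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
    if (theap->used + size > TOY_THEAP_SIZE) {
        return NULL;             /* heap full: caller falls back to malloc */
    }
    void *ptr = theap->buf + theap->used;
    theap->used += size;         /* allocation is just a pointer bump */
    return ptr;
}
/* Deallocation is a no-op: the whole region is reclaimed at GC time, and
 * long-lived objects are evacuated to malloc'ed memory instead. */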
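As a usage note on the two accessors, the sketch below mirrors the pattern the
diff introduces in gc_mark_children(): code that only reads the elements and does
not allocate in between may use the cheap RARRAY_CONST_PTR_TRANSIENT(), while
general-purpose code keeps using RARRAY_CONST_PTR(), which evacuates
theap-backed arrays to malloc'ed memory so the pointer stays valid. The helper
name count_nils is hypothetical; it is not part of this commit.

#include "ruby/ruby.h"

/* Count nil elements without forcing evacuation of a theap-backed array. */
static long
count_nils(VALUE ary)
{
    long i, n = 0, len = RARRAY_LEN(ary);
    const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(ary); /* may point into theap */

    for (i = 0; i < len; i++) {
        if (NIL_P(ptr[i])) n++;  /* no allocation here, so no GC/evacuation */
    }
    /* do not cache ptr across code that can trigger GC;
     * if that is unavoidable, use RARRAY_CONST_PTR() instead */
    return n;
}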
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  89
1 file changed, 66 insertions(+), 23 deletions(-)
diff --git a/gc.c b/gc.c
index 00f51c9657..57619af9f3 100644
--- a/gc.c
+++ b/gc.c
@@ -35,6 +35,7 @@
#include <sys/types.h>
#include "ruby_assert.h"
#include "debug_counter.h"
+#include "transient_heap.h"
#include "mjit.h"
#undef rb_data_object_wrap
@@ -845,8 +846,6 @@ static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
static VALUE define_final0(VALUE obj, VALUE block);
static void negative_size_allocation_error(const char *);
-static void *aligned_malloc(size_t, size_t);
-static void aligned_free(void *);
static void init_mark_stack(mark_stack_t *stack);
@@ -1190,6 +1189,7 @@ RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *pag
{
MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
objspace->rgengc.old_objects++;
+ rb_transient_heap_promote(obj);
#if RGENGC_PROFILE >= 2
objspace->profile.total_promoted_count++;
@@ -1486,7 +1486,7 @@ heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
heap_allocated_pages--;
objspace->profile.total_freed_pages++;
- aligned_free(GET_PAGE_BODY(page->start));
+ rb_aligned_free(GET_PAGE_BODY(page->start));
free(page);
}
@@ -1524,7 +1524,7 @@ heap_page_allocate(rb_objspace_t *objspace)
int limit = HEAP_PAGE_OBJ_LIMIT;
/* assign heap_page body (contains heap_page_header and RVALUEs) */
- page_body = (struct heap_page_body *)aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
+ page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
if (page_body == 0) {
rb_memerror();
}
@@ -1532,7 +1532,7 @@ heap_page_allocate(rb_objspace_t *objspace)
/* assign heap_page entry */
page = (struct heap_page *)calloc(1, sizeof(struct heap_page));
if (page == 0) {
- aligned_free(page_body);
+ rb_aligned_free(page_body);
rb_memerror();
}
@@ -4515,6 +4515,7 @@ static void
gc_mark_ptr(rb_objspace_t *objspace, VALUE obj)
{
if (LIKELY(objspace->mark_func_data == NULL)) {
+ if (RB_TYPE_P(obj, T_NONE)) rb_bug("...");
rgengc_check_relation(objspace, obj);
if (!gc_mark_set(objspace, obj)) return; /* already marked */
gc_aging(objspace, obj);
@@ -4672,14 +4673,22 @@ gc_mark_children(rb_objspace_t *objspace, VALUE obj)
case T_ARRAY:
if (FL_TEST(obj, ELTS_SHARED)) {
- gc_mark(objspace, any->as.array.as.heap.aux.shared);
+ VALUE root = any->as.array.as.heap.aux.shared;
+ gc_mark(objspace, root);
}
else {
long i, len = RARRAY_LEN(obj);
- const VALUE *ptr = RARRAY_CONST_PTR(obj);
+ const VALUE *ptr = RARRAY_CONST_PTR_TRANSIENT(obj);
for (i=0; i < len; i++) {
- gc_mark(objspace, *ptr++);
+ gc_mark(objspace, ptr[i]);
}
+
+ if (objspace->mark_func_data == NULL) {
+ if (!FL_TEST_RAW(obj, RARRAY_EMBED_FLAG) &&
+ RARRAY_TRANSIENT_P(obj)) {
+ rb_transient_heap_mark(obj, ptr);
+ }
+ }
}
break;
@@ -5455,6 +5464,13 @@ rb_gc_verify_internal_consistency(void)
gc_verify_internal_consistency(Qnil);
}
+static VALUE
+gc_verify_transient_heap_internal_consistency(VALUE dmy)
+{
+ rb_transient_heap_verify();
+ return Qnil;
+}
+
/* marks */
static void
@@ -5671,6 +5687,8 @@ gc_marks_finish(rb_objspace_t *objspace)
#endif
}
+ rb_transient_heap_finish_marking();
+
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END_MARK, 0);
return TRUE;
@@ -6562,6 +6580,7 @@ gc_start(rb_objspace_t *objspace, int reason)
objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
gc_prof_setup_new_record(objspace, reason);
gc_reset_malloc_info(objspace);
+ rb_transient_heap_start_marking(do_full_mark);
gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
GC_ASSERT(during_gc);
@@ -7812,8 +7831,8 @@ rb_memerror(void)
EC_JUMP_TAG(ec, TAG_RAISE);
}
-static void *
-aligned_malloc(size_t alignment, size_t size)
+void *
+rb_aligned_malloc(size_t alignment, size_t size)
{
void *res;
@@ -7846,8 +7865,8 @@ aligned_malloc(size_t alignment, size_t size)
return res;
}
-static void
-aligned_free(void *ptr)
+void
+rb_aligned_free(void *ptr)
{
#if defined __MINGW32__
__mingw_aligned_free(ptr);
@@ -9551,13 +9570,21 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
#if USE_RGENGC
const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
- snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
- (void *)obj, age,
- C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
- C(RVALUE_MARK_BITMAP(obj), "M"),
- C(RVALUE_MARKING_BITMAP(obj), "R"),
- C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
- obj_type_name(obj));
+ if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
+ snprintf(buff, buff_size, "%p [%d%s%s%s%s] %s",
+ (void *)obj, age,
+ C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
+ C(RVALUE_MARK_BITMAP(obj), "M"),
+ C(RVALUE_MARKING_BITMAP(obj), "R"),
+ C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
+ obj_type_name(obj));
+ }
+ else {
+ /* fake */
+ snprintf(buff, buff_size, "%p [%dXXXX] %s",
+ (void *)obj, age,
+ obj_type_name(obj));
+ }
#else
snprintf(buff, buff_size, "%p [%s] %s",
(void *)obj,
@@ -9587,10 +9614,25 @@ rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)
UNEXPECTED_NODE(rb_raw_obj_info);
break;
case T_ARRAY:
- snprintf(buff, buff_size, "%s [%s%s] len: %d", buff,
- C(ARY_EMBED_P(obj), "E"),
- C(ARY_SHARED_P(obj), "S"),
- (int)RARRAY_LEN(obj));
+ if (FL_TEST(obj, ELTS_SHARED)) {
+ snprintf(buff, buff_size, "%s shared -> %s", buff,
+ rb_obj_info(RARRAY(obj)->as.heap.aux.shared));
+ }
+ else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
+ snprintf(buff, buff_size, "%s [%s%s] len: %d (embed)", buff,
+ C(ARY_EMBED_P(obj), "E"),
+ C(ARY_SHARED_P(obj), "S"),
+ (int)RARRAY_LEN(obj));
+ }
+ else {
+ snprintf(buff, buff_size, "%s [%s%s%s] len: %d, capa:%d ptr:%p", buff,
+ C(ARY_EMBED_P(obj), "E"),
+ C(ARY_SHARED_P(obj), "S"),
+ C(RARRAY_TRANSIENT_P(obj), "T"),
+ (int)RARRAY_LEN(obj),
+ ARY_EMBED_P(obj) ? -1 : (int)RARRAY(obj)->as.heap.aux.capa,
+ RARRAY_CONST_PTR_TRANSIENT(obj));
+ }
break;
case T_STRING: {
snprintf(buff, buff_size, "%s %s", buff, RSTRING_PTR(obj));
@@ -9954,6 +9996,7 @@ Init_GC(void)
/* internal methods */
rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency, 0);
+ rb_define_singleton_method(rb_mGC, "verify_transient_heap_internal_consistency", gc_verify_transient_heap_internal_consistency, 0);
#if MALLOC_ALLOCATED_SIZE
rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);