diff options
author | nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-07-09 20:24:02 +0000 |
---|---|---|
committer | nagachika <nagachika@b2dd03c8-39d4-4d8f-98ff-823fe69b080e> | 2017-07-09 20:24:02 +0000 |
commit | e15f61a0d2ef6656958f5f63498323ff3185a78e (patch) | |
tree | 7af3b610625205d25c840fbe3752c98ad40b56d0 /gc.c | |
parent | 420151cf6470795969d3d7fa89e0603df8f6e694 (diff) |
merge revision(s) 56558,59116,59136: [Backport #12670]
* gc.c (heap_page_resurrect): do not return tomb_pages when
page->freelist == NULL.
[Bug #12670]
test for [Bug #12670]
heap corruption by deferred free.
gc.c: expand sorted pages
* gc.c (heap_page_allocate): expand sorted pages before inserting
allocated new page. [Bug #12670]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_4@59302 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'gc.c')
-rw-r--r-- | gc.c | 50 |
1 file changed, 31 insertions, 19 deletions
@@ -1358,6 +1358,29 @@ rb_objspace_free(rb_objspace_t *objspace) } static void +heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length) +{ + struct heap_page **sorted; + size_t size = next_length * sizeof(struct heap_page *); + + gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size); + + if (heap_pages_sorted_length > 0) { + sorted = (struct heap_page **)realloc(heap_pages_sorted, size); + if (sorted) heap_pages_sorted = sorted; + } + else { + sorted = heap_pages_sorted = (struct heap_page **)malloc(size); + } + + if (sorted == 0) { + rb_memerror(); + } + + heap_pages_sorted_length = next_length; +} + +static void heap_pages_expand_sorted(rb_objspace_t *objspace) { size_t next_length = heap_allocatable_pages; @@ -1365,24 +1388,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace) next_length += heap_tomb->total_pages; if (next_length > heap_pages_sorted_length) { - struct heap_page **sorted; - size_t size = next_length * sizeof(struct heap_page *); - - gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size); - - if (heap_pages_sorted_length > 0) { - sorted = (struct heap_page **)realloc(heap_pages_sorted, size); - if (sorted) heap_pages_sorted = sorted; - } - else { - sorted = heap_pages_sorted = (struct heap_page **)malloc(size); - } - - if (sorted == 0) { - rb_memerror(); - } - - heap_pages_sorted_length = next_length; + heap_pages_expand_sorted_to(objspace, next_length); } } @@ -1520,6 +1526,9 @@ heap_page_allocate(rb_objspace_t *objspace) rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid); } } + if (heap_allocated_pages >= heap_pages_sorted_length) { + heap_pages_expand_sorted_to(objspace, heap_allocated_pages + 1); + } if (hi < heap_allocated_pages) { MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi); } @@ -1529,7 +1538,10 @@ 
heap_page_allocate(rb_objspace_t *objspace) heap_allocated_pages++; objspace->profile.total_allocated_pages++; - if (RGENGC_CHECK_MODE) assert(heap_allocated_pages <= heap_pages_sorted_length); + if (heap_allocated_pages > heap_pages_sorted_length) { + rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")", + heap_allocated_pages, heap_pages_sorted_length); + } if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start; if (heap_pages_himem < end) heap_pages_himem = end; |