summaryrefslogtreecommitdiff
path: root/gc.c
diff options
context:
space:
mode:
authorusa <usa@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2017-06-30 12:35:49 +0000
committerusa <usa@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2017-06-30 12:35:49 +0000
commit687763bc4f7cc7acb57b3ebb4641eeeec0648c22 (patch)
treee8ae1ef88a93e4ca8f21401b00d18645d6ca7bf3 /gc.c
parent896d9fb9957f3c2d0a739e156f0775faa326d13c (diff)
merge revision(s) 56558,59116,59136: [Backport #12670]
* gc.c (heap_page_resurrect): do not return tomb_pages when page->freelist == NULL. [Bug #12670] test for [Bug #12670] heap corruption by deferred free. gc.c: expand sorted pages * gc.c (heap_page_allocate): expand sorted pages before inserting allocated new page. [Bug #12670] git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_3@59234 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'gc.c')
-rw-r--r--  gc.c  62
1 file changed, 39 insertions, 23 deletions
diff --git a/gc.c b/gc.c
index 8a345db0cc..5d73a7e157 100644
--- a/gc.c
+++ b/gc.c
@@ -1314,6 +1314,29 @@ rb_objspace_free(rb_objspace_t *objspace)
}
static void
+heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
+{
+ struct heap_page **sorted;
+ size_t size = next_length * sizeof(struct heap_page *);
+
+ gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
+
+ if (heap_pages_sorted_length > 0) {
+ sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
+ if (sorted) heap_pages_sorted = sorted;
+ }
+ else {
+ sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
+ }
+
+ if (sorted == 0) {
+ rb_memerror();
+ }
+
+ heap_pages_sorted_length = next_length;
+}
+
+static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
size_t next_length = heap_allocatable_pages;
@@ -1321,24 +1344,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace)
next_length += heap_tomb->page_length;
if (next_length > heap_pages_sorted_length) {
- struct heap_page **sorted;
- size_t size = next_length * sizeof(struct heap_page *);
-
- gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
-
- if (heap_pages_sorted_length > 0) {
- sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
- if (sorted) heap_pages_sorted = sorted;
- }
- else {
- sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
- }
-
- if (sorted == 0) {
- rb_memerror();
- }
-
- heap_pages_sorted_length = next_length;
+ heap_pages_expand_sorted_to(objspace, next_length);
}
}
@@ -1477,6 +1483,9 @@ heap_page_allocate(rb_objspace_t *objspace)
rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
}
}
+ if (heap_allocated_pages >= heap_pages_sorted_length) {
+ heap_pages_expand_sorted_to(objspace, heap_allocated_pages + 1);
+ }
if (hi < heap_allocated_pages) {
MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
}
@@ -1486,7 +1495,10 @@ heap_page_allocate(rb_objspace_t *objspace)
heap_allocated_pages++;
objspace->profile.total_allocated_pages++;
- if (RGENGC_CHECK_MODE) assert(heap_allocated_pages <= heap_pages_sorted_length);
+ if (heap_allocated_pages > heap_pages_sorted_length) {
+ rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
+ heap_allocated_pages, heap_pages_sorted_length);
+ }
/* adjust obj_limit (object number available in this page) */
start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
@@ -1516,12 +1528,16 @@ heap_page_allocate(rb_objspace_t *objspace)
static struct heap_page *
heap_page_resurrect(rb_objspace_t *objspace)
{
- struct heap_page *page;
+ struct heap_page *page = heap_tomb->pages;
- if ((page = heap_tomb->pages) != NULL) {
- heap_unlink_page(objspace, heap_tomb, page);
- return page;
+ while (page) {
+ if (page->freelist != NULL) {
+ heap_unlink_page(objspace, heap_tomb, page);
+ return page;
+ }
+ page = page->next;
}
+
return NULL;
}