summary | refs | log | tree | commit | diff
path: root/gc.c
diff options
context:
space:
mode:
authorko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2014-09-09 09:33:52 +0000
committerko1 <ko1@b2dd03c8-39d4-4d8f-98ff-823fe69b080e>2014-09-09 09:33:52 +0000
commit8ee9071cbe0a3a51af11127f9e336e68758c1bc9 (patch)
treeb57df78e6494e035828947630d81fdbd8464afc5 /gc.c
parenteb7a7801bf9cf53f620dd8aa0280694f9cf35b40 (diff)
* gc.c (rb_objspace_t::heap_pages): rename field names:
* used -> allocated_pages * increment -> allocatable_pages * length -> sorted_length And remove unused `limit' field. * gc.c: rename macros: * heap_pages_used -> heap_allocated_pages * heap_pages_length -> heap_pages_sorted_length * heap_pages_increment -> heap_allocatable_pages * gc.c (gc_stat_internal): fix symbol names ref: [Feature #9924] https://docs.google.com/spreadsheets/d/11Ua4uBr6o0k-nORrZLEIIUkHJ9JRzRR0NyZfrhEEnc8/edit?usp=sharing Yellow color fields in this table are changed. * test/ruby/test_gc.rb: catch up this change. git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@47471 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Diffstat (limited to 'gc.c')
-rw-r--r--gc.c122
1 file changed, 60 insertions, 62 deletions
diff --git a/gc.c b/gc.c
index 9684afb2da..2093cf4016 100644
--- a/gc.c
+++ b/gc.c
@@ -498,13 +498,11 @@ typedef struct rb_objspace {
struct {
struct heap_page **sorted;
- size_t used;
- size_t length;
+ size_t allocated_pages;
+ size_t allocatable_pages;
+ size_t sorted_length;
RVALUE *range[2];
- size_t limit;
- size_t increment;
-
size_t swept_slots;
size_t min_free_slots;
size_t max_free_slots;
@@ -671,12 +669,12 @@ VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
-#define heap_pages_used objspace->heap_pages.used
-#define heap_pages_length objspace->heap_pages.length
+#define heap_allocated_pages objspace->heap_pages.allocated_pages
+#define heap_pages_sorted_length objspace->heap_pages.sorted_length
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_pages_swept_slots objspace->heap_pages.swept_slots
-#define heap_pages_increment objspace->heap_pages.increment
+#define heap_allocatable_pages objspace->heap_pages.allocatable_pages
#define heap_pages_min_free_slots objspace->heap_pages.min_free_slots
#define heap_pages_max_free_slots objspace->heap_pages.max_free_slots
#define heap_pages_final_slots objspace->heap_pages.final_slots
@@ -1209,12 +1207,12 @@ rb_objspace_free(rb_objspace_t *objspace)
}
if (heap_pages_sorted) {
size_t i;
- for (i = 0; i < heap_pages_used; ++i) {
+ for (i = 0; i < heap_allocated_pages; ++i) {
heap_page_free(objspace, heap_pages_sorted[i]);
}
free(heap_pages_sorted);
- heap_pages_used = 0;
- heap_pages_length = 0;
+ heap_allocated_pages = 0;
+ heap_pages_sorted_length = 0;
heap_pages_lomem = 0;
heap_pages_himem = 0;
@@ -1230,17 +1228,17 @@ rb_objspace_free(rb_objspace_t *objspace)
static void
heap_pages_expand_sorted(rb_objspace_t *objspace)
{
- size_t next_length = heap_pages_increment;
+ size_t next_length = heap_allocatable_pages;
next_length += heap_eden->page_length;
next_length += heap_tomb->page_length;
- if (next_length > heap_pages_length) {
+ if (next_length > heap_pages_sorted_length) {
struct heap_page **sorted;
size_t size = next_length * sizeof(struct heap_page *);
gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %d, size: %d\n", (int)next_length, (int)size);
- if (heap_pages_length > 0) {
+ if (heap_pages_sorted_length > 0) {
sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
if (sorted) heap_pages_sorted = sorted;
}
@@ -1252,7 +1250,7 @@ heap_pages_expand_sorted(rb_objspace_t *objspace)
rb_memerror();
}
- heap_pages_length = next_length;
+ heap_pages_sorted_length = next_length;
}
}
@@ -1305,7 +1303,7 @@ heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *pag
static void
heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
{
- heap_pages_used--;
+ heap_allocated_pages--;
aligned_free(page->body);
free(page);
}
@@ -1316,7 +1314,7 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
size_t i, j;
if (heap_tomb->pages && heap_pages_swept_slots > heap_pages_max_free_slots) {
- for (i = j = 1; j < heap_pages_used; i++) {
+ for (i = j = 1; j < heap_allocated_pages; i++) {
struct heap_page *page = heap_pages_sorted[i];
if (page->heap == heap_tomb && page->free_slots == page->total_slots) {
@@ -1337,7 +1335,7 @@ heap_pages_free_unused_pages(rb_objspace_t *objspace)
}
j++;
}
- if (RGENGC_CHECK_MODE) assert(j == heap_pages_used);
+ if (RGENGC_CHECK_MODE) assert(j == heap_allocated_pages);
}
}
@@ -1367,7 +1365,7 @@ heap_page_allocate(rb_objspace_t *objspace)
/* setup heap_pages_sorted */
lo = 0;
- hi = heap_pages_used;
+ hi = heap_allocated_pages;
while (lo < hi) {
struct heap_page *mid_page;
@@ -1383,14 +1381,14 @@ heap_page_allocate(rb_objspace_t *objspace)
rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
}
}
- if (hi < heap_pages_used) {
- MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_pages_used - hi);
+ if (hi < heap_allocated_pages) {
+ MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
}
heap_pages_sorted[hi] = page;
- heap_pages_used++;
- if (RGENGC_CHECK_MODE) assert(heap_pages_used <= heap_pages_length);
+ heap_allocated_pages++;
+ if (RGENGC_CHECK_MODE) assert(heap_allocated_pages <= heap_pages_sorted_length);
/* adjust obj_limit (object number available in this page) */
start = (RVALUE*)((VALUE)page_body + sizeof(struct heap_page_header));
@@ -1438,8 +1436,8 @@ heap_page_create(rb_objspace_t *objspace)
page = heap_page_allocate(objspace);
method = "allocate";
}
- if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_pages_used: %d, heap_pages_used: %d, tomb->page_length: %d\n",
- method, page, (int)heap_pages_length, (int)heap_pages_used, (int)heap_tomb->page_length);
+ if (0) fprintf(stderr, "heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->page_length: %d\n",
+ method, page, (int)heap_pages_sorted_length, (int)heap_allocated_pages, (int)heap_tomb->page_length);
return page;
}
@@ -1467,18 +1465,18 @@ heap_add_pages(rb_objspace_t *objspace, rb_heap_t *heap, size_t add)
{
size_t i;
- heap_pages_increment = add;
+ heap_allocatable_pages = add;
heap_pages_expand_sorted(objspace);
for (i = 0; i < add; i++) {
heap_assign_page(objspace, heap);
}
- heap_pages_increment = 0;
+ heap_allocatable_pages = 0;
}
static size_t
heap_extend_pages(rb_objspace_t *objspace)
{
- size_t used = heap_pages_used - heap_tomb->page_length;
+ size_t used = heap_allocated_pages - heap_tomb->page_length;
size_t next_used_limit = (size_t)(used * gc_params.growth_factor);
if (gc_params.growth_max_slots > 0) {
@@ -1495,21 +1493,21 @@ heap_set_increment(rb_objspace_t *objspace, size_t additional_pages)
size_t used = heap_eden->page_length;
size_t next_used_limit = used + additional_pages;
- if (next_used_limit == heap_pages_used) next_used_limit++;
+ if (next_used_limit == heap_allocated_pages) next_used_limit++;
- heap_pages_increment = next_used_limit - used;
+ heap_allocatable_pages = next_used_limit - used;
heap_pages_expand_sorted(objspace);
- gc_report(1, objspace, "heap_set_increment: heap_pages_increment is %d\n", (int)heap_pages_increment);
+ gc_report(1, objspace, "heap_set_increment: heap_allocatable_pages is %d\n", (int)heap_allocatable_pages);
}
static int
heap_increment(rb_objspace_t *objspace, rb_heap_t *heap)
{
- if (heap_pages_increment > 0) {
- gc_report(1, objspace, "heap_increment: heap_pages_length: %d, heap_pages_inc: %d, heap->page_length: %d\n",
- (int)heap_pages_length, (int)heap_pages_increment, (int)heap->page_length);
- heap_pages_increment--;
+ if (heap_allocatable_pages > 0) {
+ gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->page_length: %d\n",
+ (int)heap_pages_sorted_length, (int)heap_allocatable_pages, (int)heap->page_length);
+ heap_allocatable_pages--;
heap_assign_page(objspace, heap);
return TRUE;
}
@@ -1739,7 +1737,7 @@ is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
/* check if p looks like a pointer using bsearch*/
lo = 0;
- hi = heap_pages_used;
+ hi = heap_allocated_pages;
while (lo < hi) {
mid = (lo + hi) / 2;
page = heap_pages_sorted[mid];
@@ -2047,10 +2045,10 @@ objspace_each_objects(VALUE arg)
struct each_obj_args *args = (struct each_obj_args *)arg;
i = 0;
- while (i < heap_pages_used) {
+ while (i < heap_allocated_pages) {
while (0 < i && last_body < heap_pages_sorted[i-1]->body) i--;
- while (i < heap_pages_used && heap_pages_sorted[i]->body <= last_body) i++;
- if (heap_pages_used <= i) break;
+ while (i < heap_allocated_pages && heap_pages_sorted[i]->body <= last_body) i++;
+ if (heap_allocated_pages <= i) break;
page = heap_pages_sorted[i];
last_body = page->body;
@@ -2538,7 +2536,7 @@ rb_objspace_call_finalizer(rb_objspace_t *objspace)
gc_enter(objspace, "rb_objspace_call_finalizer");
/* run data object's finalizers */
- for (i = 0; i < heap_pages_used; i++) {
+ for (i = 0; i < heap_allocated_pages; i++) {
p = heap_pages_sorted[i]->start; pend = p + heap_pages_sorted[i]->total_slots;
while (p < pend) {
switch (BUILTIN_TYPE(p)) {
@@ -2962,7 +2960,7 @@ count_objects(int argc, VALUE *argv, VALUE os)
counts[i] = 0;
}
- for (i = 0; i < heap_pages_used; i++) {
+ for (i = 0; i < heap_allocated_pages; i++) {
struct heap_page *page = heap_pages_sorted[i];
RVALUE *p, *pend;
@@ -3216,8 +3214,8 @@ gc_sweep_start(rb_objspace_t *objspace)
rb_sweep_method_entry(GET_VM());
}
- /* sometimes heap_pages_increment is not 0 */
- heap_pages_swept_slots = heap_pages_increment * HEAP_OBJ_LIMIT;
+ /* sometimes heap_allocatable_pages is not 0 */
+ heap_pages_swept_slots = heap_allocatable_pages * HEAP_OBJ_LIMIT;
total_limit_slot = objspace_total_slot(objspace);
heap_pages_min_free_slots = (size_t)(total_limit_slot * GC_HEAP_FREE_SLOTS_MIN_RATIO);
@@ -3248,8 +3246,8 @@ gc_sweep_finish(rb_objspace_t *objspace)
heap_pages_free_unused_pages(objspace);
/* if heap_pages has unused pages, then assign them to increment */
- if (heap_pages_increment < heap_tomb->page_length) {
- heap_pages_increment = heap_tomb->page_length;
+ if (heap_allocatable_pages < heap_tomb->page_length) {
+ heap_allocatable_pages = heap_tomb->page_length;
}
#if RGENGC_PROFILE > 0
@@ -4977,7 +4975,7 @@ gc_marks_finish(rb_objspace_t *objspace)
{ /* decide full GC is needed or not */
rb_heap_t *heap = heap_eden;
size_t sweep_slots =
- (heap_pages_increment * HEAP_OBJ_LIMIT) + /* allocatable slots in empty pages */
+ (heap_allocatable_pages * HEAP_OBJ_LIMIT) + /* allocatable slots in empty pages */
(heap->total_slots - objspace->marked_objects); /* will be sweep slots */
#if RGENGC_CHECK_MODE
@@ -5011,7 +5009,7 @@ gc_marks_finish(rb_objspace_t *objspace)
}
gc_report(1, objspace, "gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
- (int)objspace->marked_objects, (int)objspace->rgengc.old_object_count, (int)heap->total_slots, (int)sweep_slots, (int)heap_pages_increment,
+ (int)objspace->marked_objects, (int)objspace->rgengc.old_object_count, (int)heap->total_slots, (int)sweep_slots, (int)heap_allocatable_pages,
objspace->rgengc.need_major_gc ? "major" : "minor");
#endif
}
@@ -5788,7 +5786,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
int do_full_mark = full_mark;
objspace->flags.immediate_sweep = immediate_sweep;
- if (!heap_pages_used) return FALSE; /* heap is not ready */
+ if (!heap_allocated_pages) return FALSE; /* heap is not ready */
if (!ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
if (RGENGC_CHECK_MODE) {
@@ -5849,7 +5847,7 @@ gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark,
objspace->profile.count++;
objspace->profile.latest_gc_info = reason;
objspace->profile.total_allocated_object_num_at_gc_start = objspace->total_allocated_object_num;
- objspace->profile.heap_used_at_gc_start = heap_pages_used;
+ objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
gc_prof_setup_new_record(objspace, reason);
gc_reset_malloc_info(objspace);
@@ -6270,9 +6268,9 @@ size_t
gc_stat_internal(VALUE hash_or_sym)
{
static VALUE sym_count;
- static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
+ static VALUE sym_heap_used, sym_heap_sorted_length, sym_heap_allocatable_pages;
static VALUE sym_heap_live_slot, sym_heap_free_slot, sym_heap_final_slot, sym_heap_swept_slot;
- static VALUE sym_heap_eden_page_length, sym_heap_tomb_page_length;
+ static VALUE sym_heap_eden_pages, sym_heap_tomb_pages;
static VALUE sym_total_allocated_object, sym_total_freed_object;
static VALUE sym_malloc_increase, sym_malloc_limit;
#if USE_RGENGC
@@ -6306,14 +6304,14 @@ gc_stat_internal(VALUE hash_or_sym)
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
S(count);
S(heap_used);
- S(heap_length);
- S(heap_increment);
+ S(heap_sorted_length);
+ S(heap_allocatable_pages);
S(heap_live_slot);
S(heap_free_slot);
S(heap_final_slot);
S(heap_swept_slot);
- S(heap_eden_page_length);
- S(heap_tomb_page_length);
+ S(heap_eden_pages);
+ S(heap_tomb_pages);
S(total_allocated_object);
S(total_freed_object);
S(malloc_increase);
@@ -6351,15 +6349,15 @@ gc_stat_internal(VALUE hash_or_sym)
SET(count, objspace->profile.count);
/* implementation dependent counters */
- SET(heap_used, heap_pages_used);
- SET(heap_length, heap_pages_length);
- SET(heap_increment, heap_pages_increment);
+ SET(heap_used, heap_allocated_pages);
+ SET(heap_sorted_length, heap_pages_sorted_length);
+ SET(heap_allocatable_pages, heap_allocatable_pages);
SET(heap_live_slot, objspace_live_slot(objspace));
SET(heap_free_slot, objspace_free_slot(objspace));
SET(heap_final_slot, heap_pages_final_slots);
SET(heap_swept_slot, heap_pages_swept_slots);
- SET(heap_eden_page_length, heap_eden->page_length);
- SET(heap_tomb_page_length, heap_tomb->page_length);
+ SET(heap_eden_pages, heap_eden->page_length);
+ SET(heap_tomb_pages, heap_tomb->page_length);
SET(total_allocated_object, objspace->total_allocated_object_num);
SET(total_freed_object, objspace->profile.total_freed_object_num);
SET(malloc_increase, malloc_increase);
@@ -6418,8 +6416,8 @@ gc_stat_internal(VALUE hash_or_sym)
* {
* :count=>2,
* :heap_used=>9,
- * :heap_length=>11,
- * :heap_increment=>2,
+ * :heap_sorted_length=>11,
+ * :heap_allocatable_pages=>2,
* :heap_live_slot=>6836,
* :heap_free_slot=>519,
* :heap_final_slot=>0,