/*
 * Ikarus Scheme -- A compiler for R6RS Scheme.
 * Copyright (C) 2006,2007,2008 Abdulaziz Ghuloum
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 3 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include "ikarus-data.h"

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
/* Sentinel value stored into an object that the collector has already
   moved; -1 is never a valid tagged ikptr, so it unambiguously marks a
   forwarded object. */
#define forward_ptr ((ikptr)-1)

/* Bounds on the size of the Scheme heap nursery and stack. */
#define minimum_heap_size  (pagesize * 1024 * 4)
#define maximum_heap_size  (pagesize * 1024 * 8)
#define minimum_stack_size (pagesize * 128)

/* Set to 1 to compile in per-object-kind counters for collector
   accounting; 0 compiles them out entirely. */
#define accounting 0

#if accounting
static int pair_count         = 0;
static int symbol_count       = 0;
static int closure_count      = 0;
static int vector_count       = 0;
static int record_count       = 0;
static int continuation_count = 0;
static int string_count       = 0;
static int htable_count       = 0;
#endif
typedef struct qupages_t{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p; /* pointer to the scan start */
|
|
|
|
ikptr q; /* pointer to the scan end */
|
2006-11-23 19:38:26 -05:00
|
|
|
struct qupages_t* next;
|
|
|
|
} qupages_t;
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
typedef struct{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr ap;
|
|
|
|
ikptr aq;
|
|
|
|
ikptr ep;
|
|
|
|
ikptr base;
|
2006-11-23 19:42:39 -05:00
|
|
|
} meta_t;
|
|
|
|
|
|
|
|
|
|
|
|
/* Indices into gc_t.meta[] and gc_t.queues[]: one allocation space for
 * each kind of memory the collector copies objects into. */
#define meta_ptrs   0
#define meta_code   1
#define meta_data   2
#define meta_weak   3
#define meta_pair   4
#define meta_symbol 5
#define meta_count  6   /* number of meta spaces */
static int extension_amount[meta_count] = {
|
|
|
|
1 * pagesize,
|
2007-02-22 21:58:38 -05:00
|
|
|
1 * pagesize,
|
|
|
|
1 * pagesize,
|
2006-11-23 19:44:29 -05:00
|
|
|
1 * pagesize,
|
2007-02-25 21:29:28 -05:00
|
|
|
1 * pagesize,
|
2007-08-30 12:01:54 -04:00
|
|
|
1 * pagesize,
|
2006-11-23 19:42:39 -05:00
|
|
|
};
|
|
|
|
|
|
|
|
static unsigned int meta_mt[meta_count] = {
|
|
|
|
pointers_mt,
|
|
|
|
code_mt,
|
|
|
|
data_mt,
|
2006-11-23 19:44:29 -05:00
|
|
|
weak_pairs_mt,
|
2007-02-25 21:29:28 -05:00
|
|
|
pointers_mt,
|
|
|
|
symbols_mt
|
2006-11-23 19:42:39 -05:00
|
|
|
};
|
|
|
|
|
2006-12-19 11:41:13 -05:00
|
|
|
typedef struct gc_t{
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_t meta [meta_count];
|
2006-11-23 19:42:39 -05:00
|
|
|
qupages_t* queues [meta_count];
|
|
|
|
ikpcb* pcb;
|
|
|
|
unsigned int* segment_vector;
|
|
|
|
int collect_gen;
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
int collect_gen_tag;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr tconc_ap;
|
|
|
|
ikptr tconc_ep;
|
|
|
|
ikptr tconc_base;
|
2006-11-23 19:42:39 -05:00
|
|
|
ikpages* tconc_queue;
|
2008-01-12 17:32:43 -05:00
|
|
|
ik_ptr_page* forward_list;
|
2006-11-23 19:38:26 -05:00
|
|
|
} gc_t;
|
|
|
|
|
2008-01-12 17:32:43 -05:00
|
|
|
/* Guardian processing; definitions appear later in this file. */
static void handle_guardians(gc_t* gc);
static void gc_finalize_guardians(gc_t* gc);
static unsigned int
|
|
|
|
next_gen_tag[generation_count] = {
|
|
|
|
(4 << meta_dirty_shift) | 1 | new_gen_tag,
|
|
|
|
(2 << meta_dirty_shift) | 2 | new_gen_tag,
|
|
|
|
(1 << meta_dirty_shift) | 3 | new_gen_tag,
|
|
|
|
(0 << meta_dirty_shift) | 4 | new_gen_tag,
|
|
|
|
(0 << meta_dirty_shift) | 4 | new_gen_tag
|
|
|
|
};
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_alloc_extending(int size, gc_t* gc, int meta_id){
|
2006-11-23 19:42:39 -05:00
|
|
|
int mapsize = align_to_next_page(size);
|
|
|
|
if(mapsize < extension_amount[meta_id]){
|
|
|
|
mapsize = extension_amount[meta_id];
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_t* meta = &gc->meta[meta_id];
|
2006-11-23 19:42:39 -05:00
|
|
|
if((meta_id != meta_data) && meta->base){
|
2006-11-23 19:38:26 -05:00
|
|
|
qupages_t* p = ik_malloc(sizeof(qupages_t));
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr aq = meta->aq;
|
|
|
|
ikptr ap = meta->ap;
|
|
|
|
ikptr ep = meta->ep;
|
2006-11-23 19:42:39 -05:00
|
|
|
p->p = aq;
|
|
|
|
p->q = ap;
|
|
|
|
p->next = gc->queues[meta_id];
|
|
|
|
gc->queues[meta_id] = p;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr x = ap;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(x < ep){
|
|
|
|
ref(x, 0) = 0;
|
|
|
|
x += wordsize;
|
|
|
|
}
|
|
|
|
}
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr mem = ik_mmap_typed(
|
2006-11-23 19:42:39 -05:00
|
|
|
mapsize,
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_mt[meta_id] | gc->collect_gen_tag,
|
2006-11-23 19:42:39 -05:00
|
|
|
gc->pcb);
|
|
|
|
gc->segment_vector = gc->pcb->segment_vector;
|
|
|
|
meta->ap = mem + size;
|
|
|
|
meta->aq = mem;
|
|
|
|
meta->ep = mem + mapsize;
|
|
|
|
meta->base = mem;
|
|
|
|
return mem;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_alloc(int size, gc_t* gc, int meta_id){
|
2006-11-23 19:38:26 -05:00
|
|
|
assert(size == align(size));
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_t* meta = &gc->meta[meta_id];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr ap = meta->ap;
|
|
|
|
ikptr ep = meta->ep;
|
|
|
|
ikptr nap = ap + size;
|
2006-11-23 19:42:39 -05:00
|
|
|
if(nap > ep){
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
return meta_alloc_extending(size, gc, meta_id);
|
2006-11-23 19:38:26 -05:00
|
|
|
} else {
|
2006-11-23 19:42:39 -05:00
|
|
|
meta->ap = nap;
|
2006-11-23 19:38:26 -05:00
|
|
|
return ap;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_ptr(int size, gc_t* gc){
|
2006-11-23 19:38:26 -05:00
|
|
|
assert(size == align(size));
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
return meta_alloc(size, gc, meta_ptrs);
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_large_ptr(int size, gc_t* gc){
|
2007-08-30 12:01:54 -04:00
|
|
|
int memreq = align_to_next_page(size);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr mem =
|
2007-08-30 12:01:54 -04:00
|
|
|
ik_mmap_typed(memreq,
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
pointers_mt | large_object_tag | gc->collect_gen_tag,
|
2007-08-30 12:01:54 -04:00
|
|
|
gc->pcb);
|
|
|
|
gc->segment_vector = gc->pcb->segment_vector;
|
|
|
|
qupages_t* p = ik_malloc(sizeof(qupages_t));
|
|
|
|
p->p = mem;
|
|
|
|
p->q = mem+size;
|
2008-01-01 21:08:07 -05:00
|
|
|
bzero((char*)(long)(mem+size), memreq-size);
|
2007-08-30 12:01:54 -04:00
|
|
|
p->next = gc->queues[meta_ptrs];
|
|
|
|
gc->queues[meta_ptrs] = p;
|
|
|
|
return mem;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static inline void
|
2007-12-23 13:37:48 -05:00
|
|
|
enqueue_large_ptr(ikptr mem, int size, gc_t* gc){
|
2007-08-30 12:01:54 -04:00
|
|
|
int i = page_index(mem);
|
|
|
|
int j = page_index(mem+size-1);
|
|
|
|
while(i<=j){
|
|
|
|
gc->segment_vector[i] =
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
pointers_mt | large_object_tag | gc->collect_gen_tag;
|
2007-08-30 12:01:54 -04:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
qupages_t* p = ik_malloc(sizeof(qupages_t));
|
|
|
|
p->p = mem;
|
|
|
|
p->q = mem+size;
|
|
|
|
p->next = gc->queues[meta_ptrs];
|
|
|
|
gc->queues[meta_ptrs] = p;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_symbol_record(gc_t* gc){
|
2007-05-15 08:56:22 -04:00
|
|
|
assert(symbol_record_size == align(symbol_record_size));
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
return meta_alloc(symbol_record_size, gc, meta_symbol);
|
2007-05-15 08:56:22 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-02-25 21:29:28 -05:00
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_pair(gc_t* gc){
|
|
|
|
return meta_alloc(pair_size, gc, meta_pair);
|
2006-11-23 19:44:29 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_weak_pair(gc_t* gc){
|
|
|
|
meta_t* meta = &gc->meta[meta_weak];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr ap = meta->ap;
|
|
|
|
ikptr ep = meta->ep;
|
|
|
|
ikptr nap = ap + pair_size;
|
2006-11-23 19:44:29 -05:00
|
|
|
if(nap > ep){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr mem = ik_mmap_typed(
|
2006-11-23 19:44:29 -05:00
|
|
|
pagesize,
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_mt[meta_weak] | gc->collect_gen_tag,
|
2006-11-23 19:44:29 -05:00
|
|
|
gc->pcb);
|
|
|
|
gc->segment_vector = gc->pcb->segment_vector;
|
|
|
|
meta->ap = mem + pair_size;
|
|
|
|
meta->aq = mem;
|
|
|
|
meta->ep = mem + pagesize;
|
|
|
|
meta->base = mem;
|
|
|
|
return mem;
|
|
|
|
} else {
|
|
|
|
meta->ap = nap;
|
|
|
|
return ap;
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_data(int size, gc_t* gc){
|
2006-11-23 19:42:39 -05:00
|
|
|
assert(size == align(size));
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
return meta_alloc(size, gc, meta_data);
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static inline ikptr
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc_alloc_new_code(int size, gc_t* gc){
|
2007-02-22 21:58:38 -05:00
|
|
|
assert(size == align(size));
|
2006-11-23 19:44:29 -05:00
|
|
|
if(size < pagesize){
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
return meta_alloc(size, gc, meta_code);
|
2006-11-23 19:44:29 -05:00
|
|
|
} else {
|
|
|
|
int memreq = align_to_next_page(size);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr mem = ik_mmap_code(memreq, gc->collect_gen, gc->pcb);
|
2006-11-23 19:44:29 -05:00
|
|
|
gc->segment_vector = gc->pcb->segment_vector;
|
|
|
|
qupages_t* p = ik_malloc(sizeof(qupages_t));
|
|
|
|
p->p = mem;
|
|
|
|
p->q = mem+size;
|
2008-01-01 21:08:07 -05:00
|
|
|
bzero((char*)(long)(mem+size), memreq-size);
|
2006-11-23 19:44:29 -05:00
|
|
|
p->next = gc->queues[meta_code];
|
|
|
|
gc->queues[meta_code] = p;
|
|
|
|
return mem;
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
|
2007-08-30 12:54:21 -04:00
|
|
|
static void
|
|
|
|
add_to_collect_count(ikpcb* pcb, int bytes){
|
|
|
|
int minor = bytes + pcb->allocation_count_minor;
|
|
|
|
while(minor >= most_bytes_in_minor){
|
|
|
|
minor -= most_bytes_in_minor;
|
|
|
|
pcb->allocation_count_major++;
|
|
|
|
}
|
|
|
|
pcb->allocation_count_minor = minor;
|
|
|
|
}
|
|
|
|
|
2007-08-30 12:01:54 -04:00
|
|
|
|
|
|
|
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
static void
|
2007-12-23 13:37:48 -05:00
|
|
|
gc_tconc_push_extending(gc_t* gc, ikptr tcbucket){
|
2006-11-23 19:42:39 -05:00
|
|
|
if(gc->tconc_base){
|
|
|
|
ikpages* p = ik_malloc(sizeof(ikpages));
|
|
|
|
p->base = gc->tconc_base;
|
|
|
|
p->size = pagesize;
|
|
|
|
p->next = gc->tconc_queue;
|
|
|
|
gc->tconc_queue = p;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr ap =
|
2007-06-28 18:32:16 -04:00
|
|
|
ik_mmap_typed(pagesize,
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
meta_mt[meta_ptrs] | gc->collect_gen_tag,
|
2007-06-28 18:32:16 -04:00
|
|
|
gc->pcb);
|
2007-08-30 12:54:21 -04:00
|
|
|
add_to_collect_count(gc->pcb, pagesize);
|
2007-06-28 18:32:16 -04:00
|
|
|
gc->segment_vector = gc->pcb->segment_vector;
|
2008-01-01 21:08:07 -05:00
|
|
|
bzero((char*)(long)ap, pagesize);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr nap = ap + 2*wordsize;
|
2006-11-23 19:42:39 -05:00
|
|
|
gc->tconc_base = ap;
|
|
|
|
gc->tconc_ap = nap;
|
|
|
|
gc->tconc_ep = ap + pagesize;
|
|
|
|
ref(ap,0) = tcbucket;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
static inline void
|
2007-12-23 13:37:48 -05:00
|
|
|
gc_tconc_push(gc_t* gc, ikptr tcbucket){
|
|
|
|
ikptr ap = gc->tconc_ap;
|
|
|
|
ikptr nap = ap + 2*wordsize;
|
2006-11-23 19:42:39 -05:00
|
|
|
if(nap > gc->tconc_ep){
|
|
|
|
gc_tconc_push_extending(gc, tcbucket);
|
|
|
|
} else {
|
|
|
|
gc->tconc_ap = nap;
|
|
|
|
ref(ap,0) = tcbucket;
|
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2006-12-16 19:00:34 -05:00
|
|
|
|
|
|
|
#ifndef NDEBUG
/* Debug builds thread a `caller` string through add_object so the
 * implementation can identify which root an object was reached from
 * (callers pass names such as "next_k", "symbol_table"). */
static ikptr add_object_proc(gc_t* gc, ikptr x, char* caller);
#define add_object(gc,x,caller) add_object_proc(gc,x,caller)
#else
/* Release builds drop the caller argument entirely; the macro keeps
 * call sites identical in both configurations. */
static ikptr add_object_proc(gc_t* gc, ikptr x);
#define add_object(gc,x,caller) add_object_proc(gc,x)
#endif
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static void collect_stack(gc_t*, ikptr top, ikptr base);
|
2006-11-23 19:38:26 -05:00
|
|
|
static void collect_loop(gc_t*);
|
2006-11-23 19:42:39 -05:00
|
|
|
static void fix_weak_pointers(gc_t*);
|
|
|
|
static void gc_add_tconcs(gc_t*);
|
2006-11-23 19:38:26 -05:00
|
|
|
|
|
|
|
/* ik_collect is called from scheme under the following conditions:
|
|
|
|
* 1. An attempt is made to allocate a small object and the ap is above
|
|
|
|
* the red line.
|
|
|
|
* 2. The current frame of the call is dead, so, upon return from ik_collect,
|
|
|
|
* the caller returns to its caller.
|
|
|
|
* 3. The frame-pointer of the caller to S_collect is saved at
|
|
|
|
* pcb->frame_pointer. No variables are live at that frame except for
|
|
|
|
* the return point (at *(pcb->frame_pointer)).
|
|
|
|
* 4. S_collect must return a new ap (in pcb->allocation_pointer) that has
|
|
|
|
* at least 2 pages of memory free.
|
|
|
|
 * 5. S_collect must also update pcb->allocation_redline to be 2 pages below
|
|
|
|
* the real end of heap.
|
|
|
|
* 6. ik_collect should not move the stack.
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
/* Thin alias for ik_collect — NOTE(review): presumably kept for a
 * separate calling convention used by generated code; confirm against
 * the call sites before removing. */
ikpcb* ik_collect_vararg(int req, ikpcb* pcb){
  ikpcb* result = ik_collect(req, pcb);
  return result;
}
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
/* Map a collection ordinal to the oldest generation to collect:
 * every 4th collection collects up to gen 1, every 16th up to gen 2,
 * every 64th up to gen 3, every 256th up to gen 4; all other
 * collections touch only the nursery (gen 0).  A generation matches
 * when all bits of its mask are set in `id`. */
static int collection_id_to_gen(int id){
  static const int mask[] = { 255, 63, 15, 3 };
  static const int gen[]  = {   4,  3,  2, 1 };
  int i;
  for(i = 0; i < 4; i++){
    if((id & mask[i]) == mask[i]){
      return gen[i];
    }
  }
  return 0;
}
|
|
|
|
|
2006-11-23 19:44:29 -05:00
|
|
|
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
static void scan_dirty_pages(gc_t*);
|
|
|
|
|
|
|
|
static void deallocate_unused_pages(gc_t*);
|
|
|
|
|
|
|
|
static void fix_new_pages(gc_t* gc);
|
|
|
|
|
2007-02-22 21:58:38 -05:00
|
|
|
extern void verify_integrity(ikpcb* pcb, char*);
|
2006-11-23 19:42:39 -05:00
|
|
|
|
2007-08-30 12:54:21 -04:00
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
ikpcb*
|
2006-12-06 10:08:34 -05:00
|
|
|
ik_collect(int mem_req, ikpcb* pcb){
|
2007-02-22 21:58:38 -05:00
|
|
|
#ifndef NDEBUG
|
|
|
|
verify_integrity(pcb, "entry");
|
|
|
|
#endif
|
2007-09-06 22:45:20 -04:00
|
|
|
|
2006-12-29 05:45:30 -05:00
|
|
|
{ /* ACCOUNTING */
|
2008-01-01 04:24:36 -05:00
|
|
|
long int bytes = ((long int)pcb->allocation_pointer) -
|
|
|
|
((long int)pcb->heap_base);
|
2007-08-30 12:54:21 -04:00
|
|
|
add_to_collect_count(pcb, bytes);
|
2006-12-29 05:45:30 -05:00
|
|
|
}
|
|
|
|
|
2006-11-23 19:44:29 -05:00
|
|
|
struct rusage t0, t1;
|
2007-08-30 12:54:21 -04:00
|
|
|
struct timeval rt0, rt1;
|
|
|
|
gettimeofday(&rt0, 0);
|
2006-11-23 19:44:29 -05:00
|
|
|
getrusage(RUSAGE_SELF, &t0);
|
|
|
|
|
2007-08-30 21:58:24 -04:00
|
|
|
pcb->collect_key = false_object;
|
2006-11-23 19:38:26 -05:00
|
|
|
gc_t gc;
|
2006-11-23 19:42:39 -05:00
|
|
|
bzero(&gc, sizeof(gc_t));
|
|
|
|
gc.pcb = pcb;
|
|
|
|
gc.segment_vector = pcb->segment_vector;
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
gc.collect_gen = collection_id_to_gen(pcb->collection_id);
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
gc.collect_gen_tag = next_gen_tag[gc.collect_gen];
|
2006-11-23 19:42:39 -05:00
|
|
|
pcb->collection_id++;
|
|
|
|
#ifndef NDEBUG
|
2006-11-23 19:44:29 -05:00
|
|
|
fprintf(stderr, "ik_collect entry %d free=%d (collect gen=%d/id=%d)\n",
|
2006-12-06 10:08:34 -05:00
|
|
|
mem_req,
|
2006-11-23 19:44:29 -05:00
|
|
|
(unsigned int) pcb->allocation_redline
|
|
|
|
- (unsigned int) pcb->allocation_pointer,
|
|
|
|
gc.collect_gen, pcb->collection_id-1);
|
2006-11-23 19:42:39 -05:00
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2007-01-19 18:13:44 -05:00
|
|
|
/* cache heap-pages to delete later */
|
|
|
|
ikpages* old_heap_pages = pcb->heap_pages;
|
|
|
|
pcb->heap_pages = 0;
|
2006-11-23 19:38:26 -05:00
|
|
|
|
|
|
|
/* the roots are:
|
2006-11-23 19:42:39 -05:00
|
|
|
* 0. dirty pages not collected in this run
|
2006-11-23 19:38:26 -05:00
|
|
|
* 1. the stack
|
|
|
|
* 2. the next continuation
|
2006-12-25 01:28:53 -05:00
|
|
|
* 3. the symbol-table
|
2006-11-23 19:38:26 -05:00
|
|
|
*/
|
2007-01-19 18:13:44 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
scan_dirty_pages(&gc);
|
2007-09-06 22:45:20 -04:00
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
collect_stack(&gc, pcb->frame_pointer, pcb->frame_base - wordsize);
|
2006-12-01 09:52:12 -05:00
|
|
|
pcb->next_k = add_object(&gc, pcb->next_k, "next_k");
|
2006-12-25 01:28:53 -05:00
|
|
|
pcb->symbol_table = add_object(&gc, pcb->symbol_table, "symbol_table");
|
2006-12-25 02:25:40 -05:00
|
|
|
pcb->gensym_table = add_object(&gc, pcb->gensym_table, "gensym_table");
|
2006-12-16 19:00:34 -05:00
|
|
|
pcb->arg_list = add_object(&gc, pcb->arg_list, "args_list_foo");
|
2007-05-05 22:42:26 -04:00
|
|
|
pcb->base_rtd = add_object(&gc, pcb->base_rtd, "base_rtd");
|
2007-11-18 10:37:13 -05:00
|
|
|
if(pcb->root0) *(pcb->root0) = add_object(&gc, *(pcb->root0), "root0");
|
|
|
|
if(pcb->root1) *(pcb->root1) = add_object(&gc, *(pcb->root1), "root1");
|
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
/* now we trace all live objects */
|
|
|
|
collect_loop(&gc);
|
2006-12-18 15:08:33 -05:00
|
|
|
|
|
|
|
/* next we trace all guardian/guarded objects,
|
|
|
|
the procedure does a collect_loop at the end */
|
2008-01-12 17:32:43 -05:00
|
|
|
handle_guardians(&gc);
|
2007-08-31 23:28:19 -04:00
|
|
|
#ifndef NDEBUG
|
|
|
|
fprintf(stderr, "done\n");
|
|
|
|
#endif
|
|
|
|
collect_loop(&gc);
|
|
|
|
|
2006-12-18 15:08:33 -05:00
|
|
|
/* does not allocate, only bwp's dead pointers */
|
|
|
|
fix_weak_pointers(&gc);
|
2006-11-23 19:42:39 -05:00
|
|
|
/* now deallocate all unused pages */
|
|
|
|
deallocate_unused_pages(&gc);
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
fix_new_pages(&gc);
|
2008-01-12 17:32:43 -05:00
|
|
|
gc_finalize_guardians(&gc);
|
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
pcb->allocation_pointer = pcb->heap_base;
|
2007-08-31 23:28:19 -04:00
|
|
|
/* does not allocate */
|
2006-11-23 19:42:39 -05:00
|
|
|
gc_add_tconcs(&gc);
|
2007-08-31 23:28:19 -04:00
|
|
|
/* does not allocate */
|
|
|
|
#ifndef NDEBUG
|
|
|
|
fprintf(stderr, "done\n");
|
|
|
|
#endif
|
2006-11-23 19:42:39 -05:00
|
|
|
pcb->weak_pairs_ap = 0;
|
|
|
|
pcb->weak_pairs_ep = 0;
|
2006-11-23 19:40:06 -05:00
|
|
|
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
2006-11-23 19:40:06 -05:00
|
|
|
fprintf(stderr,
|
|
|
|
"[%d cons|%d sym|%d cls|%d vec|%d rec|%d cck|%d str|%d htb]\n",
|
|
|
|
pair_count,
|
|
|
|
symbol_count,
|
|
|
|
closure_count,
|
|
|
|
vector_count,
|
|
|
|
record_count,
|
|
|
|
continuation_count,
|
|
|
|
string_count,
|
|
|
|
htable_count);
|
|
|
|
pair_count = 0;
|
|
|
|
symbol_count = 0;
|
|
|
|
closure_count = 0;
|
|
|
|
vector_count = 0;
|
|
|
|
record_count = 0;
|
|
|
|
continuation_count = 0;
|
|
|
|
string_count = 0;
|
|
|
|
htable_count = 0;
|
2006-12-16 19:00:34 -05:00
|
|
|
#endif
|
2006-11-23 19:42:39 -05:00
|
|
|
//ik_dump_metatable(pcb);
|
2006-11-23 19:44:29 -05:00
|
|
|
#ifndef NDEBUG
|
|
|
|
fprintf(stderr, "collect done\n");
|
|
|
|
#endif
|
2007-08-30 12:54:21 -04:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2006-12-06 10:08:34 -05:00
|
|
|
/* delete all old heap pages */
|
2007-01-19 18:13:44 -05:00
|
|
|
if(old_heap_pages){
|
|
|
|
ikpages* p = old_heap_pages;
|
2006-12-06 10:08:34 -05:00
|
|
|
do{
|
|
|
|
ikpages* next = p->next;
|
|
|
|
ik_munmap_from_segment(p->base, p->size, pcb);
|
|
|
|
ik_free(p, sizeof(ikpages));
|
|
|
|
p=next;
|
|
|
|
} while(p);
|
2007-01-19 18:13:44 -05:00
|
|
|
old_heap_pages = 0;
|
2006-12-06 10:08:34 -05:00
|
|
|
}
|
|
|
|
|
2008-01-01 04:24:36 -05:00
|
|
|
long int free_space =
|
|
|
|
((unsigned long int)pcb->allocation_redline) -
|
|
|
|
((unsigned long int)pcb->allocation_pointer);
|
2007-12-20 03:51:43 -05:00
|
|
|
if((free_space <= mem_req) || (pcb->heap_size < IK_HEAPSIZE)){
|
2006-12-06 10:08:34 -05:00
|
|
|
#ifndef NDEBUG
|
|
|
|
fprintf(stderr, "REQ=%d, got %d\n", mem_req, free_space);
|
|
|
|
#endif
|
2008-01-01 04:24:36 -05:00
|
|
|
long int memsize = (mem_req > IK_HEAPSIZE) ? mem_req : IK_HEAPSIZE;
|
2007-09-15 01:54:45 -04:00
|
|
|
memsize = align_to_next_page(memsize);
|
2006-12-06 10:08:34 -05:00
|
|
|
ik_munmap_from_segment(
|
|
|
|
pcb->heap_base,
|
|
|
|
pcb->heap_size,
|
|
|
|
pcb);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr ptr = ik_mmap_mixed(memsize+2*pagesize, pcb);
|
2006-12-06 10:08:34 -05:00
|
|
|
pcb->allocation_pointer = ptr;
|
|
|
|
pcb->allocation_redline = ptr+memsize;
|
|
|
|
pcb->heap_base = ptr;
|
|
|
|
pcb->heap_size = memsize+2*pagesize;
|
|
|
|
}
|
2006-11-23 19:44:29 -05:00
|
|
|
|
2006-12-30 14:52:37 -05:00
|
|
|
#ifndef NDEBUG
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr x = pcb->allocation_pointer;
|
2006-12-30 14:52:37 -05:00
|
|
|
while(x < pcb->allocation_redline){
|
2007-12-23 13:37:48 -05:00
|
|
|
ref(x, 0) = (ikptr)(0x1234FFFF);
|
2006-12-30 14:52:37 -05:00
|
|
|
x+=wordsize;
|
|
|
|
}
|
2007-02-22 21:58:38 -05:00
|
|
|
#endif
|
|
|
|
#ifndef NDEBUG
|
|
|
|
verify_integrity(pcb, "exit");
|
2006-12-30 14:52:37 -05:00
|
|
|
#endif
|
2007-08-30 12:54:21 -04:00
|
|
|
|
|
|
|
getrusage(RUSAGE_SELF, &t1);
|
|
|
|
gettimeofday(&rt1, 0);
|
|
|
|
|
|
|
|
pcb->collect_utime.tv_usec += t1.ru_utime.tv_usec - t0.ru_utime.tv_usec;
|
|
|
|
pcb->collect_utime.tv_sec += t1.ru_utime.tv_sec - t0.ru_utime.tv_sec;
|
|
|
|
if (pcb->collect_utime.tv_usec >= 1000000){
|
|
|
|
pcb->collect_utime.tv_usec -= 1000000;
|
|
|
|
pcb->collect_utime.tv_sec += 1;
|
|
|
|
}
|
|
|
|
else if (pcb->collect_utime.tv_usec < 0){
|
|
|
|
pcb->collect_utime.tv_usec += 1000000;
|
|
|
|
pcb->collect_utime.tv_sec -= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pcb->collect_stime.tv_usec += t1.ru_stime.tv_usec - t0.ru_stime.tv_usec;
|
|
|
|
pcb->collect_stime.tv_sec += t1.ru_stime.tv_sec - t0.ru_stime.tv_sec;
|
|
|
|
if (pcb->collect_stime.tv_usec >= 1000000){
|
|
|
|
pcb->collect_stime.tv_usec -= 1000000;
|
|
|
|
pcb->collect_stime.tv_sec += 1;
|
|
|
|
}
|
|
|
|
else if (pcb->collect_stime.tv_usec < 0){
|
|
|
|
pcb->collect_stime.tv_usec += 1000000;
|
|
|
|
pcb->collect_stime.tv_sec -= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
pcb->collect_rtime.tv_usec += rt1.tv_usec - rt0.tv_usec;
|
|
|
|
pcb->collect_rtime.tv_sec += rt1.tv_sec - rt0.tv_sec;
|
|
|
|
if (pcb->collect_rtime.tv_usec >= 1000000){
|
|
|
|
pcb->collect_rtime.tv_usec -= 1000000;
|
|
|
|
pcb->collect_rtime.tv_sec += 1;
|
|
|
|
}
|
|
|
|
else if (pcb->collect_rtime.tv_usec < 0){
|
|
|
|
pcb->collect_rtime.tv_usec += 1000000;
|
|
|
|
pcb->collect_rtime.tv_sec -= 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
return pcb;
|
|
|
|
}
|
|
|
|
|
2006-12-19 11:41:13 -05:00
|
|
|
static inline int
|
2007-12-23 13:37:48 -05:00
|
|
|
is_live(ikptr x, gc_t* gc){
|
2006-12-19 11:41:13 -05:00
|
|
|
if(is_fixnum(x)){
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
int tag = tagof(x);
|
|
|
|
if(tag == immediate_tag){
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
if(ref(x, -tag) == forward_ptr){
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
unsigned int t = gc->segment_vector[page_index(x)];
|
|
|
|
int gen = t & gen_mask;
|
|
|
|
if(gen > gc->collect_gen){
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
next_gen(int i){
|
|
|
|
return ((i == generation_count) ? generation_count : (i+1));
|
|
|
|
}
|
|
|
|
|
2008-01-12 17:32:43 -05:00
|
|
|
|
|
|
|
static ik_ptr_page*
|
|
|
|
move_tconc(ikptr tc, ik_ptr_page* ls){
|
|
|
|
if((ls == NULL) || (ls->count == ik_ptr_page_size)){
|
|
|
|
ik_ptr_page* page = (ik_ptr_page*)ik_mmap(pagesize);
|
|
|
|
page->count = 0;
|
|
|
|
page->next = ls;
|
|
|
|
ls = page;
|
|
|
|
}
|
|
|
|
ls->ptr[ls->count++] = tc;
|
|
|
|
return ls;
|
2007-09-01 01:00:35 -04:00
|
|
|
}
|
|
|
|
|
2008-01-12 17:32:43 -05:00
|
|
|
/* Implements the guardian protocol at collection time.
 *
 * Each guarded object is represented by a pair p = (tc . obj) stored on
 * pcb->protected_list[gen].  This routine:
 *   1. partitions all protected pairs of the collected generations into
 *      pend_hold_list (obj still live) and pend_final_list (obj dead);
 *   2. runs a fixpoint: pend_final pairs whose tconc is live become
 *      finalizable -- they are resurrected via add_object and queued on
 *      gc->forward_list; resurrection may make further tconcs live, so
 *      the loop repeats until no pair changes status;
 *   3. discards pairs whose object and tconc are both dead;
 *   4. re-registers surviving pend_hold pairs on the protected list of
 *      the next generation.
 * Ends with a collect_loop, as the caller's comment requires. */
static void
handle_guardians(gc_t* gc){
  ikpcb* pcb = gc->pcb;
  ik_ptr_page* pend_hold_list = 0;
  ik_ptr_page* pend_final_list = 0;
  int gen;
  /* sort protected pairs into pend_hold and pend_final lists */
  for(gen=0; gen<=gc->collect_gen; gen++){
    ik_ptr_page* prot_list = pcb->protected_list[gen];
    pcb->protected_list[gen] = 0;
    while(prot_list){
      int i;
      for(i=0; i<prot_list->count; i++){
        ikptr p = prot_list->ptr[i];
        ikptr tc = ref(p, off_car);
        ikptr obj = ref(p, off_cdr);
        /* if the pair was already moved by this collection, follow the
         * forwarding pointer (stored in the cdr) to read the new fields */
        if(tc == forward_ptr){
          ikptr np = ref(p, off_cdr);
          tc = ref(np, off_car);
          obj = ref(np, off_cdr);
        }
        if(is_live(obj, gc)){
          pend_hold_list = move_tconc(p, pend_hold_list);
        } else {
          pend_final_list = move_tconc(p, pend_final_list);
        }
      }
      /* the page of the old protected list is consumed as we go */
      ik_ptr_page* next = prot_list->next;
      ik_munmap((ikptr)prot_list, pagesize);
      prot_list = next;
    }
  }
  /* move live tc pend_final_list pairs into final_list,
     the rest remain in pend_final_list,
     final_list objects are made live and collected in
     gc->forward_list */
  gc->forward_list = 0;
  int done = 0;
  while(!done){
    ik_ptr_page* final_list = 0;
    ik_ptr_page* ls = pend_final_list;
    pend_final_list = 0;
    while(ls){
      int i;
      for(i=0; i<ls->count; i++){
        ikptr p = ls->ptr[i];
        ikptr tc = ref(p, off_car);
        if(tc == forward_ptr){
          ikptr np = ref(p, off_cdr);
          tc = ref(np, off_car);
        }
        if(is_live(tc, gc)){
          final_list = move_tconc(p, final_list);
        } else {
          pend_final_list = move_tconc(p, pend_final_list);
        }
      }
      ik_ptr_page* next = ls->next;
      ik_munmap((ikptr)ls, pagesize);
      ls = next;
    }
    if(final_list == NULL){
      /* fixpoint reached: no further tconcs became live */
      done = 1;
    } else {
      /* resurrect the finalizable pairs and trace what they reach;
       * this may revive more tconcs, hence another iteration */
      ls = final_list;
      while(ls){
        int i;
        for(i=0; i<ls->count; i++){
          ikptr p = ls->ptr[i];
          gc->forward_list =
            move_tconc(add_object(gc, p, "guardian"),
                gc->forward_list);
        }
        ik_ptr_page* next = ls->next;
        ik_munmap((ikptr)ls, pagesize);
        ls = next;
      }
      collect_loop(gc);
    }
  }
  /* pend_final_list now contains things that are dead and
     their tconcs are also dead, deallocate */
  while(pend_final_list){
    ik_ptr_page* next = pend_final_list->next;
    ik_munmap((ikptr)pend_final_list, pagesize);
    pend_final_list = next;
  }
  /* pend_hold_list pairs with live tconcs are moved to
     the protected list of next generation. */
  ik_ptr_page* target = pcb->protected_list[next_gen(gc->collect_gen)];
  while(pend_hold_list){
    int i;
    for(i=0; i<pend_hold_list->count; i++){
      ikptr p = pend_hold_list->ptr[i];
      ikptr tc = ref(p, off_car);
      if(tc == forward_ptr){
        ikptr np = ref(p, off_cdr);
        tc = ref(np, off_car);
      }
      if(is_live(tc, gc)){
        target = move_tconc(add_object(gc, p, "guardian"), target);
      }
    }
    ik_ptr_page* next = pend_hold_list->next;
    ik_munmap((ikptr)pend_hold_list, pagesize);
    pend_hold_list = next;
  }
  collect_loop(gc);
  pcb->protected_list[next_gen(gc->collect_gen)] = target;
}
|
2006-12-19 11:41:13 -05:00
|
|
|
|
2008-01-12 17:32:43 -05:00
|
|
|
/* Appends each finalizable pair's object to its guardian's tconc queue.
 *
 * For every pair p = (tc . obj) queued on gc->forward_list by
 * handle_guardians: the current last pair of the tconc is overwritten
 * to hold (obj . p), p itself is recycled as the new empty last pair
 * (both fields set to false_object), and tc's cdr is updated to p.
 * The pages mutated here are marked fully dirty in the pcb's dirty
 * vector so later collections rescan them.  Consumes (munmaps) the
 * forward_list pages. */
static void
gc_finalize_guardians(gc_t* gc){
  ik_ptr_page* ls = gc->forward_list;
  int tconc_count = 0;  /* counts processed pairs; not read elsewhere in
                           this visible portion -- TODO confirm */
  unsigned int* dirty_vec = (unsigned int*)(long)gc->pcb->dirty_vector;
  while(ls){
    int i;
    for(i=0; i<ls->count; i++){
      tconc_count++;
      ikptr p = ls->ptr[i];
      ikptr tc = ref(p, off_car);
      ikptr obj = ref(p, off_cdr);
      ikptr last_pair = ref(tc, off_cdr);
      /* enqueue obj: old last pair becomes (obj . p) */
      ref(last_pair, off_car) = obj;
      ref(last_pair, off_cdr) = p;
      /* recycle p as the tconc's new empty last pair */
      ref(p, off_car) = false_object;
      ref(p, off_cdr) = false_object;
      ref(tc, off_cdr) = p;
      /* mark the mutated pages dirty (-1 == all cards dirty) */
      dirty_vec[page_index(tc)] = -1;
      dirty_vec[page_index(last_pair)] = -1;
    }
    ik_ptr_page* next = ls->next;
    ik_munmap((ikptr)ls, pagesize);
    ls = next;
  }
}
|
2006-12-19 11:41:13 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
/* bookkeeping counter for code-object allocation; not read in this
 * visible portion of the file -- TODO confirm its consumer */
static int alloc_code_count = 0;
|
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
/* Relocates (or pins) the code object whose entry point is `entry`.
 * `entry` points at the code's data area; x is the untagged object
 * base (entry - disp_code_data).  Returns the entry point of the
 * surviving copy.  Cases:
 *   - already forwarded: follow the forwarding pointer;
 *   - resides in a generation older than the one collected: untouched;
 *   - occupies a full page or more: promoted in place by retagging its
 *     pages to the target generation and queueing the span for
 *     scanning (large code objects are never copied);
 *   - otherwise: copied into fresh code space and a forwarding pointer
 *     is left behind. */
static ikptr
add_code_entry(gc_t* gc, ikptr entry){
  ikptr x = entry - disp_code_data;
  if(ref(x,0) == forward_ptr){
    /* already moved: forwarding pointer lives at offset wordsize */
    return ref(x,wordsize) + off_code_data;
  }
  int idx = page_index(x);
  unsigned int t = gc->segment_vector[idx];
  int gen = t & gen_mask;
  if(gen > gc->collect_gen){
    /* older generation: not collected in this run */
    return entry;
  }
  int code_size = unfix(ref(x, disp_code_code_size));
  ikptr reloc_vec = ref(x, disp_code_reloc_vector);
  ikptr freevars = ref(x, disp_code_freevars);
  ikptr annotation = ref(x, disp_code_annotation);
  int required_mem = align(disp_code_data + code_size);
  if(required_mem >= pagesize){
    /* large code object: promote its pages in place */
    int new_tag = gc->collect_gen_tag;
    int idx = page_index(x);
    gc->segment_vector[idx] = new_tag | code_mt;
    int i;
    /* first page is tagged as code, the remaining pages as data */
    for(i=pagesize, idx++; i<required_mem; i+=pagesize, idx++){
      gc->segment_vector[idx] = new_tag | data_mt;
    }
    /* queue the whole span so collect_loop scans its relocation data */
    qupages_t* p = ik_malloc(sizeof(qupages_t));
    p->p = x;
    p->q = x+required_mem;
    p->next = gc->queues[meta_code];
    gc->queues[meta_code] = p;
    return entry;
  } else {
    /* small code object: copy it and leave a forwarding pointer */
    ikptr y = gc_alloc_new_code(required_mem, gc);
    ref(y, 0) = code_tag;
    ref(y, disp_code_code_size) = fix(code_size);
    ref(y, disp_code_reloc_vector) = reloc_vec;
    ref(y, disp_code_freevars) = freevars;
    ref(y, disp_code_annotation) = annotation;
    memcpy((char*)(long)(y+disp_code_data),
           (char*)(long)(x+disp_code_data),
           code_size);
    ref(x, 0) = forward_ptr;
    ref(x, wordsize) = y + vector_tag;
    return y+disp_code_data;
  }
}
|
|
|
|
|
2006-11-23 19:44:29 -05:00
|
|
|
|
2006-11-23 19:38:26 -05:00
|
|
|
/* set to 1 to trace stack-frame collection in collect_stack */
#define DEBUG_STACK 0
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
/* Scans the Scheme stack from `top` (inclusive) up to `end`, one call
 * frame at a time.  For each frame: relocates the code object that
 * contains the frame's return point (updating the saved rp), then
 * relocates the frame slots that the frame's live mask marks as live
 * (or every slot, for the framesize==0 redirected case).  Aborts the
 * process on a malformed frame.
 *
 * Fix applied: the DEBUG_STACK trace printed the `long int` loop
 * index `i` with %d (undefined behavior per the C standard's fprintf
 * specification); now uses %ld. */
static void collect_stack(gc_t* gc, ikptr top, ikptr end){
  if(DEBUG_STACK){
    fprintf(stderr, "collecting stack from 0x%016lx .. 0x%016lx\n",
        (long) top, (long) end);
  }
  while(top < end){
    if(DEBUG_STACK){
      fprintf(stderr, "collecting frame at 0x%016lx: ", (long) top);
    }
    ikptr rp = ref(top, 0);
    long int rp_offset = unfix(ref(rp, disp_frame_offset));
    if(DEBUG_STACK){
      fprintf(stderr, "rp_offset=%ld\n", (long)rp_offset);
    }
    if(rp_offset <= 0){
      fprintf(stderr, "invalid rp_offset %ld\n", (long)rp_offset);
      exit(-1);
    }
    /* since the return point is alive, we need to find the code
     * object containing it and mark it live as well. the rp is
     * updated to reflect the new code object. */

    long int code_offset = rp_offset - disp_frame_offset;
    ikptr code_entry = rp - code_offset;
    ikptr new_code_entry = add_code_entry(gc, code_entry);
    ikptr new_rp = new_code_entry + code_offset;
    ref(top, 0) = new_rp;

    /* now for some livemask action.
     * every return point has a live mark above it. the live mask
     * is a sequence of bytes (every byte for 8 frame cells). the
     * size of the live mask is determined by the size of the frame.
     * this is how the call frame instruction sequence looks like:
     *
     *   |    ...    |
     *   | code junk |
     *   +-----------+
     *   |   byte 0  |   for fv0 .. fv7
     *   |   byte 1  |   for fv8 .. fv15
     *   |    ...    |   ...
     *   +-----------+
     *   | framesize |
     *   |    word   |
     *   +-----------+
     *   | multivalue|
     *   |    word   |
     *   +-----------+
     *   | frameoffst|  the frame offset determines how far its
     *   |    word   |  address is off from the start of the code
     *   +-----------+
     *   |  padding  |  the size of this part is fixed so that we
     *   |  and call |  can correlate the frame info (above) with rp
     *   +-----------+
     *   | code junk | <---- rp
     *   |    ...    |
     *
     * WITH ONE EXCEPTION:
     * if the framesize is 0, then the actual frame size is stored
     * on the stack immediately below the return point.
     * there is no live mask in this case, instead all values in the
     * frame are live.
     */
    long int framesize = ref(rp, disp_frame_size);
    if(DEBUG_STACK){
      fprintf(stderr, "fs=%ld\n", (long)framesize);
    }
    if(framesize < 0){
      fprintf(stderr, "invalid frame size %ld\n", (long)framesize);
      exit(-1);
    }
    else if(framesize == 0){
      /* redirected framesize: read it from the stack; every slot in
       * the frame is treated as live */
      framesize = ref(top, wordsize);
      if(framesize <= 0){
        fprintf(stderr, "invalid redirected framesize=%ld\n", (long)framesize);
        exit(-1);
      }
      ikptr base = top + framesize - wordsize;
      while(base > top){
        ikptr new_obj = add_object(gc,ref(base,0), "frame");
        ref(base,0) = new_obj;
        base -= wordsize;
      }
    } else {
      /* walk the live mask: one mask byte covers 8 frame cells */
      long int frame_cells = framesize >> fx_shift;
      long int bytes_in_mask = (frame_cells+7) >> 3;
      char* mask = (char*)(long)(rp+disp_frame_size-bytes_in_mask);

      ikptr* fp = (ikptr*)(long)(top + framesize);
      long int i;
      for(i=0; i<bytes_in_mask; i++, fp-=8){
        unsigned char m = mask[i];
#if DEBUG_STACK
        fprintf(stderr, "m[%ld]=0x%x\n", i, m);
#endif
        if(m & 0x01) { fp[-0] = add_object(gc, fp[-0], "frame0"); }
        if(m & 0x02) { fp[-1] = add_object(gc, fp[-1], "frame1"); }
        if(m & 0x04) { fp[-2] = add_object(gc, fp[-2], "frame2"); }
        if(m & 0x08) { fp[-3] = add_object(gc, fp[-3], "frame3"); }
        if(m & 0x10) { fp[-4] = add_object(gc, fp[-4], "frame4"); }
        if(m & 0x20) { fp[-5] = add_object(gc, fp[-5], "frame5"); }
        if(m & 0x40) { fp[-6] = add_object(gc, fp[-6], "frame6"); }
        if(m & 0x80) { fp[-7] = add_object(gc, fp[-7], "frame7"); }
      }
    }
    top += framesize;
  }
  if(top != end){
    fprintf(stderr, "frames did not match up 0x%016lx .. 0x%016lx\n",
        (long int) top, (long int) end);
    exit(-1);
  }
  if(DEBUG_STACK){
    fprintf(stderr, "done with stack!\n");
  }
}
|
|
|
|
|
2006-11-23 19:44:29 -05:00
|
|
|
|
|
|
|
/* Relocates the pair x and, iteratively, the spine of the list hanging
 * off its cdr.  `t` is the segment-type word of the page holding x and
 * selects ordinary vs weak pair allocation; `loc` is the location that
 * must receive the pointer to the relocated pair.  Iterating over the
 * cdr chain here (instead of recursing through add_object for every
 * cell) keeps long lists from consuming the C stack. */
static void
add_list(gc_t* gc, unsigned int t, ikptr x, ikptr* loc){
  int collect_gen = gc->collect_gen;
  while(1){
    ikptr fst = ref(x, off_car);
    ikptr snd = ref(x, off_cdr);
    ikptr y;
    if((t & type_mask) != weak_pairs_type){
      y = gc_alloc_new_pair(gc) + pair_tag;
    } else {
      y = gc_alloc_new_weak_pair(gc) + pair_tag;
    }
    *loc = y;
    /* leave a forwarding pointer in the old pair */
    ref(x,off_car) = forward_ptr;
    ref(x,off_cdr) = y;
    ref(y,off_car) = fst;
    int stag = tagof(snd);
    if(stag == pair_tag){
      if(ref(snd, -pair_tag) == forward_ptr){
        /* cdr pair already moved: link to its new location */
        ref(y, off_cdr) = ref(snd, wordsize-pair_tag);
        return;
      }
      else {
        t = gc->segment_vector[page_index(snd)];
        int gen = t & gen_mask;
        if(gen > collect_gen){
          /* cdr lives in an older generation: keep the pointer as is */
          ref(y, off_cdr) = snd;
          return;
        } else {
          /* continue down the spine with the new cdr slot as target */
          x = snd;
          loc = (ikptr*)(long)(y + off_cdr);
          /* don't return */
        }
      }
    }
    else if( (stag == immediate_tag)
        || (stag == 0)
        || (stag == (1<<fx_shift))) {
      /* immediates and fixnum-tagged values (tags 0 and 1<<fx_shift)
       * are not heap objects: copy the bits unchanged */
      ref(y,off_cdr) = snd;
      return;
    }
    else if (ref(snd, -stag) == forward_ptr){
      /* non-pair cdr already moved: follow its forwarding pointer */
      ref(y, off_cdr) = ref(snd, wordsize-stag);
      return;
    }
    else {
      /* non-pair cdr: relocate through the generic path */
      ref(y, off_cdr) = add_object(gc, snd, "add_list");
      return;
    }
  }
}
|
|
|
|
|
|
|
|
|
2007-12-23 13:37:48 -05:00
|
|
|
static ikptr
|
2006-12-16 19:00:34 -05:00
|
|
|
#ifndef NDEBUG
|
2007-12-23 13:37:48 -05:00
|
|
|
add_object_proc(gc_t* gc, ikptr x, char* caller)
|
2006-12-16 19:00:34 -05:00
|
|
|
#else
|
2007-12-23 13:37:48 -05:00
|
|
|
add_object_proc(gc_t* gc, ikptr x)
|
2006-12-16 19:00:34 -05:00
|
|
|
#endif
|
|
|
|
{
|
2006-11-23 19:38:26 -05:00
|
|
|
if(is_fixnum(x)){
|
|
|
|
return x;
|
|
|
|
}
|
2007-02-22 21:58:38 -05:00
|
|
|
assert(x != forward_ptr);
|
2006-11-23 19:38:26 -05:00
|
|
|
int tag = tagof(x);
|
|
|
|
if(tag == immediate_tag){
|
|
|
|
return x;
|
|
|
|
}
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr fst = ref(x, -tag);
|
2006-11-23 19:38:26 -05:00
|
|
|
if(fst == forward_ptr){
|
|
|
|
/* already moved */
|
|
|
|
return ref(x, wordsize-tag);
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
unsigned int t = gc->segment_vector[page_index(x)];
|
|
|
|
int gen = t & gen_mask;
|
|
|
|
if(gen > gc->collect_gen){
|
|
|
|
return x;
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
if(tag == pair_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y;
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
add_list(gc, t, x, &y);
|
2006-11-23 19:38:26 -05:00
|
|
|
return y;
|
|
|
|
}
|
2007-05-15 08:56:22 -04:00
|
|
|
#if 0
|
2006-11-23 19:38:26 -05:00
|
|
|
else if(tag == symbol_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
//ikptr y = gc_alloc_new_ptr(align(symbol_size),gen, gc) + symbol_tag;
|
|
|
|
ikptr y = gc_alloc_new_symbol(gen, gc) + symbol_tag;
|
2007-02-25 21:29:28 -05:00
|
|
|
ref(y, off_symbol_string) = ref(x, off_symbol_string);
|
|
|
|
ref(y, off_symbol_ustring) = ref(x, off_symbol_ustring);
|
|
|
|
ref(y, off_symbol_value) = ref(x, off_symbol_value);
|
|
|
|
ref(y, off_symbol_plist) = ref(x, off_symbol_plist);
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(y, off_symbol_system_value) = ref(x, off_symbol_system_value);
|
2007-02-25 21:29:28 -05:00
|
|
|
ref(y, off_symbol_code) = ref(x, off_symbol_code);
|
|
|
|
ref(y, off_symbol_errcode) = ref(x, off_symbol_errcode);
|
|
|
|
ref(y, off_symbol_unused) = 0;
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(x, -symbol_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-symbol_tag) = y;
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
2006-11-23 19:40:06 -05:00
|
|
|
symbol_count++;
|
2006-12-16 19:00:34 -05:00
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
return y;
|
|
|
|
}
|
2007-05-15 08:56:22 -04:00
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
else if(tag == closure_tag){
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr size =
|
|
|
|
disp_closure_data +
|
|
|
|
ref(fst, disp_code_freevars - disp_code_data);
|
2006-11-23 19:38:26 -05:00
|
|
|
if(size > 1024){
|
2008-01-01 21:08:07 -05:00
|
|
|
fprintf(stderr, "large closure size=0x%016lx\n", (long)size);
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr asize = align(size);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(asize, gc) + closure_tag;
|
2006-11-23 19:44:29 -05:00
|
|
|
ref(y, asize-closure_tag-wordsize) = 0;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(y-closure_tag),
|
|
|
|
(char*)(long)(x-closure_tag),
|
|
|
|
size);
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(y,-closure_tag) = add_code_entry(gc, ref(y,-closure_tag));
|
|
|
|
ref(x,-closure_tag) = forward_ptr;
|
|
|
|
ref(x,wordsize-closure_tag) = y;
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
|
|
|
closure_count++;
|
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
return y;
|
|
|
|
}
|
|
|
|
else if(tag == vector_tag){
|
|
|
|
if(is_fixnum(fst)){
|
|
|
|
/* real vector */
|
2007-02-22 21:58:38 -05:00
|
|
|
//fprintf(stderr, "X=0x%08x, FST=0x%08x\n", (int)x, (int)fst);
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr size = fst;
|
2008-03-15 21:06:47 -04:00
|
|
|
assert(((long)size) >= 0);
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr memreq = align(size + disp_vector_data);
|
2007-08-30 12:01:54 -04:00
|
|
|
if(memreq >= pagesize){
|
|
|
|
if((t & large_object_mask) == large_object_tag){
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
enqueue_large_ptr(x-vector_tag, size+disp_vector_data, gc);
|
2007-08-30 12:01:54 -04:00
|
|
|
return x;
|
|
|
|
} else {
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_large_ptr(size+disp_vector_data, gc)
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
+ vector_tag;
|
2007-08-30 12:01:54 -04:00
|
|
|
ref(y, disp_vector_length-vector_tag) = fst;
|
|
|
|
ref(y, memreq-vector_tag-wordsize) = 0;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(y+off_vector_data),
|
|
|
|
(char*)(long)(x+off_vector_data),
|
|
|
|
size);
|
2007-08-30 12:01:54 -04:00
|
|
|
ref(x,-vector_tag) = forward_ptr;
|
|
|
|
ref(x,wordsize-vector_tag) = y;
|
|
|
|
return y;
|
|
|
|
}
|
|
|
|
} else {
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(memreq, gc) + vector_tag;
|
2007-08-30 12:01:54 -04:00
|
|
|
ref(y, disp_vector_length-vector_tag) = fst;
|
|
|
|
ref(y, memreq-vector_tag-wordsize) = 0;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(y+off_vector_data),
|
|
|
|
(char*)(long)(x+off_vector_data),
|
|
|
|
size);
|
2007-08-30 12:01:54 -04:00
|
|
|
ref(x,-vector_tag) = forward_ptr;
|
|
|
|
ref(x,wordsize-vector_tag) = y;
|
|
|
|
return y;
|
|
|
|
}
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
|
|
|
vector_count++;
|
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2007-05-15 08:56:22 -04:00
|
|
|
else if(fst == symbol_record_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_symbol_record(gc) + record_tag;
|
2007-05-15 08:56:22 -04:00
|
|
|
ref(y, -record_tag) = symbol_record_tag;
|
|
|
|
ref(y, off_symbol_record_string) = ref(x, off_symbol_record_string);
|
|
|
|
ref(y, off_symbol_record_ustring) = ref(x, off_symbol_record_ustring);
|
|
|
|
ref(y, off_symbol_record_value) = ref(x, off_symbol_record_value);
|
|
|
|
ref(y, off_symbol_record_proc) = ref(x, off_symbol_record_proc);
|
|
|
|
ref(y, off_symbol_record_plist) = ref(x, off_symbol_record_plist);
|
|
|
|
ref(x, -record_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-record_tag) = y;
|
|
|
|
return y;
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
else if(tagof(fst) == rtd_tag){
|
2007-11-21 16:39:16 -05:00
|
|
|
/* struct / record */
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr size = ref(fst, off_rtd_length);
|
2007-11-21 16:39:16 -05:00
|
|
|
if(size & ((1<<align_shift)-1)) {
|
|
|
|
/* size = n * object_alignment + 4 =>
|
|
|
|
memreq = n * object_alignment + 8
|
|
|
|
= (n+1) * object_alignment => aligned */
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(size+wordsize, gc) + vector_tag;
|
2007-11-21 16:39:16 -05:00
|
|
|
ref(y, -vector_tag) = fst;
|
|
|
|
{
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr i;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = y+disp_record_data-vector_tag;
|
|
|
|
ikptr q = x+disp_record_data-vector_tag;
|
2007-11-21 16:39:16 -05:00
|
|
|
ref(p, 0) = ref(q, 0);
|
|
|
|
for(i=wordsize; i<size; i+=(2*wordsize)){
|
|
|
|
ref(p, i) = ref(q, i);
|
|
|
|
ref(p, i+wordsize) = ref(q, i+wordsize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ref(x,-vector_tag) = forward_ptr;
|
|
|
|
ref(x,wordsize-vector_tag) = y;
|
|
|
|
return y;
|
|
|
|
} else {
|
|
|
|
/* size = n * object_alignment =>
|
|
|
|
memreq = n * object_alignment + 4 + 4 (pad) */
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(size+(2*wordsize), gc) + vector_tag;
|
2007-11-21 16:39:16 -05:00
|
|
|
ref(y, -vector_tag) = fst;
|
|
|
|
{
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr i;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = y+disp_record_data-vector_tag;
|
|
|
|
ikptr q = x+disp_record_data-vector_tag;
|
2007-11-21 16:39:16 -05:00
|
|
|
for(i=0; i<size; i+=(2*wordsize)){
|
|
|
|
ref(p, i) = ref(q, i);
|
|
|
|
ref(p, i+wordsize) = ref(q, i+wordsize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ref(y, size+disp_record_data-vector_tag) = 0;
|
|
|
|
ref(x,-vector_tag) = forward_ptr;
|
|
|
|
ref(x,wordsize-vector_tag) = y;
|
|
|
|
return y;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
else if(fst == code_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr entry = x + off_code_data;
|
|
|
|
ikptr new_entry = add_code_entry(gc, entry);
|
2006-11-23 19:38:26 -05:00
|
|
|
return new_entry - off_code_data;
|
|
|
|
}
|
|
|
|
else if(fst == continuation_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr top = ref(x, off_continuation_top);
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr size = ref(x, off_continuation_size);
|
2006-12-06 10:08:34 -05:00
|
|
|
#ifndef NDEBUG
|
2006-11-23 19:38:26 -05:00
|
|
|
if(size > 4096){
|
|
|
|
fprintf(stderr, "large cont size=0x%08x\n", size);
|
|
|
|
}
|
2006-12-06 10:08:34 -05:00
|
|
|
#endif
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr next = ref(x, off_continuation_next);
|
|
|
|
ikptr y = gc_alloc_new_ptr(continuation_size, gc) + vector_tag;
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = y;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr new_top = gc_alloc_new_data(align(size), gc);
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)new_top,
|
|
|
|
(char*)(long)top,
|
|
|
|
size);
|
2006-11-23 19:38:26 -05:00
|
|
|
collect_stack(gc, new_top, new_top + size);
|
|
|
|
ref(y, -vector_tag) = continuation_tag;
|
|
|
|
ref(y, off_continuation_top) = new_top;
|
2007-12-23 13:37:48 -05:00
|
|
|
ref(y, off_continuation_size) = (ikptr) size;
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(y, off_continuation_next) = next;
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
|
|
|
continuation_count++;
|
|
|
|
#endif
|
2006-12-16 19:07:21 -05:00
|
|
|
return y;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
else if(tagof(fst) == pair_tag){
|
|
|
|
/* tcbucket */
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(tcbucket_size, gc) + vector_tag;
|
2006-11-23 19:42:39 -05:00
|
|
|
ref(y,off_tcbucket_tconc) = fst;
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr key = ref(x, off_tcbucket_key);
|
2006-11-23 19:42:39 -05:00
|
|
|
ref(y,off_tcbucket_key) = key;
|
|
|
|
ref(y,off_tcbucket_val) = ref(x, off_tcbucket_val);
|
|
|
|
ref(y,off_tcbucket_next) = ref(x, off_tcbucket_next);
|
|
|
|
if((! is_fixnum(key)) && (tagof(key) != immediate_tag)){
|
|
|
|
unsigned int kt = gc->segment_vector[page_index(key)];
|
|
|
|
if((kt & gen_mask) <= gc->collect_gen){
|
2007-08-30 11:06:21 -04:00
|
|
|
/* key will be moved */
|
2006-11-23 19:42:39 -05:00
|
|
|
gc_tconc_push(gc, y);
|
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = y;
|
|
|
|
return y;
|
|
|
|
}
|
2008-01-01 04:24:36 -05:00
|
|
|
else if((((long int)fst) & port_mask) == port_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_ptr(port_size, gc) + vector_tag;
|
2006-11-23 19:48:14 -05:00
|
|
|
ref(y, -vector_tag) = fst;
|
2008-01-01 21:08:07 -05:00
|
|
|
long int i;
|
2006-11-23 19:48:14 -05:00
|
|
|
for(i=wordsize; i<port_size; i+=wordsize){
|
|
|
|
ref(y, i-vector_tag) = ref(x, i-vector_tag);
|
|
|
|
}
|
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = y;
|
|
|
|
return y;
|
|
|
|
}
|
2007-01-21 20:36:22 -05:00
|
|
|
else if(fst == flonum_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr new = gc_alloc_new_data(flonum_size, gc) + vector_tag;
|
2007-01-21 20:36:22 -05:00
|
|
|
ref(new, -vector_tag) = flonum_tag;
|
|
|
|
flonum_data(new) = flonum_data(x);
|
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = new;
|
|
|
|
return new;
|
|
|
|
}
|
2008-01-01 21:08:07 -05:00
|
|
|
else if((fst & bignum_mask) == bignum_tag){
|
2008-01-01 04:24:36 -05:00
|
|
|
long int len = ((unsigned long int)fst) >> bignum_length_shift;
|
|
|
|
long int memreq = align(disp_bignum_data + len*wordsize);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr new = gc_alloc_new_data(memreq, gc) + vector_tag;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(new-vector_tag),
|
|
|
|
(char*)(long)(x-vector_tag),
|
|
|
|
memreq);
|
2006-12-29 05:45:30 -05:00
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = new;
|
2006-11-23 19:48:14 -05:00
|
|
|
return new;
|
|
|
|
}
|
2007-06-10 00:32:19 -04:00
|
|
|
else if(fst == ratnum_tag){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr y = gc_alloc_new_data(ratnum_size, gc) + vector_tag;
|
|
|
|
ikptr num = ref(x, disp_ratnum_num-vector_tag);
|
|
|
|
ikptr den = ref(x, disp_ratnum_den-vector_tag);
|
2007-06-10 00:32:19 -04:00
|
|
|
ref(x, -vector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-vector_tag) = y;
|
|
|
|
ref(y, -vector_tag) = fst;
|
|
|
|
ref(y, disp_ratnum_num-vector_tag) = add_object(gc, num, "num");
|
|
|
|
ref(y, disp_ratnum_den-vector_tag) = add_object(gc, den, "den");
|
|
|
|
return y;
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
else {
|
2008-01-01 04:24:36 -05:00
|
|
|
fprintf(stderr, "unhandled vector with fst=0x%016lx\n",
|
|
|
|
(long int)fst);
|
2007-02-22 21:58:38 -05:00
|
|
|
assert(0);
|
2006-11-23 19:38:26 -05:00
|
|
|
exit(-1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if(tag == string_tag){
|
|
|
|
if(is_fixnum(fst)){
|
2008-01-01 04:24:36 -05:00
|
|
|
long int strlen = unfix(fst);
|
|
|
|
long int memreq = align(strlen*string_char_size + disp_string_data);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr new_str = gc_alloc_new_data(memreq, gc) + string_tag;
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(new_str, off_string_length) = fst;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(new_str+off_string_data),
|
|
|
|
(char*)(long)(x + off_string_data),
|
2007-06-10 00:32:19 -04:00
|
|
|
strlen*string_char_size);
|
2006-11-23 19:38:26 -05:00
|
|
|
ref(x, -string_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-string_tag) = new_str;
|
2006-12-16 19:00:34 -05:00
|
|
|
#if accounting
|
|
|
|
string_count++;
|
|
|
|
#endif
|
2006-11-23 19:38:26 -05:00
|
|
|
return new_str;
|
|
|
|
}
|
|
|
|
else {
|
2008-01-01 04:24:36 -05:00
|
|
|
fprintf(stderr, "unhandled string 0x%016lx with fst=0x%016lx\n",
|
|
|
|
(long int)x, (long int)fst);
|
2006-11-23 19:38:26 -05:00
|
|
|
exit(-1);
|
|
|
|
}
|
|
|
|
}
|
2007-05-17 04:34:52 -04:00
|
|
|
else if(tag == bytevector_tag){
|
2008-01-01 04:24:36 -05:00
|
|
|
long int len = unfix(fst);
|
|
|
|
long int memreq = align(len + disp_bytevector_data + 1);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr new_bv = gc_alloc_new_data(memreq, gc) + bytevector_tag;
|
2007-05-17 04:34:52 -04:00
|
|
|
ref(new_bv, off_bytevector_length) = fst;
|
2008-01-01 21:08:07 -05:00
|
|
|
memcpy((char*)(long)(new_bv+off_bytevector_data),
|
|
|
|
(char*)(long)(x + off_bytevector_data),
|
2007-05-17 04:34:52 -04:00
|
|
|
len + 1);
|
|
|
|
ref(x, -bytevector_tag) = forward_ptr;
|
|
|
|
ref(x, wordsize-bytevector_tag) = new_bv;
|
|
|
|
return new_bv;
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
fprintf(stderr, "unhandled tag: %d\n", tag);
|
|
|
|
exit(-1);
|
|
|
|
}
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
static void
|
2007-12-23 13:37:48 -05:00
|
|
|
relocate_new_code(ikptr x, gc_t* gc){
|
|
|
|
ikptr relocvector = ref(x, disp_code_reloc_vector);
|
2007-02-22 21:58:38 -05:00
|
|
|
relocvector = add_object(gc, relocvector, "relocvec");
|
2006-11-23 19:44:29 -05:00
|
|
|
ref(x, disp_code_reloc_vector) = relocvector;
|
2007-09-04 19:59:14 -04:00
|
|
|
ref(x, disp_code_annotation) =
|
|
|
|
add_object(gc, ref(x, disp_code_annotation), "annotation");
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr relocsize = ref(relocvector, off_vector_length);
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = relocvector + off_vector_data;
|
|
|
|
ikptr q = p + relocsize;
|
|
|
|
ikptr code = x + disp_code_data;
|
2006-11-23 19:44:29 -05:00
|
|
|
while(p < q){
|
2008-01-01 04:24:36 -05:00
|
|
|
long int r = unfix(ref(p, 0));
|
|
|
|
long int tag = r & 3;
|
|
|
|
long int code_off = r >> 2;
|
2006-11-23 19:44:29 -05:00
|
|
|
if(tag == 0){
|
|
|
|
/* undisplaced pointer */
|
2007-02-22 21:58:38 -05:00
|
|
|
#ifndef NDEBUG
|
|
|
|
// fprintf(stderr, "r=0x%08x code_off=%d reloc_size=0x%08x\n",
|
|
|
|
// r, code_off, relocsize);
|
|
|
|
#endif
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr old_object = ref(p, wordsize);
|
|
|
|
ikptr new_object = add_object(gc, old_object, "reloc1");
|
2006-11-23 19:44:29 -05:00
|
|
|
ref(code, code_off) = new_object;
|
|
|
|
p += (2*wordsize);
|
|
|
|
}
|
|
|
|
else if(tag == 2){
|
|
|
|
/* displaced pointer */
|
2008-01-01 04:24:36 -05:00
|
|
|
long int obj_off = unfix(ref(p, wordsize));
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr old_object = ref(p, 2*wordsize);
|
|
|
|
ikptr new_object = add_object(gc, old_object, "reloc2");
|
2006-11-23 19:44:29 -05:00
|
|
|
ref(code, code_off) = new_object + obj_off;
|
|
|
|
p += (3 * wordsize);
|
|
|
|
}
|
|
|
|
else if(tag == 3){
|
|
|
|
/* displaced relative pointer */
|
2008-01-01 04:24:36 -05:00
|
|
|
long int obj_off = unfix(ref(p, wordsize));
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr obj = ref(p, 2*wordsize);
|
2007-02-22 21:58:38 -05:00
|
|
|
#ifndef NDEBUG
|
|
|
|
//fprintf(stderr, "obj=0x%08x, obj_off=0x%08x\n", (int)obj,
|
|
|
|
// obj_off);
|
|
|
|
#endif
|
|
|
|
obj = add_object(gc, obj, "reloc3");
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr displaced_object = obj + obj_off;
|
|
|
|
ikptr next_word = code + code_off + wordsize;
|
2008-01-01 04:24:36 -05:00
|
|
|
ikptr relative_distance = displaced_object - (long int)next_word;
|
2006-11-23 19:44:29 -05:00
|
|
|
ref(next_word, -wordsize) = relative_distance;
|
|
|
|
p += (3*wordsize);
|
|
|
|
}
|
|
|
|
else if(tag == 1){
|
|
|
|
/* do nothing */
|
|
|
|
p += (2 * wordsize);
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
else {
|
2008-01-01 04:24:36 -05:00
|
|
|
fprintf(stderr, "invalid rtag %ld in 0x%016lx\n", tag, r);
|
2006-11-23 19:44:29 -05:00
|
|
|
exit(-1);
|
2007-02-22 21:58:38 -05:00
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
collect_loop(gc_t* gc){
|
|
|
|
int done;
|
|
|
|
do{
|
|
|
|
done = 1;
|
2006-11-23 19:44:29 -05:00
|
|
|
{ /* scan the pending pairs pages */
|
|
|
|
qupages_t* qu = gc->queues[meta_pair];
|
2006-11-23 19:38:26 -05:00
|
|
|
if(qu){
|
|
|
|
done = 0;
|
2006-11-23 19:44:29 -05:00
|
|
|
gc->queues[meta_pair] = 0;
|
2006-11-23 19:38:26 -05:00
|
|
|
do{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = qu->p;
|
|
|
|
ikptr q = qu->q;
|
2006-11-23 19:38:26 -05:00
|
|
|
while(p < q){
|
2006-11-29 17:06:16 -05:00
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "loop");
|
2006-11-23 19:44:29 -05:00
|
|
|
p += (2*wordsize);
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
qupages_t* next = qu->next;
|
|
|
|
ik_free(qu, sizeof(qupages_t));
|
|
|
|
qu = next;
|
|
|
|
} while(qu);
|
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:44:29 -05:00
|
|
|
|
|
|
|
{ /* scan the pending pointer pages */
|
|
|
|
qupages_t* qu = gc->queues[meta_ptrs];
|
2006-11-23 19:42:39 -05:00
|
|
|
if(qu){
|
2006-11-23 19:38:26 -05:00
|
|
|
done = 0;
|
2006-11-23 19:44:29 -05:00
|
|
|
gc->queues[meta_ptrs] = 0;
|
2006-11-23 19:38:26 -05:00
|
|
|
do{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = qu->p;
|
|
|
|
ikptr q = qu->q;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
2006-11-29 17:06:16 -05:00
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "pending");
|
2006-11-23 19:44:29 -05:00
|
|
|
p += wordsize;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
qupages_t* next = qu->next;
|
|
|
|
ik_free(qu, sizeof(qupages_t));
|
|
|
|
qu = next;
|
|
|
|
} while(qu);
|
|
|
|
}
|
|
|
|
}
|
2007-02-25 21:29:28 -05:00
|
|
|
|
|
|
|
{ /* scan the pending symbol pages */
|
|
|
|
qupages_t* qu = gc->queues[meta_symbol];
|
|
|
|
if(qu){
|
|
|
|
done = 0;
|
|
|
|
gc->queues[meta_symbol] = 0;
|
|
|
|
do{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = qu->p;
|
|
|
|
ikptr q = qu->q;
|
2007-02-25 21:29:28 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "symbols");
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
qupages_t* next = qu->next;
|
|
|
|
ik_free(qu, sizeof(qupages_t));
|
|
|
|
qu = next;
|
|
|
|
} while(qu);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
{ /* scan the pending code objects */
|
|
|
|
qupages_t* codes = gc->queues[meta_code];
|
|
|
|
if(codes){
|
|
|
|
gc->queues[meta_code] = 0;
|
|
|
|
done = 0;
|
|
|
|
do{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = codes->p;
|
|
|
|
ikptr q = codes->q;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
|
|
|
relocate_new_code(p, gc);
|
|
|
|
alloc_code_count--;
|
2006-11-23 19:44:29 -05:00
|
|
|
p += align(disp_code_data + unfix(ref(p, disp_code_code_size)));
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
qupages_t* next = codes->next;
|
|
|
|
ik_free(codes, sizeof(qupages_t));
|
|
|
|
codes = next;
|
|
|
|
} while(codes);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
{/* see if there are any remaining in the main ptr segment */
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_pair];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->aq;
|
|
|
|
ikptr q = meta->ap;
|
2006-11-23 19:42:39 -05:00
|
|
|
if(p < q){
|
|
|
|
done = 0;
|
|
|
|
do{
|
|
|
|
meta->aq = q;
|
|
|
|
while(p < q){
|
2006-11-29 17:06:16 -05:00
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "rem");
|
2006-11-23 19:44:29 -05:00
|
|
|
p += (2*wordsize);
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
p = meta->aq;
|
|
|
|
q = meta->ap;
|
|
|
|
} while (p < q);
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_symbol];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->aq;
|
|
|
|
ikptr q = meta->ap;
|
2007-02-25 21:29:28 -05:00
|
|
|
if(p < q){
|
|
|
|
done = 0;
|
|
|
|
do{
|
|
|
|
meta->aq = q;
|
|
|
|
while(p < q){
|
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "sym");
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
p = meta->aq;
|
|
|
|
q = meta->ap;
|
|
|
|
} while (p < q);
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_ptrs];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->aq;
|
|
|
|
ikptr q = meta->ap;
|
2006-11-23 19:42:39 -05:00
|
|
|
if(p < q){
|
|
|
|
done = 0;
|
|
|
|
do{
|
|
|
|
meta->aq = q;
|
|
|
|
while(p < q){
|
2006-11-29 17:06:16 -05:00
|
|
|
ref(p,0) = add_object(gc, ref(p,0), "rem2");
|
2006-11-23 19:44:29 -05:00
|
|
|
p += wordsize;
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
p = meta->aq;
|
|
|
|
q = meta->ap;
|
|
|
|
} while (p < q);
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_code];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->aq;
|
|
|
|
ikptr q = meta->ap;
|
2006-11-23 19:42:39 -05:00
|
|
|
if(p < q){
|
|
|
|
done = 0;
|
|
|
|
do{
|
|
|
|
meta->aq = q;
|
|
|
|
do{
|
|
|
|
alloc_code_count--;
|
|
|
|
relocate_new_code(p, gc);
|
2006-11-23 19:44:29 -05:00
|
|
|
p += align(disp_code_data + unfix(ref(p, disp_code_code_size)));
|
2006-11-23 19:42:39 -05:00
|
|
|
} while (p < q);
|
|
|
|
p = meta->aq;
|
|
|
|
q = meta->ap;
|
|
|
|
} while (p < q);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* phew */
|
|
|
|
} while (! done);
|
|
|
|
{
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
/* zero out remaining pointers */
|
|
|
|
/* FIXME: did you hear of code reuse? */
|
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_pair];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->ap;
|
|
|
|
ikptr q = meta->ep;
|
2006-11-23 19:44:29 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p, 0) = 0;
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_symbol];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->ap;
|
|
|
|
ikptr q = meta->ep;
|
2007-02-25 21:29:28 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p, 0) = 0;
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_ptrs];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->ap;
|
|
|
|
ikptr q = meta->ep;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p, 0) = 0;
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_weak];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->ap;
|
|
|
|
ikptr q = meta->ep;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p, 0) = 0;
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
}
|
Change of strategy in the garbage collector.
Previously: when a generation (say 2) is collected,
everything in gen 0 moves to gen 1
everything in gen 1 moves to gen 2
everything in gen 2 moves to gen 3
Now: when a generation (say 2 again) is collected
everything in gen 0 moves to gen 3
everything in gen 1 moves to gen 3
everything in gen 2 moves to gen 3
So, some objects get a leap from young to old if they're lucky to be
in the right time.
Consequences: when an object is moved by the collector, we don't
need to track old->new pointers and masks because all moved objects
are clean now. This both simplifies the collector and makes it more
efficient and might open the door for further optimization
opportunities. For bootstrap time, we get about 5% overall
saving and about 20% GC-time saving. Not bad.
BEFORE:
running stats for macro expansion:
45 collections
2558 ms elapsed cpu time, including 212 ms collecting
2576 ms elapsed real time, including 216 ms collecting
186972152 bytes allocated
running stats for code generation and serialization:
86 collections
4365 ms elapsed cpu time, including 1444 ms collecting
4374 ms elapsed real time, including 1449 ms collecting
362819096 bytes allocated
running stats for the entire bootstrap process:
131 collections
6928 ms elapsed cpu time, including 1657 ms collecting
6953 ms elapsed real time, including 1666 ms collecting
549818232 bytes allocated
AFTER:
running stats for macro expansion:
45 collections
2506 ms elapsed cpu time, including 169 ms collecting
2511 ms elapsed real time, including 171 ms collecting
186968056 bytes allocated
running stats for code generation and serialization:
86 collections
4083 ms elapsed cpu time, including 1189 ms collecting
4085 ms elapsed real time, including 1191 ms collecting
362810904 bytes allocated
running stats for the entire bootstrap process:
131 collections
6591 ms elapsed cpu time, including 1359 ms collecting
6599 ms elapsed real time, including 1362 ms collecting
549805944 bytes allocated
Happy Happy Joy Joy
2007-12-15 10:43:29 -05:00
|
|
|
{
|
|
|
|
meta_t* meta = &gc->meta[meta_code];
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = meta->ap;
|
|
|
|
ikptr q = meta->ep;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
|
|
|
ref(p, 0) = 0;
|
|
|
|
p += wordsize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
fix_weak_pointers(gc_t* gc){
  /* Post-copy pass over weak pairs: walk every page in the segment
     vector and, for pages that hold weak pairs newly produced by this
     collection (new_gen_tag), fix up each car slot:
       - if the referent was moved by the collector (its first word is
         forward_ptr), replace the car with the forwarded pointer;
       - if the referent still lives in a collected generation
         (<= collect_gen) and was therefore NOT moved, it is dead:
         replace the car with bwp_object (broken-weak-pointer). */
  unsigned int* segment_vec = gc->segment_vector;
  ikpcb* pcb = gc->pcb;
  long int lo_idx = page_index(pcb->memory_base);
  long int hi_idx = page_index(pcb->memory_end);
  long int i = lo_idx;
  int collect_gen = gc->collect_gen;
  while(i < hi_idx){
    unsigned int t = segment_vec[i];
    /* select pages that are weak-pair typed AND tagged as belonging
       to the new (just-copied-into) generation */
    if((t & (type_mask|new_gen_mask)) ==
        (weak_pairs_type|new_gen_tag)){
      //int gen = t & gen_mask;
      if (1) { //(gen > collect_gen){
        ikptr p = (ikptr)(i << pageshift);
        ikptr q = p + pagesize;
        while(p < q){
          ikptr x = ref(p, 0);  /* car of the weak pair at p */
          if(! is_fixnum(x)){
            int tag = tagof(x);
            if(tag != immediate_tag){
              /* first word of the referenced object (untagged) */
              ikptr fst = ref(x, -tag);
              if(fst == forward_ptr){
                /* object was copied: forwarding pointer is stored in
                   the word after the forward_ptr marker */
                ref(p, 0) = ref(x, wordsize-tag);
              } else {
                int x_gen = segment_vec[page_index(x)] & gen_mask;
                if(x_gen <= collect_gen){
                  /* not copied and in a collected generation: dead */
                  ref(p, 0) = bwp_object;
                }
              }
            }
          }
          /* advance one pair; only car slots are weak — the cdr slot
             is skipped here (presumably handled by the normal copy
             pass; confirm against add_object) */
          p += (2*wordsize);
        }
      }
    }
    i++;
  }
}
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
/* Per-page dirty words pack 4 bits per card (cards_per_page cards in a
   32-bit word).  dirty_mask[g] selects, for a collection of generation
   g, the dirty bits that indicate a card may point into a generation
   being collected (<= g); NOTE(review): the exact bit-per-generation
   layout is defined by meta_dirty_shift in ikarus-data.h — confirm
   against that header. */
static unsigned int dirty_mask[generation_count] = {
  0x88888888,
  0xCCCCCCCC,
  0xEEEEEEEE,
  0xFFFFFFFF,
  0x00000000
};

/* cleanup_mask[g] keeps only the dirty bits that remain meaningful for
   a page residing in generation g after a scan: bits for pointers into
   generations older than or equal to the page's own are cleared
   (complement of dirty_mask, shifted by one generation). */
static unsigned int cleanup_mask[generation_count] = {
  0x00000000,
  0x88888888,
  0xCCCCCCCC,
  0xEEEEEEEE,
  0xFFFFFFFF
};
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static void
scan_dirty_pointers_page(gc_t* gc, long int page_idx, int mask){
  /* Re-scan one old-generation pointers page whose dirty word overlaps
     `mask`: every slot in a dirty card is pushed through add_object
     (moving referents that live in collected generations), and the
     card's dirty nibble is recomputed from the generation bits of the
     new referents.  Clean cards keep their previous dirty bits. */
  unsigned int* segment_vec = (unsigned int*)(long)gc->segment_vector;
  unsigned int* dirty_vec = (unsigned int*)(long)gc->pcb->dirty_vector;
  unsigned int t = segment_vec[page_idx];
  unsigned int d = dirty_vec[page_idx];
  unsigned int masked_d = d & mask;
  ikptr p = (ikptr)(page_idx << pageshift);
  int j;
  unsigned int new_d = 0;
  for(j=0; j<cards_per_page; j++){
    if(masked_d & (0xF << (j*meta_dirty_shift))){
      /* dirty card */
      ikptr q = p + cardsize;
      unsigned int card_d = 0;
      while(p < q){
        ikptr x = ref(p, 0);
        if(is_fixnum(x) || (tagof(x) == immediate_tag)){
          /* do nothing */
        } else {
          ikptr y = add_object(gc, x, "nothing");
          /* add_object may grow/replace the segment vector */
          segment_vec = gc->segment_vector;
          ref(p, 0) = y;
          /* accumulate the generation bits of all referents */
          card_d = card_d | segment_vec[page_index(y)];
        }
        p += wordsize;
      }
      /* turn accumulated segment bits into this card's dirty nibble */
      card_d = (card_d & meta_dirty_mask) >> meta_dirty_shift;
      new_d = new_d | (card_d<<(j*meta_dirty_shift));
    } else {
      /* clean card: skip it, preserving its old dirty bits */
      p += cardsize;
      new_d = new_d | (d & (0xF << (j*meta_dirty_shift)));
    }
  }
  /* re-load: add_object may have reallocated the dirty vector */
  dirty_vec = (unsigned int*)(long)gc->pcb->dirty_vector;
  /* drop bits that no longer matter for this page's generation */
  new_d = new_d & cleanup_mask[t & gen_mask];
  dirty_vec[page_idx] = new_d;
}
|
|
|
|
|
2006-11-23 19:44:29 -05:00
|
|
|
static void
scan_dirty_code_page(gc_t* gc, long int page_idx, unsigned int mask){
  /* Re-scan one old-generation code page: for every code object on the
     page, relocate its code and push each entry of its relocation
     vector through add_object, accumulating the referents' generation
     bits into the page's new dirty word.
     NOTE(review): `mask` is currently unused (the d/masked_d logic is
     commented out), so the whole page is rescanned regardless of which
     cards were dirty — confirm this is intentional. */
  ikptr p = (ikptr)(page_idx << pageshift);
  ikptr start = p;
  ikptr q = p + pagesize;
  unsigned int* segment_vec = (unsigned int*)(long)gc->segment_vector;
  unsigned int* dirty_vec = (unsigned int*)(long)gc->pcb->dirty_vector;
  //unsigned int d = dirty_vec[page_idx];
  unsigned int t = segment_vec[page_idx];
  //unsigned int masked_d = d & mask;
  unsigned int new_d = 0;
  while(p < q){
    if(ref(p, 0) != code_tag){
      /* no (more) code objects on this page: stop scanning */
      p = q;
    }
    else {
      /* index of the card holding the start of this code object */
      long int j = ((long int)p - (long int)start) / cardsize;
      long int code_size = unfix(ref(p, disp_code_code_size));
      relocate_new_code(p, gc);
      segment_vec = gc->segment_vector;
      ikptr rvec = ref(p, disp_code_reloc_vector);
      ikptr len = ref(rvec, off_vector_length);
      assert(((long)len) >= 0);
      long int i;
      unsigned long int code_d = segment_vec[page_index(rvec)];
      /* len is a fixnum (byte count of the vector's data), so stepping
         i by wordsize visits each element once */
      for(i=0; i<len; i+=wordsize){
        ikptr r = ref(rvec, i+off_vector_data);
        if(is_fixnum(r) || (tagof(r) == immediate_tag)){
          /* do nothing */
        } else {
          r = add_object(gc, r, "nothing2");
          segment_vec = gc->segment_vector;
          code_d = code_d | segment_vec[page_index(r)];
        }
      }
      /* NOTE(review): unlike scan_dirty_pointers_page, code_d is not
         masked with meta_dirty_mask / shifted down before being placed
         at card j — verify the intended dirty-bit encoding here. */
      new_d = new_d | (code_d<<(j*meta_dirty_shift));
      p += align(code_size + disp_code_data);
    }
  }
  /* re-load: add_object may have reallocated the dirty vector */
  dirty_vec = (unsigned int*)(long)gc->pcb->dirty_vector;
  new_d = new_d & cleanup_mask[t & gen_mask];
  dirty_vec[page_idx] = new_d;
}
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
scan_dirty_pages(gc_t* gc){
|
|
|
|
ikpcb* pcb = gc->pcb;
|
2008-01-01 21:08:07 -05:00
|
|
|
long int lo_idx = page_index(pcb->memory_base);
|
|
|
|
long int hi_idx = page_index(pcb->memory_end);
|
|
|
|
unsigned int* dirty_vec = (unsigned int*)(long)pcb->dirty_vector;
|
|
|
|
unsigned int* segment_vec = (unsigned int*)(long)pcb->segment_vector;
|
2006-11-23 19:42:39 -05:00
|
|
|
int collect_gen = gc->collect_gen;
|
|
|
|
unsigned int mask = dirty_mask[collect_gen];
|
2008-01-01 21:08:07 -05:00
|
|
|
long int i = lo_idx;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(i < hi_idx){
|
|
|
|
unsigned int d = dirty_vec[i];
|
|
|
|
if(d & mask){
|
|
|
|
unsigned int t = segment_vec[i];
|
|
|
|
if((t & gen_mask) > collect_gen){
|
|
|
|
int type = t & type_mask;
|
|
|
|
if(type == pointers_type){
|
|
|
|
scan_dirty_pointers_page(gc, i, mask);
|
2008-01-01 21:08:07 -05:00
|
|
|
dirty_vec = (unsigned int*)(long)pcb->dirty_vector;
|
|
|
|
segment_vec = (unsigned int*)(long)pcb->segment_vector;
|
2006-11-23 19:44:29 -05:00
|
|
|
}
|
2007-02-25 21:29:28 -05:00
|
|
|
else if(type == symbols_type){
|
|
|
|
scan_dirty_pointers_page(gc, i, mask);
|
2008-01-01 21:08:07 -05:00
|
|
|
dirty_vec = (unsigned int*)(long)pcb->dirty_vector;
|
|
|
|
segment_vec = (unsigned int*)(long)pcb->segment_vector;
|
2007-02-25 21:29:28 -05:00
|
|
|
}
|
2006-11-23 19:44:29 -05:00
|
|
|
else if (type == weak_pairs_type){
|
2007-09-06 22:45:20 -04:00
|
|
|
scan_dirty_pointers_page(gc, i, mask);
|
2008-01-01 21:08:07 -05:00
|
|
|
dirty_vec = (unsigned int*)(long)pcb->dirty_vector;
|
|
|
|
segment_vec = (unsigned int*)(long)pcb->segment_vector;
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
2006-11-23 19:44:29 -05:00
|
|
|
else if (type == code_type){
|
|
|
|
if((t & gen_mask) > collect_gen){
|
|
|
|
scan_dirty_code_page(gc, i, mask);
|
2008-01-01 21:08:07 -05:00
|
|
|
dirty_vec = (unsigned int*)(long)pcb->dirty_vector;
|
|
|
|
segment_vec = (unsigned int*)(long)pcb->segment_vector;
|
2006-11-23 19:44:29 -05:00
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
else if (t & scannable_mask) {
|
|
|
|
fprintf(stderr, "BUG: unhandled scan of type 0x%08x\n", t);
|
|
|
|
exit(-1);
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
|
|
|
deallocate_unused_pages(gc_t* gc){
|
|
|
|
ikpcb* pcb = gc->pcb;
|
|
|
|
int collect_gen = gc->collect_gen;
|
|
|
|
unsigned int* segment_vec = pcb->segment_vector;
|
2008-01-01 21:08:07 -05:00
|
|
|
ikptr memory_base = pcb->memory_base;
|
|
|
|
ikptr memory_end = pcb->memory_end;
|
|
|
|
ikptr lo_idx = page_index(memory_base);
|
|
|
|
ikptr hi_idx = page_index(memory_end);
|
|
|
|
ikptr i = lo_idx;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(i < hi_idx){
|
|
|
|
unsigned int t = segment_vec[i];
|
|
|
|
if(t & dealloc_mask){
|
|
|
|
int gen = t & old_gen_mask;
|
|
|
|
if(gen <= collect_gen){
|
|
|
|
/* we're interested */
|
|
|
|
if(t & new_gen_mask){
|
|
|
|
/* do nothing yet */
|
|
|
|
} else {
|
2008-01-01 21:08:07 -05:00
|
|
|
ik_munmap_from_segment((ikptr)(i<<pageshift),pagesize,pcb);
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
i++;
|
|
|
|
}
|
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
|
2006-11-23 19:42:39 -05:00
|
|
|
|
|
|
|
static void
fix_new_pages(gc_t* gc){
  /* Final pass after collection: clear the new-generation tag from
     every page in the heap, so pages that received survivors this
     cycle become ordinary members of their generation. */
  ikpcb* pcb = gc->pcb;
  unsigned int* segment_vec = pcb->segment_vector;
  ikptr memory_base = pcb->memory_base;
  ikptr memory_end = pcb->memory_end;
  ikptr lo_idx = page_index(memory_base);
  ikptr hi_idx = page_index(memory_end);
  ikptr i = lo_idx;
  while(i < hi_idx){
    /* unconditionally clearing the bit is equivalent to (and cheaper
       than) the earlier test-then-clear version of this loop */
    segment_vec[i] &= ~new_gen_mask;
    i++;
  }
}
|
|
|
|
|
|
|
|
static void
add_one_tconc(ikpcb* pcb, ikptr p){
  /* Append a freshly-allocated pair (the two words at p) to the tconc
     (tail-concatenative queue) held in the tcbucket referenced from
     ref(p,0).  The current tail pair of the tconc is overwritten to
     carry the tcbucket, and the new pair becomes the fresh tail. */
  ikptr tcbucket = ref(p,0);
  ikptr tc = ref(tcbucket, off_tcbucket_tconc);
  assert(tagof(tc) == pair_tag);
  ikptr d = ref(tc, off_cdr);  /* current tail pair of the tconc */
  assert(tagof(d) == pair_tag);
  ikptr new_pair = p + pair_tag;  /* tag the raw words as a pair */
  /* fill the old tail: car = the tcbucket, cdr = the new tail */
  ref(d, off_car) = tcbucket;
  ref(d, off_cdr) = new_pair;
  /* the new tail pair starts out empty (#f . #f) */
  ref(new_pair, off_car) = false_object;
  ref(new_pair, off_cdr) = false_object;
  ref(tc, off_cdr) = new_pair;
  /* NOTE(review): rewrites the tcbucket's first word to
     (tcbucket_size - wordsize) — presumably restoring its header after
     the GC used that word; confirm against the tcbucket layout. */
  ref(tcbucket, -vector_tag) = (ikptr)(tcbucket_size - wordsize);
  /* mark the mutated pages fully dirty so the write barrier sees the
     old->new pointers just installed */
  ((int*)(long)pcb->dirty_vector)[page_index(tc)] = -1;
  ((int*)(long)pcb->dirty_vector)[page_index(d)] = -1;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
gc_add_tconcs(gc_t* gc){
|
|
|
|
if(gc->tconc_base == 0){
|
|
|
|
return;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
ikpcb* pcb = gc->pcb;
|
|
|
|
{
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = gc->tconc_base;
|
|
|
|
ikptr q = gc->tconc_ap;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
2007-06-28 18:53:18 -04:00
|
|
|
add_one_tconc(pcb, p);
|
2007-06-28 18:46:27 -04:00
|
|
|
p += 2*wordsize;
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
2006-11-23 19:42:39 -05:00
|
|
|
ikpages* qu = gc->tconc_queue;
|
|
|
|
while(qu){
|
2007-12-23 13:37:48 -05:00
|
|
|
ikptr p = qu->base;
|
|
|
|
ikptr q = p + qu->size;
|
2006-11-23 19:42:39 -05:00
|
|
|
while(p < q){
|
2007-06-28 18:53:18 -04:00
|
|
|
add_one_tconc(pcb, p);
|
2007-06-28 18:46:27 -04:00
|
|
|
p += 2*wordsize;
|
2006-11-23 19:42:39 -05:00
|
|
|
}
|
|
|
|
ikpages* next = qu->next;
|
|
|
|
ik_free(qu, sizeof(ikpages));
|
|
|
|
qu = next;
|
2006-11-23 19:38:26 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-08-31 23:28:19 -04:00
|
|
|
|