// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

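// RecordSpan is registered below as the span FixAlloc's "first use"
// hook: each time FixAlloc hands out a fresh MSpan, the span is
// pushed onto the heap's allspans list so the rest of the runtime
// (e.g. the garbage collector) can iterate over every span.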
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;

	h = vh;
	s = (MSpan*)p;
	s->allnext = h->allspans;
	h->allspans = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}

static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
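		// On 64-bit the page map is indexed relative to arena_start,
		// since the full address space is too large to index directly.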
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
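		// Fix up the boundary entries of the page map: the page just
		// before t is now the last page of s, and t's first and last
		// pages must point at t before it is freed below.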
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

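	// The first word of a free span records whether its memory may
	// contain stale data; if the mark is nonzero, clear the span
	// before handing it out.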
	if(*(uintptr*)(s->start<<PageShift) != 0)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
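		// The batched request failed; retry with only the pages
		// actually needed before giving up.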
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %llu-byte block (%llu in use)\n", (unsigned long long)ask, (unsigned long long)mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans.  Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->state != MSpanInUse || s->ref != 0) {
		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
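	// The heapmap invariant (a free span's first and last pages map
	// to the span) makes both neighbors reachable directly via
	// map[p-1] and map[p+s->npages].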
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		p -= t->npages;
		h->map[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		h->map[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);

	// TODO(rsc): IncrementalScavenge() to return memory to OS.
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->state = 0;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

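// Remove span from whatever list it is on; a span that is not
// linked into any list (next == prev == nil) is left alone.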
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		// runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}