// OpenCores Subversion repository: openrisc/trunk/gnu-dev/or1k-gcc/libgo/runtime/malloc.h (rev 833)
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
//
// The allocator's data structures are:
//
//      FixAlloc: a free-list allocator for fixed-size objects,
//              used to manage storage used by the allocator.
//      MHeap: the malloc heap, managed at page (4096-byte) granularity.
//      MSpan: a run of pages managed by the MHeap.
//      MCentral: a shared free list for a given size class.
//      MCache: a per-thread (in Go, per-M) cache for small objects.
//      MStats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches:
//
//      1. Round the size up to one of the small size classes
//         and look in the corresponding MCache free list.
//         If the list is not empty, allocate an object from it.
//         This can all be done without acquiring a lock.
//
//      2. If the MCache free list is empty, replenish it by
//         taking a bunch of objects from the MCentral free list.
//         Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//      3. If the MCentral free list is empty, replenish it by
//         allocating a run of pages from the MHeap and then
//         chopping that memory into objects of the given size.
//         Allocating many objects amortizes the cost of locking
//         the heap.
//
//      4. If the MHeap is empty or has no page runs large enough,
//         allocate a new group of pages (at least 1MB) from the
//         operating system.  Allocating a large run of pages
//         amortizes the cost of talking to the operating system.
//
// Freeing a small object proceeds up the same hierarchy:
//
//      1. Look up the size class for the object and add it to
//         the MCache free list.
//
//      2. If the MCache free list is too long or the MCache has
//         too much memory, return some to the MCentral free lists.
//
//      3. If all the objects in a given span have returned to
//         the MCentral list, return that span to the page heap.
//
//      4. If the heap has too much memory, return some to the
//         operating system.
//
//      TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed.  They are zeroed if and only if
// the second word of the object is zero.  The spans in the
// page heap are always zeroed.  When a span full of objects
// is returned to the page heap, the objects that need zeroing
// are zeroed first.  There are two main benefits to delaying
// zeroing this way:
//
//      1. stack frames allocated from the small object lists
//         can avoid zeroing altogether.
//      2. the cost of zeroing when reusing a small object is
//         charged to the mutator, not the garbage collector.
//
// This C code was written with an eye toward translating to Go
// in the future.  Methods have the form Type_Method(Type *t, ...).
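
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): the
// three-level allocation fast path described above, reduced to a
// standalone program.  Everything here is a simplified stand-in -- the
// per-thread cache is a bare singly-linked free list, MCentral and
// MHeap are collapsed into one bulk-refill function, and locking is
// elided.

#include <stdio.h>
#include <stdlib.h>

typedef struct Link { struct Link *next; } Link;

enum { RefillCount = 16, ObjSize = 64 };

static Link *cache;     // stand-in for one MCache free list

// Stand-in for MCentral/MHeap refill (steps 2-4 above): grab a chunk
// of memory and chop it into fixed-size objects threaded onto a list.
static Link*
central_refill(void)
{
        char *mem = malloc(RefillCount * ObjSize);
        Link *head = NULL;
        int i;

        if(mem == NULL)
                return NULL;
        for(i = 0; i < RefillCount; i++) {
                Link *l = (Link*)(mem + i*ObjSize);
                l->next = head;
                head = l;
        }
        return head;
}

// Step 1: pop from the thread-local list; no lock needed.  When the
// list is empty, refill in bulk to amortize the (elided) lock cost.
static void*
cached_alloc(void)
{
        Link *l;

        if(cache == NULL)
                cache = central_refill();
        if(cache == NULL)
                return NULL;
        l = cache;
        cache = l->next;
        return l;
}

int
main(void)
{
        printf("obj 1 at %p, obj 2 at %p\n", cached_alloc(), cached_alloc());
        return 0;
}
// ---------------------------------------------------------------------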
 
typedef struct MCentral MCentral;
typedef struct MHeap    MHeap;
typedef struct MSpan    MSpan;
typedef struct MStats   MStats;
typedef struct MLink    MLink;

enum
{
        PageShift       = 12,
        PageSize        = 1<<PageShift,
        PageMask        = PageSize - 1,
};
typedef uintptr PageID;         // address >> PageShift
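
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): how
// PageShift, PageSize, PageMask, and PageID relate.  Standalone, with
// local copies of the constants so it compiles on its own.

#include <assert.h>
#include <stdint.h>

int
main(void)
{
        enum { Shift = 12, Size = 1<<Shift, Mask = Size-1 };
        uintptr_t addr = 0x12345;                  // arbitrary address
        uintptr_t id = addr >> Shift;              // PageID: address >> PageShift
        uintptr_t base = addr & ~(uintptr_t)Mask;  // round down to page start
        uintptr_t off = addr & Mask;               // offset within the page

        assert(base + off == addr);
        assert(id == base >> Shift);
        return 0;
}
// ---------------------------------------------------------------------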
 
enum
{
        // Computed constant.  The definition of MaxSmallSize and the
        // algorithm in msize.c produce some number of different allocation
        // size classes.  NumSizeClasses is that number.  It's needed here
        // because there are static arrays of this length; when msize runs its
        // size choosing algorithm it double-checks that NumSizeClasses agrees.
        NumSizeClasses = 61,

        // Tunable constants.
        MaxSmallSize = 32<<10,

        FixAllocChunk = 128<<10,        // Chunk size for FixAlloc
        MaxMCacheListLen = 256,         // Maximum objects on MCacheList
        MaxMCacheSize = 2<<20,          // Maximum bytes in one MCache
        MaxMHeapList = 1<<(20 - PageShift),     // Maximum page length for fixed-size list in MHeap.
        HeapAllocChunk = 1<<20,         // Chunk size for heap growth

        // Number of bits in page to span calculations (4k pages).
        // On 64-bit, we limit the arena to 16G, so 22 bits suffices.
        // On 32-bit, we don't bother limiting anything: 20 bits for 4G.
#if __SIZEOF_POINTER__ == 8
        MHeapMap_Bits = 22,
#else
        MHeapMap_Bits = 20,
#endif

        // Max number of threads to run garbage collection.
        // 2, 3, and 4 are all plausible maximums depending
        // on the hardware details of the machine.  The garbage
        // collector scales well to 4 cpus.
        MaxGcproc = 4,
};
 
// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
        MLink *next;
};
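
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): the
// MLink idiom.  A free block's own first word is reused as the "next"
// pointer, so the free list needs no extra storage -- which is also why
// the first word of a freed object gets smashed.  Standalone, with a
// local copy of the MLink definition above.

#include <stdlib.h>

typedef struct MLink MLink;
struct MLink
{
        MLink *next;
};

static void
push(MLink **list, void *block)
{
        MLink *l = (MLink*)block;       // overlay the link on the block
        l->next = *list;
        *list = l;
}

static void*
pop(MLink **list)
{
        MLink *l = *list;
        if(l != NULL)
                *list = l->next;
        return l;       // the block's first word now holds a stale link
}

int
main(void)
{
        MLink *list = NULL;
        void *a = malloc(64), *b = malloc(64);  // blocks bigger than MLink

        push(&list, a);
        push(&list, b);
        return (pop(&list) == b && pop(&list) == a) ? 0 : 1;
}
// ---------------------------------------------------------------------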
 
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.  If the pointer argument is non-nil, the caller
// wants a mapping there or nowhere.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.  The program reserves the right to start
// accessing those pages in the future.
//
// SysFree returns the memory to the operating system
// unconditionally; this is only used if an out-of-memory error
// has been detected midway through an allocation.
// It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable.
//
// SysMap maps previously reserved address space for use.

void*   runtime_SysAlloc(uintptr nbytes);
void    runtime_SysFree(void *v, uintptr nbytes);
void    runtime_SysUnused(void *v, uintptr nbytes);
void    runtime_SysMap(void *v, uintptr nbytes);
void*   runtime_SysReserve(void *v, uintptr nbytes);
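
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): one
// plausible realization of the Sys* contract above, using mmap/madvise/
// munmap with the common (Linux-style) MAP_ANONYMOUS extension.  The
// names are prefixed sketch_ because this is not the runtime's actual
// platform code, only a demonstration of how the contract maps onto a
// typical OS interface.

#include <sys/mman.h>

typedef unsigned long sketch_uintptr;   // stand-in for the runtime's uintptr

// SysAlloc: anonymous mappings arrive zeroed from the kernel,
// satisfying the "large chunk of zeroed memory" requirement.
static void*
sketch_SysAlloc(sketch_uintptr nbytes)
{
        void *v = mmap(NULL, nbytes, PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        return v == MAP_FAILED ? NULL : v;
}

// SysUnused: the pages may be reclaimed, but the range stays valid
// and the program may touch it again later.
static void
sketch_SysUnused(void *v, sketch_uintptr nbytes)
{
        madvise(v, nbytes, MADV_DONTNEED);
}

// SysFree: unconditional return to the operating system.
static void
sketch_SysFree(void *v, sketch_uintptr nbytes)
{
        munmap(v, nbytes);
}

// SysReserve: PROT_NONE claims address space without committing memory.
static void*
sketch_SysReserve(void *v, sketch_uintptr nbytes)
{
        void *p = mmap(v, nbytes, PROT_NONE,
                       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;
}

// SysMap: commit previously reserved space in place.
static void
sketch_SysMap(void *v, sketch_uintptr nbytes)
{
        mmap(v, nbytes, PROT_READ|PROT_WRITE,
             MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0);
}
// ---------------------------------------------------------------------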
 
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
        uintptr size;
        void *(*alloc)(uintptr);
        void (*first)(void *arg, byte *p);      // called first time p is returned
        void *arg;
        MLink *list;
        byte *chunk;
        uint32 nchunk;
        uintptr inuse;  // in-use bytes now
        uintptr sys;    // bytes obtained from system
};

void    runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
void*   runtime_FixAlloc_Alloc(FixAlloc *f);
void    runtime_FixAlloc_Free(FixAlloc *f, void *p);
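
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): how a
// caller might drive the FixAlloc API declared above.  This assumes the
// header's own declarations (FixAlloc, MSpan, byte, runtime_SysAlloc)
// are in scope; the hook body and the call sequence are hypothetical,
// loosely modeled on how an MSpan allocator would be set up.

static FixAlloc sketch_spanalloc;

static void
sketch_first(void *arg, byte *p)
{
        // Called the first time each object is handed out; a real hook
        // could, e.g., link a new MSpan into a list of all spans.
        (void)arg;
        (void)p;
}

static void
sketch_fixalloc_usage(void)
{
        MSpan *s;

        runtime_FixAlloc_Init(&sketch_spanalloc, sizeof(MSpan),
                              runtime_SysAlloc, sketch_first, NULL);

        // The caller is responsible for locking around these calls.
        s = runtime_FixAlloc_Alloc(&sketch_spanalloc);
        // ... use s; FixAlloc_Alloc does not zero the memory ...
        runtime_FixAlloc_Free(&sketch_spanalloc, s);
        // After Free, the first word of *s is smashed by the free-list
        // link, so no state may be kept there across reallocation.
}
// ---------------------------------------------------------------------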
 
// Statistics.
// Shared with Go: if you edit this structure, also edit extern.go.
struct MStats
{
        // General statistics.
        uint64  alloc;          // bytes allocated and still in use
        uint64  total_alloc;    // bytes allocated (even if freed)
        uint64  sys;            // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
        uint64  nlookup;        // number of pointer lookups
        uint64  nmalloc;        // number of mallocs
        uint64  nfree;          // number of frees

        // Statistics about malloc heap.
        // Protected by mheap.Lock.
        uint64  heap_alloc;     // bytes allocated and still in use
        uint64  heap_sys;       // bytes obtained from system
        uint64  heap_idle;      // bytes in idle spans
        uint64  heap_inuse;     // bytes in non-idle spans
        uint64  heap_objects;   // total number of allocated objects

        // Statistics about allocation of low-level fixed-size structures.
        // Protected by FixAlloc locks.
        uint64  stacks_inuse;   // bootstrap stacks
        uint64  stacks_sys;
        uint64  mspan_inuse;    // MSpan structures
        uint64  mspan_sys;
        uint64  mcache_inuse;   // MCache structures
        uint64  mcache_sys;
        uint64  buckhash_sys;   // profiling bucket hash table

        // Statistics about garbage collector.
        // Protected by stopping the world during GC.
        uint64  next_gc;        // next GC (in heap_alloc time)
        uint64  pause_total_ns;
        uint64  pause_ns[256];
        uint32  numgc;
        bool    enablegc;
        bool    debuggc;

        // Statistics about allocation size classes.
        struct {
                uint32 size;
                uint64 nmalloc;
                uint64 nfree;
        } by_size[NumSizeClasses];
};

extern MStats mstats
  __asm__ ("libgo_runtime.runtime.VmemStats");
 
// Size classes.  Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//      1 <= sizeclass < NumSizeClasses, for n.
//      Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//      making new objects in class i
// class_to_transfercount[i] = number of objects to move when
//      taking a bunch of objects out of the central lists
//      and putting them in the thread free list.

int32   runtime_SizeToClass(int32);
extern  int32   runtime_class_to_size[NumSizeClasses];
extern  int32   runtime_class_to_allocnpages[NumSizeClasses];
extern  int32   runtime_class_to_transfercount[NumSizeClasses];
extern  void    runtime_InitSizes(void);
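
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): how
// the size-class tables above fit together.  Assumes this header's
// declarations are in scope and runtime_InitSizes has already run; the
// request size is arbitrary and for illustration only.

static void
sketch_sizeclass_usage(void)
{
        int32 n = 100;                                  // requested bytes
        int32 c = runtime_SizeToClass(n);               // 1 <= c < NumSizeClasses
        int32 size = runtime_class_to_size[c];          // rounded size, always >= n
        int32 npages = runtime_class_to_allocnpages[c]; // span size for an MCentral refill
        int32 nmove = runtime_class_to_transfercount[c];// batch moved between MCentral and MCache

        // Every n in (class_to_size[c-1], class_to_size[c]] maps to
        // class c; the slack (size - n) is internal fragmentation.
        (void)size;
        (void)npages;
        (void)nmove;
}
// ---------------------------------------------------------------------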
 
// Per-thread (in Go, per-M) cache for small objects.
// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
        MLink *list;
        uint32 nlist;
        uint32 nlistmin;
};

struct MCache
{
        MCacheList list[NumSizeClasses];
        uint64 size;
        int64 local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap
        int64 local_objects;    // objects allocated (or freed) from cache since last lock of heap
        int64 local_alloc;      // bytes allocated (or freed) since last lock of heap
        int64 local_total_alloc;        // bytes allocated (even if freed) since last lock of heap
        int64 local_nmalloc;    // number of mallocs since last lock of heap
        int64 local_nfree;      // number of frees since last lock of heap
        int64 local_nlookup;    // number of pointer lookups since last lock of heap
        int32 next_sample;      // trigger heap sample after allocating this many bytes
        // Statistics about allocation size classes since last lock of heap.
        struct {
                int64 nmalloc;
                int64 nfree;
        } local_by_size[NumSizeClasses];
};

void*   runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
void    runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
void    runtime_MCache_ReleaseAll(MCache *c);
 
// An MSpan is a run of pages.
enum
{
        MSpanInUse = 0,
        MSpanFree,
        MSpanListHead,
        MSpanDead,
};
struct MSpan
{
        MSpan   *next;          // in a span linked list
        MSpan   *prev;          // in a span linked list
        MSpan   *allnext;       // in the list of all spans
        PageID  start;          // starting page number
        uintptr npages;         // number of pages in span
        MLink   *freelist;      // list of free objects
        uint32  ref;            // number of allocated objects in this span
        uint32  sizeclass;      // size class
        uint32  state;          // MSpanInUse etc.
        byte    *limit;         // end of data in span
};

void    runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.  We use empty MSpan structures as list heads.
void    runtime_MSpanList_Init(MSpan *list);
bool    runtime_MSpanList_IsEmpty(MSpan *list);
void    runtime_MSpanList_Insert(MSpan *list, MSpan *span);
void    runtime_MSpanList_Remove(MSpan *span);  // from whatever list it is in
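
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): the
// "empty structure as list head" idiom named above, in miniature.  A
// sentinel whose next/prev point at itself means insert and remove
// never need NULL checks.  Standalone, with a reduced node type.

#include <stddef.h>

typedef struct Node Node;
struct Node
{
        Node *next;
        Node *prev;
};

static void
list_init(Node *head)
{
        head->next = head;      // empty list: the head points at itself
        head->prev = head;
}

static int
list_is_empty(Node *head)
{
        return head->next == head;
}

static void
list_insert(Node *head, Node *n)        // insert at front
{
        n->next = head->next;
        n->prev = head;
        n->next->prev = n;
        n->prev->next = n;
}

static void
list_remove(Node *n)                    // from whatever list it is in
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = NULL;
        n->prev = NULL;
}

int
main(void)
{
        Node head, a;

        list_init(&head);
        list_insert(&head, &a);
        list_remove(&a);
        return list_is_empty(&head) ? 0 : 1;
}
// ---------------------------------------------------------------------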
 
// Central list of free objects of a given size.
struct MCentral
{
        Lock;
        int32 sizeclass;
        MSpan nonempty;
        MSpan empty;
        int32 nfree;
};

void    runtime_MCentral_Init(MCentral *c, int32 sizeclass);
int32   runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first);
void    runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first);
 
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
        Lock;
        MSpan free[MaxMHeapList];       // free lists of given length
        MSpan large;                    // free lists length >= MaxMHeapList
        MSpan *allspans;

        // span lookup
        MSpan *map[1<<MHeapMap_Bits];

        // range of addresses we might see in the heap
        byte *bitmap;
        uintptr bitmap_mapped;
        byte *arena_start;
        byte *arena_used;
        byte *arena_end;

        // central free lists for small size classes.
        // the union makes sure that the MCentrals are
        // spaced CacheLineSize bytes apart, so that each MCentral.Lock
        // gets its own cache line.
        union {
                MCentral;
                byte pad[CacheLineSize];
        } central[NumSizeClasses];

        FixAlloc spanalloc;     // allocator for MSpan*
        FixAlloc cachealloc;    // allocator for MCache*
};
extern MHeap runtime_mheap;

void    runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
MSpan*  runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
void    runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*  runtime_MHeap_Lookup(MHeap *h, void *v);
MSpan*  runtime_MHeap_LookupMaybe(MHeap *h, void *v);
void    runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
void*   runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void    runtime_MHeap_MapBits(MHeap *h);
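
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): the
// idea behind the map[] array above.  One MSpan* per 4K page turns
// pointer-to-span lookup into a shift and an index.  This is only a
// sketch of what runtime_MHeap_Lookup does; the real code in mheap.c
// handles details this omits.  Assumes this header's declarations are
// in scope.

static MSpan*
sketch_lookup(MHeap *h, void *v)
{
        uintptr p = (uintptr)v;

        if(sizeof(void*) == 8)
                p -= (uintptr)h->arena_start;   // 16G arena: page index fits in 22 bits
        return h->map[p >> PageShift];          // one MSpan* per 4K page
}
// ---------------------------------------------------------------------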
 
void*   runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32   runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void    runtime_gc(int32 force);
void    runtime_markallocated(void *v, uintptr n, bool noptr);
void    runtime_checkallocated(void *v, uintptr n);
void    runtime_markfreed(void *v, uintptr n);
void    runtime_checkfreed(void *v, uintptr n);
int32   runtime_checking;
void    runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void    runtime_unmarkspan(void *v, uintptr size);
bool    runtime_blockspecial(void*);
void    runtime_setblockspecial(void*, bool);
void    runtime_purgecachedstats(M*);

enum
{
        // flags to malloc
        FlagNoPointers = 1<<0,  // no pointers here
        FlagNoProfiling = 1<<1, // must not profile
        FlagNoGC = 1<<2,        // must not free or scan for pointers
};
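
// ---------------------------------------------------------------------
// Editor's illustrative sketch (not part of the original header): a
// hypothetical call composing the malloc flags above.  Only the
// declared runtime_mallocgc signature is taken from this header; the
// real call sites live in the runtime's .c/.goc files.

static void*
sketch_alloc_noscan(uintptr size)
{
        // Each flag occupies its own bit, so they combine with "|".
        // FlagNoPointers lets the collector skip scanning the block;
        // FlagNoProfiling keeps it out of the malloc profile.
        return runtime_mallocgc(size, FlagNoPointers|FlagNoProfiling,
                                1 /* dogc */, 1 /* zeroed */);
}
// ---------------------------------------------------------------------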
 
void    runtime_MProf_Malloc(void*, uintptr);
void    runtime_MProf_Free(void*, uintptr);
void    runtime_MProf_Mark(void (*scan)(byte *, int64));
int32   runtime_helpgc(bool*);
void    runtime_gchelper(void);

// Malloc profiling settings.
// Must match definition in extern.go.
enum {
        MProf_None = 0,
        MProf_Sample = 1,
        MProf_All = 2,
};
extern int32 runtime_malloc_profile;
 
struct __go_func_type;
bool    runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft);
void    runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64));
