c0or1k/trunk/conts/libmem/mm/alloc_page.c (rev 2)
https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
/*
 * A proof-of-concept linked-list based page allocator.
 *
 * Copyright (C) 2007 Bahadir Balban
 */
#include <stdio.h>
#include <string.h>
#include <l4/config.h>
#include <l4/macros.h>
#include <l4/types.h>
#include <l4/lib/list.h>
#include "alloc_page.h"
#include INC_GLUE(memory.h)
#include INC_SUBARCH(mm.h)
#include INC_GLUE(memlayout.h)
#include <l4lib/macros.h>
#include L4LIB_INC_ARCH(syscalls.h)
#include L4LIB_INC_ARCH(syslib.h)

struct page_allocator allocator;

/*
 * Allocate a new page area descriptor from the page area cache
 */
static struct page_area *new_page_area(struct page_allocator *p)
{
        struct mem_cache *cache;
        struct page_area *new_area;

        list_foreach_struct(cache, &p->pga_cache_list, list) {
                if ((new_area = mem_cache_alloc(cache)) != 0) {
                        new_area->cache = cache;
                        p->pga_free--;
                        return new_area;
                }
        }
        return 0;
}

/*
 * Given a page @quantity, finds a free region, splitting a larger
 * area if necessary, and returns the new area.
 */
static struct page_area *
get_free_page_area(int quantity, struct page_allocator *p)
{
        struct page_area *new, *area;

        if (quantity <= 0)
                return 0;

        list_foreach_struct(area, &p->page_area_list, list) {

                /* Check for exact size match */
                if (area->numpages == quantity && !area->used) {
                        area->used = 1;
                        return area;
                }

                /* Divide a bigger area */
                if (area->numpages > quantity && !area->used) {
                        /* Out of page area descriptors */
                        if (!(new = new_page_area(p)))
                                return 0;
                        area->numpages -= quantity;
                        new->pfn = area->pfn + area->numpages;
                        new->numpages = quantity;
                        new->used = 1;
                        link_init(&new->list);
                        list_insert(&new->list, &area->list);
                        return new;
                }
        }

        /* No more pages */
        return 0;
}
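
/*
 * Worked example (illustrative, not in the original source): with a
 * single free area { pfn = 0x100, numpages = 8 }, a request for 3
 * pages shrinks the free area to numpages = 5 and inserts a used area
 * { pfn = 0x105, numpages = 3 } right after it in the list; the
 * allocation is carved from the tail of the free region.
 */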

/*
 * All physical memory is tracked by a simple linked list implementation. A
 * single list contains both used and free page_area descriptors. Each
 * page_area describes a contiguous region of physical pages, indicating its
 * location by its pfn.
 *
 * alloc_page() keeps track of all page-granularity memory, except the bits
 * that were in use before the allocator was initialised, i.e. anything
 * outside the @start .. @end range. This includes the page tables, the first
 * caches allocated by this function, and compile-time allocated kernel data
 * and text. Other memory regions such as IO are likewise not tracked by
 * alloc_page() but by other means.
 */

void init_page_allocator(unsigned long start, unsigned long end)
{
        /* Initialise a page area cache in the first page */
        struct page_area *freemem, *area;
        struct mem_cache *cache;

        link_init(&allocator.page_area_list);
        link_init(&allocator.pga_cache_list);

        /* Initialise the first page area cache */
        cache = mem_cache_init(phys_to_virt((void *)start), PAGE_SIZE,
                               sizeof(struct page_area), 0);
        list_insert(&cache->list, &allocator.pga_cache_list);

        /* Initialise the first area that describes the page just allocated */
        area = mem_cache_alloc(cache);
        link_init(&area->list);
        area->pfn = __pfn(start);
        area->used = 1;
        area->numpages = 1;
        area->cache = cache;
        list_insert(&area->list, &allocator.page_area_list);

        /* Update freemem start address */
        start += PAGE_SIZE;

        /* Initialise first area that describes all of free physical memory */
        freemem = mem_cache_alloc(cache);
        link_init(&freemem->list);
        freemem->pfn = __pfn(start);
        freemem->numpages = __pfn(end) - freemem->pfn;
        freemem->cache = cache;
        freemem->used = 0;

        /* Add it as the first unused page area */
        list_insert(&freemem->list, &allocator.page_area_list);

        /* Initialise free page area counter */
        allocator.pga_free = mem_cache_total_empty(cache);
}
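
/*
 * Resulting list after init (illustrative): two areas, the used
 * one-page area holding the descriptor cache itself, followed by one
 * free area spanning the rest of the [start, end) range:
 *
 *      { pfn = __pfn(start),     numpages = 1,                          used = 1 }
 *      { pfn = __pfn(start) + 1, numpages = __pfn(end) - __pfn(start) - 1, used = 0 }
 */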

/*
 * Check if we're about to run out of free page area structures.
 * If so, allocate a new cache of page areas.
 */
int check_page_areas(struct page_allocator *p)
{
        struct page_area *new;
        struct mem_cache *newcache;

        /* If only one free area left */
        if (p->pga_free == 1) {

                /* Use that area to allocate a new page */
                if (!(new = get_free_page_area(1, p)))
                        return -1;      /* Out of memory */

                /*
                 * At most one free area may remain: the divide path
                 * consumes the last descriptor, while an exact-size
                 * match consumes none at all.
                 */
                BUG_ON(p->pga_free > 1);

                /* Initialise it as a new source of page area structures */
                newcache = mem_cache_init(phys_to_virt((void *)__pfn_to_addr(new->pfn)),
                                          PAGE_SIZE, sizeof(struct page_area), 0);

                /*
                 * Update the free page area counter
                 * NOTE: need to lock the allocator here
                 */
                p->pga_free += mem_cache_total_empty(newcache);

                /*
                 * Add the new cache to the available
                 * list of free page area caches
                 */
                list_insert(&newcache->list, &p->pga_cache_list);
                /* Unlock here */
        }
        return 0;
}
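
/*
 * Metadata lifecycle sketch (illustrative): descriptor caches live in
 * pages owned by the allocator itself. A cache page is carved out here
 * when descriptors run low, and handed back via free_page() by
 * merge_free_areas() once every descriptor in it has been released.
 */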

void *alloc_page(int quantity)
{
        struct page_area *new;

        /*
         * First make sure we have enough page
         * area structures in the cache
         */
        if (check_page_areas(&allocator) < 0)
                return 0; /* Out of memory */

        /*
         * Now allocate the actual pages, using the available
         * page area structures to describe the allocation
         */
        if (!(new = get_free_page_area(quantity, &allocator)))
                return 0; /* Out of memory */

        /* Return physical address */
        return (void *)__pfn_to_addr(new->pfn);
}


/* Merges two page areas, frees the area cache if empty, returns the merged area. */
struct page_area *merge_free_areas(struct page_area *before,
                                   struct page_area *after)
{
        struct mem_cache *c;

        BUG_ON(before->pfn + before->numpages != after->pfn);
        BUG_ON(before->used || after->used);
        BUG_ON(before == after);

        before->numpages += after->numpages;
        list_remove(&after->list);
        c = after->cache;
        mem_cache_free(c, after);

        /* Recursively free the cache page */
        if (mem_cache_is_empty(c)) {
                list_remove(&c->list);
                if (free_page(virt_to_phys(c)) < 0) {
                        printf("Page ptr: 0x%lx, virt_to_phys = 0x%lx\n"
                               "Page not found in cache.\n",
                               (unsigned long)c, (unsigned long)virt_to_phys(c));
                        BUG();
                }
        }
        return before;
}
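
/*
 * Merge sketch (illustrative): freeing { pfn = 0x105, numpages = 3 }
 * between free neighbours { pfn = 0x100, numpages = 5 } and
 * { pfn = 0x108, numpages = 8 } collapses all three into a single
 * free area { pfn = 0x100, numpages = 16 }.
 */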

static int find_and_free_page_area(void *addr, struct page_allocator *p)
{
        struct page_area *area, *prev, *next;

        /* First find the page area to be freed. */
        list_foreach_struct(area, &p->page_area_list, list)
                if (__pfn_to_addr(area->pfn) == (unsigned long)addr &&
                    area->used) {       /* Found it */
                        area->used = 0;
                        goto found;
                }
        return -1; /* Finished the loop, but area not found. */

found:
        /* Now merge with adjacent areas, if possible */
        if (area->list.prev != &p->page_area_list) {
                prev = link_to_struct(area->list.prev, struct page_area, list);
                if (!prev->used)
                        area = merge_free_areas(prev, area);
        }
        if (area->list.next != &p->page_area_list) {
                next = link_to_struct(area->list.next, struct page_area, list);
                if (!next->used)
                        area = merge_free_areas(area, next);
        }
        return 0;
}

int free_page(void *paddr)
{
        return find_and_free_page_area(paddr, &allocator);
}
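
/*
 * Hypothetical self-test (not part of the original source): exercises
 * the public alloc_page()/free_page() pair once init_page_allocator()
 * has run. Left out of the build on purpose.
 */
#if 0
void alloc_page_selftest(void)
{
        void *four = alloc_page(4);     /* four contiguous physical pages */
        void *one = alloc_page(1);

        if (!four || !one) {
                printf("alloc_page: out of memory\n");
                return;
        }

        /* Free in any order; adjacent free areas are merged back */
        free_page(four);
        free_page(one);
}
#endif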
