OpenCores
URL https://opencores.org/ocsvn/c0or1k/c0or1k/trunk

Subversion Repositories c0or1k

[/] [c0or1k/] [trunk/] [conts/] [posix/] [mm0/] [mm/] [memory.c] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 drasko
/*
 * Initialise the memory structures.
 *
 * Copyright (C) 2007, 2008 Bahadir Balban
 */
6
#include <init.h>
7
#include <memory.h>
8
#include <l4/macros.h>
9
#include <l4/config.h>
10
#include <l4/types.h>
11
#include <l4/api/errno.h>
12
#include <l4/generic/space.h>
13
#include L4LIB_INC_ARCH(syslib.h)
14
#include INC_GLUE(memory.h)
15
#include INC_SUBARCH(mm.h)
16
#include <memory.h>
17
#include <file.h>
18
#include <user.h>
19
#include <linker.h>
20
 
21
/* Pool handing out contiguous virtual address ranges to the pager. */
struct address_pool pager_vaddr_pool;

/* FIXME:
 * ID pool id allocation size (i.e. bitlimit/nwords parameters)
 * must be in sync with address pool allocation range. Here, since
 * the id pool needs to be determined at compile time, the two
 * parameters don't match yet.
 */

/* Bitmap size to represent an address pool of 256 MB.
 * 2048 words x 32 bits = 65536 bits; one bit per page
 * (assumes 4 KB pages — TODO confirm against INC_GLUE(memory.h)). */
#define ADDRESS_POOL_256MB              2048

/* NOTE(review): declared but never referenced in this file;
 * presumably consumed elsewhere — verify before removing. */
unsigned long free_virtual_address_start;
34
 
35
/* Same as a regular id pool except that its bitmap size is fixed
 * at compile time. It is cast to (struct id_pool *) when handed to
 * address_pool_init_with_idpool() below, so the leading fields are
 * presumably layout-compatible with struct id_pool — verify if the
 * id_pool definition changes. */
static struct pager_virtual_address_id_pool {
        int nwords;                     /* Number of u32 words in bitmap */
        int bitlimit;                   /* Total allocatable bits */
        u32 bitmap[ADDRESS_POOL_256MB];
} pager_virtual_address_id_pool = {
        .nwords = ADDRESS_POOL_256MB,
        .bitlimit = ADDRESS_POOL_256MB * 32,
};
44
 
45
/* For supplying contiguous virtual addresses to pager
 *
 * MM0:
 * task->start
 * Text
 * Data
 * Bss
 * Stack
 * mmap area start
 * mmap area end
 *
 * pager address pool
 *
 * task->end
 */
int pager_address_pool_init(void)
{
        /*
         * Back the pager's virtual address pool with the fixed-size id
         * pool above, covering the range
         * [PAGER_EXT_VIRTUAL_START, PAGER_EXT_VIRTUAL_END).
         */
        address_pool_init_with_idpool(&pager_vaddr_pool,
                                      (struct id_pool *)
                                      &pager_virtual_address_id_pool,
                                      PAGER_EXT_VIRTUAL_START,
                                      PAGER_EXT_VIRTUAL_END);
        /* Init result is ignored; this function always reports success. */
        return 0;
}
69
 
70
/* Allocate npages of contiguous pager virtual address space.
 * Returns the start address, or 0 when the pool is exhausted. */
void *l4_new_virtual(int npages)
{
        void *vaddr = pager_new_address(npages);

        return vaddr;
}
74
 
75
/* Return npages of pager virtual address space starting at virt to
 * the pool. Always yields 0; the deletion result is discarded. */
void *l4_del_virtual(void *virt, int npages)
{
        (void)pager_delete_address(virt, npages);

        return 0;
}
80
 
81
/* Maps a page from a vm_file to the pager's address space */
82
void *pager_map_page(struct vm_file *f, unsigned long page_offset)
83
{
84
        int err;
85
        struct page *p;
86
 
87
        if ((err = read_file_pages(f, page_offset, page_offset + 1)) < 0)
88
                return PTR_ERR(err);
89
 
90
        if ((p = find_page(&f->vm_obj, page_offset)))
91
                return (void *)l4_map_helper((void *)page_to_phys(p), 1);
92
        else
93
                return 0;
94
}
95
 
96
/* Unmaps a page's virtual address from the pager's address space */
void pager_unmap_page(void *addr)
{
        /* Single-page unmap; addr is presumably page-aligned (no
         * alignment fixup here, unlike pager_unmap_pages) — confirm. */
        l4_unmap_helper(addr, 1);
}
101
 
102
void *pager_new_address(int npages)
{
        /* Carve npages of contiguous virtual addresses out of the
         * pager's global address pool. */
        return address_new(&pager_vaddr_pool, npages);
}
106
 
107
int pager_delete_address(void *virt_addr, int npages)
{
        /* Give npages starting at virt_addr back to the pager's
         * global address pool; returns the pool's status code. */
        return address_del(&pager_vaddr_pool, virt_addr, npages);
}
111
 
112
/* Maps a page from a vm_file to the pager's address space */
113
void *pager_map_pages(struct vm_file *f, unsigned long page_offset, unsigned long npages)
114
{
115
        int err;
116
        struct page *p;
117
        void *addr_start, *addr;
118
 
119
        /* Get the pages */
120
        if ((err = read_file_pages(f, page_offset, page_offset + npages)) < 0)
121
                return PTR_ERR(err);
122
 
123
        /* Get the address range */
124
        if (!(addr_start = pager_new_address(npages)))
125
                return PTR_ERR(-ENOMEM);
126
        addr = addr_start;
127
 
128
        /* Map pages contiguously one by one */
129
        for (unsigned long pfn = page_offset; pfn < page_offset + npages; pfn++) {
130
                BUG_ON(!(p = find_page(&f->vm_obj, pfn)))
131
                        l4_map((void *)page_to_phys(p), addr, 1, MAP_USR_RW, self_tid());
132
                        addr += PAGE_SIZE;
133
        }
134
 
135
        return addr_start;
136
}
137
 
138
/* Unmaps npages of virtual addresses from the pager's address space,
 * starting at (page-aligned-down) addr. */
void pager_unmap_pages(void *addr, unsigned long npages)
{
        void *start = addr;

        /* Round down to the containing page boundary if needed */
        if (!is_page_aligned(start))
                start = (void *)page_align(start);

        l4_unmap_helper(start, npages);
}
148
 
149
/*
150
 * Maps multiple pages on a contiguous virtual address range,
151
 * returns pointer to byte offset in the file.
152
 */
153
void *pager_map_file_range(struct vm_file *f, unsigned long byte_offset,
154
                           unsigned long size)
155
{
156
        unsigned long mapsize = (byte_offset & PAGE_MASK) + size;
157
 
158
        void *page = pager_map_pages(f, __pfn(byte_offset), __pfn(page_align_up(mapsize)));
159
 
160
        return (void *)((unsigned long)page | (PAGE_MASK & byte_offset));
161
}
162
 
163
/* FIXME: PAGE_COLOR!!! */
164
void *pager_validate_map_user_range2(struct tcb *user, void *userptr,
165
                                    unsigned long size, unsigned int vm_flags)
166
{
167
        unsigned long start = page_align(userptr);
168
        unsigned long end = page_align_up(userptr + size);
169
        unsigned long npages = __pfn(end - start);
170
        void *virt, *virt_start;
171
        void *mapped = 0;
172
 
173
        /* Validate that user task owns this address range */
174
        if (pager_validate_user_range(user, userptr, size, vm_flags) < 0)
175
                return 0;
176
 
177
        /* Get the address range */
178
        if (!(virt_start = pager_new_address(npages)))
179
                return PTR_ERR(-ENOMEM);
180
        virt = virt_start;
181
 
182
        /* Map every page contiguously in the allocated virtual address range */
183
        for (unsigned long addr = start; addr < end; addr += PAGE_SIZE) {
184
                struct page *p = task_prefault_page(user, addr, vm_flags);
185
 
186
                if (IS_ERR(p)) {
187
                        /* Unmap pages mapped so far */
188
                        l4_unmap_helper(virt_start, __pfn(addr - start));
189
 
190
                        /* Delete virtual address range */
191
                        pager_delete_address(virt_start, npages);
192
 
193
                        return p;
194
                }
195
 
196
                l4_map((void *)page_to_phys(p),
197
                       virt, 1, MAP_USR_RW, self_tid());
198
                virt += PAGE_SIZE;
199
        }
200
 
201
        /* Set the mapped pointer to offset of user pointer given */
202
        mapped = virt_start;
203
        mapped = (void *)(((unsigned long)mapped) |
204
                          ((unsigned long)(PAGE_MASK &
205
                                           (unsigned long)userptr)));
206
 
207
        /* Return the mapped pointer */
208
        return mapped;
209
}
210
 
211
 
212
/*
213
 * Find the page's offset from membank physical start,
214
 * simply add the same offset to virtual start
215
 */
216
void *phys_to_virt(void *p)
217
{
218
        unsigned long paddr = (unsigned long)p;
219
 
220
        return (void *)(paddr - membank[0].start + PAGER_VIRTUAL_START);
221
}
222
 
223
/*
224
 * Find the page's offset from virtual start, add it to membank
225
 * physical start offset
226
 */
227
void *virt_to_phys(void *v)
228
{
229
        unsigned long vaddr = (unsigned long)v;
230
 
231
        return (void *)(vaddr - PAGER_VIRTUAL_START + membank[0].start);
232
}
233
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.