/*
 * munmap() for unmapping a portion of an address space.
 *
 * Copyright (C) 2008 Bahadir Balban
 */
#include <mmap.h>
#include <file.h>
#include <l4/api/errno.h>
#include <l4/lib/math.h>
#include L4LIB_INC_ARCH(syslib.h)
#include <vm_area.h>
#include <malloc/malloc.h>
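
/*
 * Note: range arguments below are page frame numbers (pfns) rather than
 * virtual addresses, and every range [pfn_start, pfn_end) is end-exclusive.
 */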

/*
 * Splits a vma; the splitter region must lie in the *middle* of the
 * original vma, so that two smaller vmas remain on either side.
 */
int vma_split(struct vm_area *vma, struct tcb *task,
	      const unsigned long pfn_start, const unsigned long pfn_end)
{
	struct vm_area *new;
	unsigned long unmap_start = pfn_start, unmap_end = pfn_end;
	int err;

	/* Allocate an uninitialised vma first */
	if (!(new = vma_new(0, 0, 0, 0)))
		return -ENOMEM;

	/*
	 * Sanity check that the splitter range really does end up
	 * producing two smaller vmas.
	 */
	BUG_ON(vma->pfn_start >= pfn_start || vma->pfn_end <= pfn_end);

	/* Update the new and original vmas */
	new->pfn_end = vma->pfn_end;
	new->pfn_start = pfn_end;
	new->file_offset = vma->file_offset + new->pfn_start - vma->pfn_start;
	vma->pfn_end = pfn_start;
	new->flags = vma->flags;

	/*
	 * Copy the object links of the original vma to the new vma. A split
	 * like this increases the map count of the mapped object(s), since
	 * two vmas on the same task now map the same object(s).
	 */
	vma_copy_links(new, vma);

	/* Add the new vma next to the original one */
	list_insert_tail(&new->list, &vma->list);

	/* Unmap the removed portion */
	BUG_ON((err = l4_unmap((void *)__pfn_to_addr(unmap_start),
			       unmap_end - unmap_start, task->tid)) < 0);

	return 0;
}
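
/*
 * Illustration (hypothetical pfn values): unmapping [20, 30) from a vma
 * covering [10, 50) leaves the original vma as [10, 20), creates a new
 * vma for [30, 50) whose file_offset is 20 pages past the original's,
 * and unmaps pfns [20, 30) from the task.
 */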

/* This shrinks the vma from *one* end only, either start or end */
int vma_shrink(struct vm_area *vma, struct tcb *task,
	       const unsigned long pfn_start, const unsigned long pfn_end)
{
	unsigned long diff, unmap_start, unmap_end;
	int err;

	/* Shrink from the end */
	if (vma->pfn_start < pfn_start) {
		BUG_ON(pfn_start >= vma->pfn_end);
		unmap_start = pfn_start;
		unmap_end = vma->pfn_end;
		vma->pfn_end = pfn_start;

	/* Shrink from the beginning */
	} else if (vma->pfn_end > pfn_end) {
		BUG_ON(pfn_end <= vma->pfn_start);
		unmap_start = vma->pfn_start;
		unmap_end = pfn_end;
		diff = pfn_end - vma->pfn_start;
		vma->file_offset += diff;
		vma->pfn_start = pfn_end;
	} else
		BUG();

	/* Unmap the shrunken portion */
	BUG_ON((err = l4_unmap((void *)__pfn_to_addr(unmap_start),
			       unmap_end - unmap_start, task->tid)) < 0);

	return 0;
}
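
/*
 * Illustration (hypothetical pfn values): for a vma covering [10, 40),
 * an unmap of [30, 60) shrinks it from the end to [10, 30), while an
 * unmap of [0, 20) shrinks it from the beginning to [20, 40) and
 * advances file_offset by the 10 pages cut from the front. In both
 * cases only the overlapping pages are unmapped from the task.
 */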

/* Destroys a single vma from a task and unmaps its range from task space */
int vma_destroy_single(struct tcb *task, struct vm_area *vma)
{
	int ret;

	/* Release all object links */
	if ((ret = vma_drop_merge_delete_all(vma)) < 0)
		return ret;

	/*
	 * Unmap the whole vma address range. Note that this may return
	 * -1 if the area was never faulted in, i.e. it is being unmapped
	 * before ever having been touched.
	 */
	l4_unmap((void *)__pfn_to_addr(vma->pfn_start),
		 vma->pfn_end - vma->pfn_start, task->tid);

	/* Unlink and delete the vma */
	list_remove(&vma->list);
	kfree(vma);

	return 0;
}

/*
 * Unmaps the given region from a vma. Depending on the region and the vma
 * range, this may result in the vma being shrunk, split or destroyed.
 */
int vma_unmap(struct vm_area *vma, struct tcb *task,
	      const unsigned long pfn_start, const unsigned long pfn_end)
{
	// printf("Unmapping vma. Tid: %d, 0x%x-0x%x\n", task->tid, __pfn_to_addr(pfn_start), __pfn_to_addr(pfn_end));

	/* Split needed? */
	if (vma->pfn_start < pfn_start && vma->pfn_end > pfn_end)
		return vma_split(vma, task, pfn_start, pfn_end);
	/* Shrink needed? */
	else if (((vma->pfn_start >= pfn_start) && (vma->pfn_end > pfn_end))
		 || ((vma->pfn_start < pfn_start) && (vma->pfn_end <= pfn_end)))
		return vma_shrink(vma, task, pfn_start, pfn_end);
	/* Destroy needed? */
	else if ((vma->pfn_start >= pfn_start) && (vma->pfn_end <= pfn_end))
		return vma_destroy_single(task, vma);
	else
		BUG();

	return 0;
}
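
/*
 * Illustration (hypothetical pfn values) of the three cases above, for a
 * vma covering [10, 40): unmapping [20, 30) splits it, unmapping [30, 60)
 * or [0, 20) shrinks it, and unmapping [10, 40) or [0, 50) destroys it.
 */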

/* Checks the vma and vm_object type and flushes its pages accordingly */
int vma_flush_pages(struct vm_area *vma)
{
	struct vm_object *vmo;
	struct vm_obj_link *vmo_link;
	int err;

	/* Read-only vmas need not flush objects */
	if (!(vma->flags & VM_WRITE))
		return 0;

	/*
	 * We only check the first object under the vma, since there can
	 * only be a single VM_SHARED file-backed object in the chain.
	 */
	BUG_ON(list_empty(&vma->vm_obj_list));
	vmo_link = link_to_struct(vma->vm_obj_list.next,
				  struct vm_obj_link, list);
	vmo = vmo_link->obj;

	/* Only dirty objects need flushing */
	if (!(vmo->flags & VM_DIRTY))
		return 0;

	/* Only vfs file objects are flushed */
	if (vmo->flags & VM_OBJ_FILE &&
	    vmo->flags & VMA_SHARED &&
	    !(vmo->flags & VMA_ANONYMOUS)) {

		/* Only vfs files ought to match the above criteria */
		BUG_ON(vm_object_to_file(vmo)->type != VM_FILE_VFS);

		/* Flush the pages */
		if ((err = flush_file_pages(vm_object_to_file(vmo))) < 0)
			return err;
	}

	return 0;
}
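
/*
 * Illustration: a writable, dirty, VMA_SHARED mapping of a vfs file has
 * its pages written back via flush_file_pages(), while a read-only vma
 * returns immediately and an anonymous or non-file object falls through
 * without being flushed.
 */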

/*
 * Unmaps the given virtual address range from the task. The region may
 * span zero or more vmas, and may involve the shrinking, splitting and
 * destruction of multiple vmas.
 *
 * NOTE: Shared object addresses are returned back to their pools when
 * such objects are deleted, not via this function.
 */
int do_munmap(struct tcb *task, unsigned long vaddr, unsigned long npages)
{
	const unsigned long munmap_start = __pfn(vaddr);
	const unsigned long munmap_end = munmap_start + npages;
	struct vm_area *vma, *n;
	int err;

	list_foreach_removable_struct(vma, n, &task->vm_area_head->list, list) {
		/* Check for intersection */
		if (set_intersection(munmap_start, munmap_end,
				     vma->pfn_start, vma->pfn_end)) {
			/*
			 * Flush pages if the vma is writable,
			 * dirty and file-backed.
			 */
			if ((err = vma_flush_pages(vma)) < 0)
				return err;

			/* Unmap the vma accordingly. This may delete it. */
			if ((err = vma_unmap(vma, task, munmap_start,
					     munmap_end)) < 0)
				return err;
		}
	}

	return 0;
}
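
/*
 * Illustration (hypothetical pfn values): a do_munmap() over [20, 60) on
 * a task with vmas covering [10, 30) and [40, 60) shrinks the first vma
 * to [10, 20) and destroys the second, flushing each one's dirty
 * file-backed pages first.
 */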

int sys_munmap(struct tcb *task, void *start, unsigned long length)
{
	/* Start address must be aligned on a page boundary */
	if (!is_page_aligned(start))
		return -EINVAL;

	return do_munmap(task, (unsigned long)start,
			 __pfn(page_align_up(length)));
}
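
/*
 * Example (assuming hypothetical 4K pages): sys_munmap() with a length
 * of 5000 bytes page-aligns it up to 8192, so the call ends up as
 * do_munmap(task, (unsigned long)start, 2).
 */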

/* Syncs a mapped area; currently only synchronously */
int do_msync(struct tcb *task, void *vaddr, unsigned long npages, int flags)
{
	const unsigned long msync_start = __pfn(vaddr);
	const unsigned long msync_end = msync_start + npages;
	struct vm_area *vma;
	unsigned long addr = (unsigned long)vaddr;
	int err;

	/* Find a vma that overlaps with this address range */
	while ((vma = find_vma(addr, &task->vm_area_head->list))) {

		/* Flush pages if the vma is writable, dirty and file-backed */
		if ((err = vma_flush_pages(vma)) < 0)
			return err;

		/* Update address to the end of this vma */
		addr = __pfn_to_addr(vma->pfn_end);

		/* Are we done with the requested range? */
		if (vma->pfn_end >= msync_end)
			break;
	}

	return 0;
}
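
/*
 * Illustration: an msync over [A, B) flushes the vma containing A, then
 * advances to that vma's end and repeats, stopping once the walk reaches
 * B or hits an unmapped gap where find_vma() returns NULL.
 */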

int sys_msync(struct tcb *task, void *start, unsigned long length, int flags)
{
	/* Start address must be aligned on a page boundary */
	if (!is_page_aligned(start))
		return -EINVAL;

	/*
	 * TODO: We need to pass synced and non-synced file flushes to the
	 * vfs and support synced and non-synced io.
	 */
	return do_msync(task, start, __pfn(page_align_up(length)), flags);
}