linux_sd_driver/drivers/lguest/page_tables.c - rev 62 (marcus.erl)
Repository: https://opencores.org/ocsvn/test_project/test_project/trunk

/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/


/* 1024 entries in a page table page maps 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page.  */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
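
/* (Editor's illustration, not part of the original file.)  The arithmetic
 * behind the comment above, spelled out with the usual 32-bit x86 non-PAE
 * constants this code assumes: a PTE page holds PTRS_PER_PTE = 1024 entries,
 * each mapping a PAGE_SIZE = 4096-byte page, so one top-level slot covers
 * 1024 * 4096 bytes = 4MB.  The Switcher sits in the top 4MB of virtual
 * memory, which is exactly the last of the PTRS_PER_PGD = 1024 slots, hence
 * SWITCHER_PGD_INDEX == 1023. */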

/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320 The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

        /* We kill any Guest trying to touch the Switcher addresses. */
        if (index >= SWITCHER_PGD_INDEX) {
                kill_guest(lg, "attempt to access switcher pages");
                index = 0;
        }
        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(struct lguest *lg, pgd_t spgd, unsigned long vaddr)
{
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}
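
/* (Editor's sketch, not in the original file.)  The two helpers above are
 * always used together, as demand_page() and page_writable() do further
 * down.  A minimal, hypothetical combination looks like this; it assumes the
 * caller wants the current page table (lg->pgdidx) and returns NULL rather
 * than dereferencing a non-present top-level entry. */
static inline pte_t *example_shadow_pte(struct lguest *lg, unsigned long vaddr)
{
        pgd_t *spgd = spgd_addr(lg, lg->pgdidx, vaddr);

        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return NULL;
        return spte_addr(lg, *spgd, vaddr);
}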

/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
        unsigned int index = vaddr >> (PGDIR_SHIFT);
        return lg->pgdirs[lg->pgdidx].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(struct lguest *lg,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + ((vaddr >> PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to
 * put that back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;
        /* This value indicates failure. */
        unsigned long ret = -1UL;

        /* get_user_pages() is a complex interface: it gets the "struct
         * vm_area_struct" and "struct page" associated with a range of pages.
         * It also needs the task's mmap_sem held, and is not very quick.
         * It returns the number of pages it got. */
        down_read(&current->mm->mmap_sem);
        if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
                           1, write, 1, &page, NULL) == 1)
                ret = page_to_pfn(page);
        up_read(&current->mm->mmap_sem);
        return ret;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lguest *lg, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /* The Guest sets the global flag, because it thinks that it is using
         * PGE.  We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping.  We don't actually
         * use the global bit, so throw it away. */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)lg->mem_base / PAGE_SIZE;

        /* We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn.  get_pfn() finds the real physical number of the
         * page, given the virtual number. */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(lg, "failed to get page %lu", pte_pfn(gpte));
                /* When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them.  Make sure we don't think
                 * this one is valid! */
                flags = 0;
        }
        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}
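
/* (Editor's note, illustrative numbers only.)  To make the "offset inside the
 * Launcher" step above concrete: if the Guest's memory starts at
 * lg->mem_base == 0x10000000 in the Launcher's address space, then
 * base == 0x10000000 / 4096 == 0x10000, and a Guest PTE naming Guest page 5
 * makes gpte_to_spte() ask get_pfn() for Launcher virtual page
 * 0x10000 + 5 == 0x10005, whose real physical frame number ends up in the
 * shadow PTE. */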

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /* Remember that get_user_pages() took a reference to the page, in
         * get_pfn()?  We have to put it back now. */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pfn_to_page(pte_pfn(pte)));
}
/*:*/

static void check_gpte(struct lguest *lg, pte_t gpte)
{
        if ((pte_flags(gpte) & (_PAGE_PWT|_PAGE_PSE))
            || pte_pfn(gpte) >= lg->pfn_limit)
                kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || pgd_pfn(gpgd) >= lg->pfn_limit)
                kill_guest(lg, "bad page directory entry");
}

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true.  Otherwise, it was a real fault and we need to tell the Guest. */
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
        pgd_t gpgd;
        pgd_t *spgd;
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                return 0;

        /* Now look at the matching shadow entry. */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
                /* This is not really the Guest's fault, but killing it is
                 * simple for this corner case. */
                if (!ptepage) {
                        kill_guest(lg, "out of memory allocating pte page");
                        return 0;
                }
                /* We check that the Guest pgd is OK. */
                check_gpgd(lg, gpgd);
                /* And we copy the flags to the shadow PGD entry.  The page
                 * number in the shadow PGD is the page we just allocated. */
                *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
        }

        /* OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later. */
        gpte_ptr = gpte_addr(lg, gpgd, vaddr);
        gpte = lgread(lg, gpte_ptr, pte_t);

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return 0;

        /* Check they're not trying to write to a page the Guest wants
         * read-only (bit 2 of errcode == write). */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return 0;

        /* User access to a kernel-only page? (bit 3 == user access) */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return 0;

        /* Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary). */
        check_gpte(lg, gpte);

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = spte_addr(lg, *spgd, vaddr);
        /* If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry. */
        release_pte(*spte);

        /* If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()). */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(lg, gpte, 1);
        else
                /* If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable.  That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag. */
                *spte = gpte_to_spte(lg, pte_wrprotect(gpte), 0);

        /* Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
        lgwrite(lg, gpte_ptr, pte_t, gpte);

        /* The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete.  A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all. */
        return 1;
}
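
/* (Editor's note, not part of the original file.)  The "& 2" and "& 4" tests
 * above decode the x86 page-fault error code pushed by the CPU.  The comments
 * in demand_page() count bits from 1; in the usual 0-based description,
 * bit 0 (value 1) means the fault was a protection violation rather than a
 * missing page, bit 1 (value 2) means the access was a write, and bit 2
 * (value 4) means it happened in user mode.  That is also why pin_page()
 * below calls demand_page() with error code 2 to simulate a write fault. */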

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
        pgd_t *spgd;
        unsigned long flags;

        /* Look at the current top level entry: is it present? */
        spgd = spgd_addr(lg, lg->pgdidx, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
                return 0;

        /* Check the flags on the pte entry itself: it must be present and
         * writable. */
        flags = pte_flags(*(spte_addr(lg, *spgd, vaddr)));

        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
        if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
                kill_guest(lg, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /* Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one). */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}

/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;
        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

/*H:440 (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(lg, lg->pgdidx);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lguest *lg, unsigned long vaddr)
{
        pgd_t gpgd;
        pte_t gpte;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(lg, gpgd_addr(lg, vaddr), pgd_t);
        /* Toplevel not present?  We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                kill_guest(lg, "Bad address %#lx", vaddr);

        gpte = lgread(lg, gpte_addr(lg, gpgd, vaddr), pte_t);
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(lg, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
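
/* (Editor's note, illustrative numbers only.)  A concrete instance of the
 * return expression above: if the Guest PTE holds page frame number 0x1234
 * and vaddr's offset within its page is 0x7b0, guest_pa() returns
 * 0x1234 * 4096 + 0x7b0 = 0x12347b0. */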

/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}

/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;

        /* We pick one entry at random to throw out.  Choosing the Least
         * Recently Used might be better, but this is easy. */
        next = random32() % ARRAY_SIZE(lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!lg->pgdirs[next].pgdir) {
                lg->pgdirs[next].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!lg->pgdirs[next].pgdir)
                        next = lg->pgdidx;
                else
                        /* This is a blank page, so there are no kernel
                         * mappings: caller must map the stack! */
                        *blank_pgdir = 1;
        }
        /* Record which Guest toplevel this shadows. */
        lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(lg, next);

        return next;
}

/*H:430 (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see what
 * happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /* Look to see if we have this one already. */
        newpgdir = find_pgdir(lg, pgtable);
        /* If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1. */
        if (newpgdir == ARRAY_SIZE(lg->pgdirs))
                newpgdir = new_pgdir(lg, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        lg->pgdidx = newpgdir;
        /* If it was completely blank, we map in the Guest kernel stack */
        if (repin)
                pin_stack_pages(lg);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir)
                        /* Every PGD entry except the Switcher at the top */
                        for (j = 0; j < SWITCHER_PGD_INDEX; j++)
                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
        release_all_pagetables(lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(lg);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lguest *lg, int idx,
                       unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(lg, idx, vaddr);

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                /* Otherwise, we start by releasing the existing entry. */
                pte_t *spte = spte_addr(lg, *spgd, vaddr);
                release_pte(*spte);

                /* If they're setting this entry as dirty or accessed, we might
                 * as well put that entry they've given us in now.  This shaves
                 * 10% off a copy-on-write micro-benchmark. */
                if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                        check_gpte(lg, gpte);
                        *spte = gpte_to_spte(lg, gpte,
                                             pte_flags(gpte) & _PAGE_DIRTY);
                } else
                        /* Otherwise kill it and we can demand_page() it in
                         * later. */
                        *spte = __pte(0);
        }
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep
 * all the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lguest *lg,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /* Kernel mappings must be changed on all top levels.  Slow, but
         * doesn't happen often. */
        if (vaddr >= lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                        if (lg->pgdirs[i].pgdir)
                                do_set_pte(lg, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(lg, gpgdir);
                if (pgdir != ARRAY_SIZE(lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(lg, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        /* The kernel seems to try to initialize this early on: we ignore its
         * attempts to map over the Switcher. */
        if (idx >= SWITCHER_PGD_INDEX)
                return;

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs))
                /* ... throw it away. */
                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
        /* We start on the first shadow page table, and give it a blank PGD
         * page. */
        lg->pgdidx = 0;
        lg->pgdirs[lg->pgdidx].gpgdir = pgtable;
        lg->pgdirs[lg->pgdidx].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
        if (!lg->pgdirs[lg->pgdidx].pgdir)
                return -ENOMEM;
        return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lguest *lg)
{
        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(lg->kernel_address, &lg->lguest_data->kernel_address)
            /* We tell the Guest that it can't use the top 4MB of virtual
             * addresses used by the Switcher. */
            || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
            || put_user(lg->pgdirs[lg->pgdidx].gpgdir, &lg->lguest_data->pgdir))
                kill_guest(lg, "bad guest page %p", lg->lguest_data);

        /* In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
         * Switcher mappings, so check that now. */
        if (pgd_index(lg->kernel_address) >= SWITCHER_PGD_INDEX)
                kill_guest(lg, "bad kernel address %#lx", lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now that we know
 * which Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
        pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
        pgd_t switcher_pgd;
        pte_t regs_pte;

        /* Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags). */
        switcher_pgd = __pgd(__pa(switcher_pte_page) | _PAGE_KERNEL);

        lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

        /* We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is.  This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again. */
        regs_pte = pfn_pte(__pa(lg->regs_page) >> PAGE_SHIFT,
                           __pgprot(_PAGE_KERNEL));
        switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
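
/* (Editor's note, not part of the original file.)  The index expression on
 * the last line above works because this CPU's "struct lguest_pages" lives
 * inside the Switcher's top-4MB mapping: dividing its address by PAGE_SIZE
 * gives a page number, and taking that modulo PTRS_PER_PTE (1024) yields its
 * slot within the Switcher's single PTE page. */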
/*:*/

static void free_switcher_pte_pages(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
                                              struct page *switcher_page[],
                                              unsigned int pages)
{
        unsigned int i;
        pte_t *pte = switcher_pte_page(cpu);

        /* The first entries are easy: they map the Switcher code. */
        for (i = 0; i < pages; i++) {
                pte[i] = mk_pte(switcher_page[i],
                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
        }

        /* The only other thing we map is this CPU's pair of pages. */
        i = pages + cpu*2;

        /* First page (Guest registers) is writable from the Guest */
        pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));

        /* The second page contains the "struct lguest_ro_state", and is
         * read-only. */
        pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
}
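
/* (Editor's note, illustrative only.)  With the Switcher code taking a single
 * page ("pages" == 1), the layout built above puts CPU n's pair at indices
 * 1 + 2n and 2 + 2n of its PTE page: e.g. CPU 2 gets its writable registers
 * page at slot 5 and its read-only "struct lguest_ro_state" page at slot 6,
 * matching the "i = pages + cpu*2" arithmetic. */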

/* We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in juggling shadow page
 * tables in sync with the Guest's page tables is for one reason: for most
 * Guests this page table dance determines how bad performance will be.  This
 * is why Xen uses exotic direct Guest pagetable manipulation, and why both
 * Intel and AMD have implemented shadow page table support directly into
 * hardware.
 *
 * There is just one file remaining in the Host. */

/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
        unsigned int i;

        for_each_possible_cpu(i) {
                switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
                if (!switcher_pte_page(i)) {
                        free_switcher_pte_pages();
                        return -ENOMEM;
                }
                populate_switcher_pte_page(i, switcher_page, pages);
        }
        return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
        free_switcher_pte_pages();
}
