#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */
#define pgd_quicklist           (local_cpu_data->pgd_quick)
#define pmd_quicklist           (local_cpu_data->pmd_quick)
#define pte_quicklist           (local_cpu_data->pte_quick)
#define pgtable_cache_size      (local_cpu_data->pgtable_cache_sz)
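
/*
 * Each quicklist is a per-CPU, singly linked list of free page-table
 * pages: the first word of a cached page holds the address of the next
 * cached page, so popping and pushing entries takes only a few loads and
 * stores (see pgd_alloc_one_fast()/pgd_free() and friends below).
 */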

static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
        unsigned long *ret = pgd_quicklist;

        if (__builtin_expect(ret != NULL, 1)) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                --pgtable_cache_size;
        } else
                ret = NULL;
        return (pgd_t *) ret;
}

static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
        /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
        pgd_t *pgd = pgd_alloc_one_fast(mm);

        if (__builtin_expect(pgd == NULL, 0)) {
                pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
                if (__builtin_expect(pgd != NULL, 1))
                        clear_page(pgd);
        }
        return pgd;
}

static inline void
pgd_free (pgd_t *pgd)
{
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        ++pgtable_cache_size;
}

static inline void
pgd_populate (struct mm_struct *mm, pgd_t *pgd_entry, pmd_t *pmd)
{
        pgd_val(*pgd_entry) = __pa(pmd);
}
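
/*
 * Note that pgd_populate() (and pmd_populate() below) simply store the
 * physical address of the next level's page-table page in the entry.
 */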


static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
        unsigned long *ret = (unsigned long *)pmd_quicklist;

        if (__builtin_expect(ret != NULL, 1)) {
                pmd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                --pgtable_cache_size;
        }
        return (pmd_t *)ret;
}

static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (__builtin_expect(pmd != NULL, 1))
                clear_page(pmd);
        return pmd;
}

static inline void
pmd_free (pmd_t *pmd)
{
        *(unsigned long *)pmd = (unsigned long) pmd_quicklist;
        pmd_quicklist = (unsigned long *) pmd;
        ++pgtable_cache_size;
}

static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
        pmd_val(*pmd_entry) = __pa(pte);
}

static inline pte_t*
pte_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
        unsigned long *ret = (unsigned long *)pte_quicklist;

        if (__builtin_expect(ret != NULL, 1)) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                --pgtable_cache_size;
        }
        return (pte_t *)ret;
}


static inline pte_t*
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);

        if (__builtin_expect(pte != NULL, 1))
                clear_page(pte);
        return pte;
}

static inline void
pte_free (pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        ++pgtable_cache_size;
}

extern int do_check_pgt_cache (int, int);
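
/*
 * do_check_pgt_cache() releases pages from the quicklists back to the page
 * allocator once pgtable_cache_size exceeds the given high-water mark,
 * trimming the cache back down to the low-water mark.
 */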

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()        smp_flush_tlb_all()
#else
# define flush_tlb_all()        local_flush_tlb_all()
#endif

static inline void
local_flush_tlb_mm (struct mm_struct *mm)
{
        if (mm == current->active_mm)
                activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
        if (!mm)
                return;

        mm->context = 0;

        if (atomic_read(&mm->mm_users) == 0)
                return;         /* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
        smp_flush_tlb_mm(mm);
#else
        local_flush_tlb_mm(mm);
#endif
}
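
/*
 * Note: the real work above is done by resetting mm->context.  A zero
 * context forces a fresh region ID to be allocated the next time the mm
 * is activated (activate_context() does this right away if the mm is
 * currently active), so the old TLB entries can never be matched again.
 */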

extern void flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
        flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
        if (vma->vm_mm == current->active_mm)
                asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
        else
                vma->vm_mm->context = 0;
#endif
}
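
/*
 * In the UP case above, "ptc.l" purges the translation for ADDR from the
 * local TLB only; the second operand carries the page size, which is why
 * PAGE_SHIFT is shifted into bits 7:2.  If the mm isn't the one currently
 * running, clearing its context is enough: the stale entries become
 * unreachable once a new region ID is assigned on the next activation.
 */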

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
        if (unlikely(end - start >= 1024*1024*1024*1024UL
                     || rgn_index(start) != rgn_index(end - 1)))
                /*
                 * This condition is very rare and normal applications shouldn't get
                 * here. No attempt has been made to optimize for this case.
                 */
                flush_tlb_all();
        else
                flush_tlb_range(mm, ia64_thash(start), ia64_thash(end));
}
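
/*
 * 1024*1024*1024*1024UL is 2^40 bytes (1TB).  In the common case,
 * ia64_thash() returns the virtual address of the linear page-table entry
 * mapping its argument, so only the TLB entries that cover the page table
 * for [start, end) are flushed rather than the whole TLB.
 */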

/*
 * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
 * to avoid them whenever possible.
 */

#define flush_cache_all()                       do { } while (0)
#define flush_cache_mm(mm)                      do { } while (0)
#define flush_cache_range(mm, start, end)       do { } while (0)
#define flush_cache_page(vma, vmaddr)           do { } while (0)
#define flush_page_to_ram(page)                 do { } while (0)
#define flush_icache_page(vma,page)             do { } while (0)

#define flush_dcache_page(page)                 \
do {                                            \
        clear_bit(PG_arch_1, &(page)->flags);   \
} while (0)
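
/*
 * flush_dcache_page() doesn't flush anything by itself: clearing PG_arch_1
 * marks the page's i-cache as potentially stale, and update_mmu_cache()
 * below performs the actual flush_icache_range() the first time the page
 * is mapped executable.
 */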

extern void flush_icache_range (unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)                                      \
do {                                                                                            \
        unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);  \
        flush_icache_range(_addr, _addr + (len));                                               \
} while (0)

static inline void
clear_user_page (void *addr, unsigned long vaddr, struct page *page)
{
        clear_page(addr);
        flush_dcache_page(page);
}

static inline void
copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
{
        copy_page(to, from);
        flush_dcache_page(page);
}

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this function to take care of any (delayed) i-cache flushing
 * that may be necessary.
 */
static inline void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
        unsigned long addr;
        struct page *page;

        if (!pte_exec(pte))
                return;                         /* not an executable page... */

        page = pte_page(pte);
        /* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

#endif /* _ASM_IA64_PGALLOC_H */
