/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>

#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
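
/*
 * Added note (not in the original): the pgd and pte quicklists are
 * per-cpu LIFO caches of freed page tables kept in the cpu lowcore.
 * A free table's first word links to the next table on the list, and
 * pgtable_cache_size counts the cached pages. There is no pmd
 * quicklist because the pmd level is folded into the pgd on s390.
 */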

/* diag10 reports an unused page to the VM hypervisor (diagnose 0x10,
   "release pages"). */
extern void diag10(unsigned long addr);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

extern __inline__ pgd_t* get_pgd_slow(void)
{
        pgd_t *ret;
        int i;

        /* A pgd is an order-1 allocation (two pages, one segment table);
           clear all user entries. */
        ret = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
        if (ret != NULL)
                for (i = 0; i < USER_PTRS_PER_PGD; i++)
                        pmd_clear(pmd_offset(ret + i, i*PGDIR_SIZE));
        return ret;
}

extern __inline__ pgd_t* get_pgd_fast(void)
{
        unsigned long *ret = pgd_quicklist;

        if (ret != NULL) {
                /* Pop the list head; entry 0 held the link word, so
                   restore it from the (equally empty) entry 1. */
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size -= 2;
        }
        return (pgd_t *)ret;
}

extern __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;

        pgd = get_pgd_fast();
        if (!pgd)
                pgd = get_pgd_slow();
        return pgd;
}

extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
        /* Push onto the quicklist, using the first word as the link. */
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size += 2;
}

extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_pages((unsigned long) pgd, 1);
}

#define pgd_free(pgd)           free_pgd_fast(pgd)
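
/*
 * Illustration only (not part of the original header): how the fast and
 * slow paths compose. The first pgd_alloc() finds the quicklist empty
 * and falls back to get_pgd_slow(); pgd_free() then pushes the table
 * onto the quicklist, so the next pgd_alloc() is a cheap LIFO pop.
 */
#if 0   /* sketch only, never compiled */
static void pgd_cache_roundtrip(struct mm_struct *mm)
{
        pgd_t *pgd = pgd_alloc(mm);     /* quicklist empty: get_pgd_slow() */
        pgd_free(pgd);                  /* push onto pgd_quicklist */
        pgd = pgd_alloc(mm);            /* now served by get_pgd_fast() */
        pgd_free(pgd);
}
#endif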

/*
 * page middle directory allocation/free routines.
 * We don't use pmd cache, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define pmd_free_slow(x)                do { } while (0)
#define pmd_free_fast(x)                do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()

extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        /* One 4kb pte page holds four 1kb hardware page tables of 256
           entries each; hook them into four consecutive segment table
           entries. */
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
}

/*
 * page table entry allocation/free routines.
 */
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte;
        int i;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte != NULL) {
                /* Clear every entry of the new page; it backs a whole pmd. */
                for (i=0; i < PTRS_PER_PTE; i++)
                        pte_clear(pte+i);
        }
        return pte;
}

extern __inline__ pte_t *
pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pte_quicklist;

        if (ret != NULL) {
                /* Pop the list head and restore the link word, as in
                   get_pgd_fast() above. */
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

extern __inline__ void pte_free_fast(pte_t *pte)
{
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void pte_free_slow(pte_t *pte)
{
        free_page((unsigned long) pte);
}

#define pte_free(pte)           pte_free_fast(pte)

/* Frees surplus cached page tables from the quicklists, given low and
   high water marks. */
extern int do_check_pgt_cache(int, int);

/*
 * This establishes kernel virtual mappings (e.g., as a result of a
 * vmalloc call).  Since s390-esame uses a separate kernel page table,
 * there is nothing to do here... :)
 */
#define set_pgdir(addr,entry) do { } while(0)

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct's TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *    called only from vmalloc/vfree
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

/*
 * S/390 has three ways of flushing TLBs:
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of an SMP system
 * 'ipte' invalidates a pte in a page table and flushes it out of
 * the TLBs of all PUs of an SMP system
 */

#define local_flush_tlb() \
do {  __asm__ __volatile__("ptlb": : :"memory"); } while (0)
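
/*
 * Added note (not in the original): these primitives map onto this file
 * as follows - 'ptlb' backs local_flush_tlb(), 'csp' backs
 * global_flush_tlb_csp() in the SMP case, and 'ipte' is used by
 * ptep_invalidate() further down.
 */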

#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch.
 */

static inline void flush_tlb(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        local_flush_tlb();
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        local_flush_tlb();
}

#else

#include <asm/smp.h>

extern void smp_ptlb_all(void);
static inline void global_flush_tlb_csp(void)
{
        int cs1=0,dum=0;
        int *adr;
        long long dummy=0;
        /* Build a word-aligned operand address inside 'dummy' with the
           low bit set, as the csp instruction expects. */
        adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
        /* Compare value in r2, swap value in r3, operand address in r4:
           csp swaps the word and purges the TLBs of all processors. */
        __asm__ __volatile__("lr    2,%0\n\t"
                             "lr    3,%1\n\t"
                             "lr    4,%2\n\t"
                             "csp   2,4" :
                             : "d" (cs1), "d" (dum), "d" (adr)
                             : "2", "3", "4");
}
static inline void global_flush_tlb(void)
{
        if (MACHINE_HAS_CSP)
                global_flush_tlb_csp();
        else
                /* No csp available: have every cpu do a local ptlb. */
                smp_ptlb_all();
}

/*
 * We only have to do a global flush of the TLB if the process has run
 * on another PU than the current one since the last flush.
 * If the mm is shared by several threads (mm->mm_users > 1) we always
 * do a global flush, since the process may be running on more than one
 * processor at the same time.
 */

static inline void __flush_tlb_mm(struct mm_struct * mm)
{
        if (mm->cpu_vm_mask != (1UL << smp_processor_id())) {
                /* mm was active on more than one cpu. */
                if (mm == current->active_mm &&
                    atomic_read(&mm->mm_users) == 1)
                        /* this cpu is the only one using the mm. */
                        mm->cpu_vm_mask = 1UL << smp_processor_id();
                global_flush_tlb();
        } else
                local_flush_tlb();
}
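
/*
 * Added note (not in the original): called on cpu N, __flush_tlb_mm()
 * decides as follows -
 *   cpu_vm_mask == (1 << N)                   -> local 'ptlb' only
 *   otherwise, this cpu is the mm's sole user -> narrow cpu_vm_mask back
 *                                                to cpu N, global flush
 *   otherwise                                 -> global flush
 */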

static inline void flush_tlb(void)
{
        __flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
        global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        __flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        __flush_tlb_mm(mm);
}

#endif

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* S/390 does not keep any page table caches in TLB */
}

static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_young(ptep);
}

static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_dirty(ptep);
}

static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (!(pte_val(pte) & _PAGE_INVALID)) {
                /* S390 has 1mb segments, we are emulating 4MB segments */
                /* Round ptep down to the origin of its 1kb hardware page
                   table; ipte takes the table origin and the virtual
                   address. */
                pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
                __asm__ __volatile__ ("ipte %0,%1" : : "a" (pto), "a" (address));
        }
        pte_clear(ptep);
        return pte;
}
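
/*
 * Added note (not in the original): per the comment near the top of the
 * TLB section, 'ipte' both invalidates the entry in the page table and
 * purges it from the TLBs of all processors, so no separate global
 * flush is needed after ptep_invalidate().
 */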

static inline void ptep_establish(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep, pte_t entry)
{
        ptep_invalidate(vma, address, ptep);
        set_pte(ptep, entry);
}

#endif /* _S390_PGALLOC_H */
