/*
 *  linux/arch/m68k/mm/kmap.c
 *
 *  Copyright (C) 1997 Roman Hodek
 *
 *  10/01/99 cleaned up the code and changed to the same interface
 *           used by other architectures                /Roman Zippel
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>

#undef DEBUG

#define PTRTREESIZE     (256*1024)
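/*
 * Note: 256*1024 is the range covered by a single pointer-table slot on
 * 020/030 (the code below uses 16 slots per pmd), i.e. by one early
 * termination descriptor; the 020/030 paths step in these units.
 */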

/*
 * For 040/060 we can use the virtual memory area like other architectures,
 * but for 020/030 we want to use early termination page descriptors and we
 * can't mix this with normal page descriptors, so we have to copy that code
 * (mm/vmalloc.c) and return appropriately aligned addresses.
 */

#ifdef CPU_M68040_OR_M68060_ONLY

#define IO_SIZE         PAGE_SIZE

static inline struct vm_struct *get_io_area(unsigned long size)
{
        return get_vm_area(size, VM_IOREMAP);
}

static inline void free_io_area(void *addr)
{
        vfree((void *)(PAGE_MASK & (unsigned long)addr));
}

#else

#define IO_SIZE         (256*1024)

static struct vm_struct *iolist = NULL;
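
/*
 * First-fit allocator over the KMAP_START..KMAP_END window.  The iolist
 * is kept sorted by address, and each entry's size already includes the
 * IO_SIZE guard gap added below, so neighbouring areas never touch.
 */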
static struct vm_struct *get_io_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *)kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = KMAP_START;
        for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long)tmp->addr)
                        break;
                if (addr > KMAP_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long)tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + IO_SIZE;    /* leave a gap between areas */
        area->next = *p;
        *p = area;
        return area;
}
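
/*
 * Round addr down to the IO_SIZE boundary it was allocated on, unlink
 * the matching area from the iolist and unmap it (minus the guard gap).
 */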
static inline void free_io_area(void *addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        addr = (void *)((unsigned long)addr & -IO_SIZE);
        for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        if (tmp->size > IO_SIZE)
                                __iounmap(tmp->addr, tmp->size - IO_SIZE);
                        else
                                printk("free_io_area: Invalid I/O area size %lu\n", tmp->size);
                        kfree(tmp);
                        return;
                }
        }
}

#endif

/*
 * Map some physical address range into the kernel address space.  The
 * code is copied and adapted from map_chunk().
 */
/* Rewritten by Andreas Schwab to remove all races. */
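/*
 * A hypothetical usage sketch (board_phys/board_size are placeholders;
 * ioremap() in <asm/io.h> normally wraps __ioremap() with
 * IOMAP_NOCACHE_SER):
 *
 *      void *regs = ioremap(board_phys, board_size);
 *      if (regs) {
 *              ... access the device registers through regs ...
 *              iounmap(regs);
 *      }
 */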

void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
        long offset;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        /*
         * Don't allow mappings that wrap..
         */
        if (!size || size > physaddr + size)
                return NULL;
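
/*
 * On Amiga, physical addresses in the 0x40000000-0x60000000 window (the
 * Zorro III expansion space) are assumed to be usable 1:1 here, so a
 * serialized non-cached mapping there needs no new page tables.
 */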
#ifdef CONFIG_AMIGA
        if (MACH_IS_AMIGA) {
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
                        return (void *)physaddr;
        }
#endif

#ifdef DEBUG
        printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
        /*
         * Mappings have to be aligned
         */
        offset = physaddr & (IO_SIZE - 1);
        physaddr &= -IO_SIZE;
        size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

        /*
         * Ok, go for it..
         */
        area = get_io_area(size);
        if (!area)
                return NULL;

        virtaddr = (unsigned long)area->addr;
        retaddr = virtaddr + offset;
#ifdef DEBUG
        printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

        /*
         * add cache and table flags to physical address
         */
        if (CPU_IS_040_OR_060) {
                physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
                             _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_FULL_CACHING:
                        physaddr |= _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        physaddr |= _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        physaddr |= _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        physaddr |= _PAGE_CACHE040W;
                        break;
                }
        } else {
                physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        physaddr |= _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        break;
                }
        }
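
        /*
         * On 020/030 each iteration installs one early termination
         * descriptor covering a whole PTRTREESIZE (256 KB) chunk; on
         * 040/060 the range is mapped page by page through real PTEs.
         */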
        while ((long)size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PTRTREESIZE-1)))
                        printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                pmd_dir = pmd_alloc_kernel(pgd_dir, virtaddr);
                if (!pmd_dir) {
                        printk("ioremap: no mem for pmd_dir\n");
                        return NULL;
                }

                if (CPU_IS_020_OR_030) {
                        pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
                        physaddr += PTRTREESIZE;
                        virtaddr += PTRTREESIZE;
                        size -= PTRTREESIZE;
                } else {
                        pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
                        if (!pte_dir) {
                                printk("ioremap: no mem for pte_dir\n");
                                return NULL;
                        }

                        pte_val(*pte_dir) = physaddr;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                        size -= PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
        flush_tlb_all();

        return (void *)retaddr;
}

/*
 * Unmap an ioremap()ed region again
 */
void iounmap(void *addr)
{
#ifdef CONFIG_AMIGA
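        /*
         * Addresses in the directly returned Amiga window (see __ioremap)
         * were never entered into the iolist, so there is nothing to free
         * for them.
         */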
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
                free_io_area(addr);
#else
        free_io_area(addr);
#endif
}

/*
 * __iounmap unmaps nearly everything, so be careful.
 * It currently doesn't free pointer/page tables anymore, but that
 * wasn't used anyway and might be added later.
 */
void __iounmap(void *addr, unsigned long size)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
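                        /*
                         * One early termination descriptor maps the whole
                         * 256 KB chunk: clear the single pointer-table slot.
                         */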
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = 0;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset(pmd_dir, virtaddr);

                pte_val(*pte_dir) = 0;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}

/*
 * Set new cache mode for some kernel address space.
 * The caller must push data for that range itself, if such data may already
 * be in the cache.
 */
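/*
 * For instance (hypothetical values), an already mapped region can be
 * switched to writethrough caching with:
 *
 *      kernel_set_cachemode(vaddr, size, IOMAP_WRITETHROUGH);
 */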
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
        unsigned long virtaddr = (unsigned long)addr;
        pgd_t *pgd_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        if (CPU_IS_040_OR_060) {
                switch (cmode) {
                case IOMAP_FULL_CACHING:
                        cmode = _PAGE_CACHE040;
                        break;
                case IOMAP_NOCACHE_SER:
                default:
                        cmode = _PAGE_NOCACHE_S;
                        break;
                case IOMAP_NOCACHE_NONSER:
                        cmode = _PAGE_NOCACHE;
                        break;
                case IOMAP_WRITETHROUGH:
                        cmode = _PAGE_CACHE040W;
                        break;
                }
        } else {
                switch (cmode) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
                default:
                        cmode = _PAGE_NOCACHE030;
                        break;
                case IOMAP_FULL_CACHING:
                case IOMAP_WRITETHROUGH:
                        cmode = 0;
                        break;
                }
        }

        while ((long)size > 0) {
                pgd_dir = pgd_offset_k(virtaddr);
                if (pgd_bad(*pgd_dir)) {
                        printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
                        pgd_clear(pgd_dir);
                        return;
                }
                pmd_dir = pmd_offset(pgd_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
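                        /*
                         * Rewrite the cache bits directly in the early
                         * termination descriptor for this 256 KB chunk.
                         */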
                        int pmd_off = (virtaddr/PTRTREESIZE) & 15;

                        if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
                                pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
                                                         _CACHEMASK040) | cmode;
                                virtaddr += PTRTREESIZE;
                                size -= PTRTREESIZE;
                                continue;
                        }
                }

                if (pmd_bad(*pmd_dir)) {
                        printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
                        pmd_clear(pmd_dir);
                        return;
                }
                pte_dir = pte_offset(pmd_dir, virtaddr);

                pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
                virtaddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        flush_tlb_all();
}
