/*
 * arch/or32/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * CRIS-port by Axis Communications AB
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/bug.h>

/* __PHX__ cleanup, check */
#define __READABLE   ( _PAGE_ALL | _PAGE_URE | _PAGE_SRE )
#define __WRITEABLE  ( _PAGE_WRITE )
#define _PAGE_GLOBAL ( 0 )
#define _PAGE_KERNEL ( _PAGE_ALL | _PAGE_SRE | _PAGE_SWE | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC )

extern int mem_init_done;

/* Lengths, in pages, of the boot-time ioremapped regions; each length
 * is recorded at the array slot of the region's first fixmap page. */
static unsigned int bt_ioremapped_len[NR_FIX_BTMAPS] __initdata =
        {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
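
/*
 * The three helpers below walk the kernel page tables top-down
 * (pgd -> pmd -> pte): remap_area_pages() iterates over page directory
 * entries, remap_area_pmd() over page middle directories, and
 * remap_area_pte() finally points each pte at one physical page.
 */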
 
extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | __READABLE |
                                                             __WRITEABLE | _PAGE_GLOBAL |
                                                             _PAGE_KERNEL | flags)));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                 unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * IO remapping core to use when system is running
 */
void *ioremap_core(unsigned long phys_addr, unsigned long size,
                   unsigned long flags)
{
        struct vm_struct * area;
        void * addr;

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

/*
 * Boot-time IO remapping core to use
 */
static void __init *bt_ioremap_core(unsigned long phys_addr, unsigned long size,
                                    unsigned long flags)
{
        unsigned int nrpages;
        unsigned int i;
        unsigned int nr_free;
        unsigned int idx;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS)
                return NULL;

        /*
         * Find a big enough gap in NR_FIX_BTMAPS
         */
        idx = FIX_BTMAP_BEGIN;
        for (i = 0, nr_free = 0; i < NR_FIX_BTMAPS; i++) {
                if (!bt_ioremapped_len[i])
                        nr_free++;
                else {
                        nr_free = 0;
                        /* Skip past this mapping: its last page sits at
                         * slot i + len - 1, and any free run resumes on
                         * the slot after that. */
                        i += bt_ioremapped_len[i] - 1;
                        idx = FIX_BTMAP_BEGIN - (i + 1);
                }
                if (nr_free == nrpages)
                        break;
        }

        if (nr_free < nrpages)
                return NULL;

        bt_ioremapped_len[FIX_BTMAP_BEGIN - idx] = nrpages;

        /*
         * Ok, go for it..
         */
        for (i = idx; nrpages > 0; i--, nrpages--) {
                set_fixmap_nocache(i, phys_addr);
                phys_addr += PAGE_SIZE;
        }

        return (void *)fix_to_virt(idx);
}

static void __init bt_iounmap(void *addr)
{
        unsigned long virt_addr;
        unsigned int nr_pages;
        unsigned int idx;

        virt_addr = (unsigned long)addr;
        idx = virt_to_fix(virt_addr);
        nr_pages = bt_ioremapped_len[FIX_BTMAP_BEGIN - idx];
        bt_ioremapped_len[FIX_BTMAP_BEGIN - idx] = 0;

        while (nr_pages > 0) {
                clear_fixmap(idx);
                --idx;
                --nr_pages;
        }
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
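/*
 * For example, __ioremap(0x80001234, 0x10, f) maps the whole page at
 * physical 0x80001000 and returns (mapping base + 0x234), preserving
 * the caller's sub-page offset.  (The addresses are made-up values.)
 */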
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

#if 0
        /* TODO: Here we can put checks for driver-writer abuse...  */

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        if (mem_init_done)
                addr = ioremap_core(phys_addr, size, flags);
        else
                addr = bt_ioremap_core(phys_addr, size, flags);

        /* Don't let a failed mapping leak out as (offset + NULL). */
        if (!addr)
                return NULL;

        return (void *) (offset + (char *)addr);
}
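
/*
 * Usage sketch (hypothetical driver code, not part of this file; the
 * physical base 0x93000000 and length 0x1000 are made-up values):
 *
 *      void *regs = __ioremap(0x93000000, 0x1000, 0);
 *      u32 id;
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      id = *(volatile u32 *)regs;     // read a device register
 *      iounmap(regs);
 */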

static inline int is_bt_ioremapped(void *addr)
{
        unsigned long a = (unsigned long)addr;
        return (a < FIXADDR_TOP) && (a >= FIXADDR_BOOT_START);
}

void iounmap(void *addr)
{
        if (is_bt_ioremapped(addr))
                bt_iounmap(addr);
        else if (addr > high_memory)
                vfree((void *) (PAGE_MASK & (unsigned long) addr));
}

/*
 * RGD: taken from PPC; this probably doesn't work on or32 and is not
 * called right now.
 */
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
        return (void __iomem *) (port + IO_BASE);
}

void ioport_unmap(void __iomem *addr)
{
        /* Nothing to do */
}
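
/*
 * ioport_map() simply offsets the port number into the memory-mapped
 * I/O window at IO_BASE, so a (hypothetical) caller would treat the
 * result like any other __iomem cookie:
 *
 *      void __iomem *uart = ioport_map(0x3f8, 8);     // made-up port
 *      ...
 *      ioport_unmap(uart);
 */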
