OpenCores Subversion repository: or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk
File: trunk/linux-2.6/linux-2.6.24/arch/x86/mm/ioremap_32.c (rev 3, author xianfeng)
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS       0xa0000
#define ISA_END_ADDRESS         0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;
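        /*
         * (The check above rejects e.g. phys_addr = 0xfffff000 with
         *  size = 0x2000 on 32-bit: last_addr wraps around to
         *  0x00000fff, which is below phys_addr.)
         */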
        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (void __iomem *) phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr <= virt_to_phys(high_memory - 1)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if(!PageReserved(page))
                                return NULL;
        }

        prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
                        | _PAGE_ACCESSED | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
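        /*
         * The pgprot bits are stashed in the upper bits of the vm_struct
         * flags (flags << 20); iounmap() below tests p->flags >> 20 to
         * decide whether the direct mapping's page attributes must be
         * reset with change_page_attr().
         */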
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long) addr,
                        (unsigned long) addr + size, phys_addr, prot)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
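A worked example of the alignment handling above, with hypothetical
values (and the usual 4 KiB PAGE_SIZE assumed):

        /*
         * __ioremap(0xfec00040, 0x20, 0):
         *   last_addr = 0xfec00040 + 0x20 - 1   = 0xfec0005f
         *   offset    = 0xfec00040 & ~PAGE_MASK = 0x40
         *   phys_addr = 0xfec00040 &  PAGE_MASK = 0xfec00000
         *   size      = PAGE_ALIGN(0xfec0005f + 1) - 0xfec00000 = 0x1000
         * One page is mapped and the caller receives area->addr + 0x40.
         */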
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */

void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (last_addr < virt_to_phys(high_memory) - 1) {
                struct page *ppage = virt_to_page(__va(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                * the page-aligned "last - first" come out right.
                */
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
EXPORT_SYMBOL(ioremap_nocache);
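A minimal usage sketch of the exported interface; the device, address and
register offsets below are hypothetical, chosen only to illustrate the
ioremap_nocache()/iounmap() pairing:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_MMIO_PHYS  0xe0000000UL    /* hypothetical bus address */
#define DEMO_MMIO_LEN   0x100UL

static void __iomem *demo_regs;

static int demo_map(void)
{
        /* control registers: caching or write combining is not wanted */
        demo_regs = ioremap_nocache(DEMO_MMIO_PHYS, DEMO_MMIO_LEN);
        if (!demo_regs)
                return -ENOMEM;

        writel(0x1, demo_regs + 0x04);  /* hypothetical enable register */
        (void) readl(demo_regs + 0x00); /* hypothetical status register */
        return 0;
}

static void demo_unmap(void)
{
        iounmap(demo_regs);             /* "Must be freed with iounmap." */
}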
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space.   So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
                        addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk("iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 get_vm_area_size(p) >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);
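Because the comment above requires exactly one iounmap() per mapping, a
defensive teardown (an illustrative sketch, not part of this file) can
clear the pointer so an accidental second call becomes a no-op:

static void demo_teardown(void __iomem **regs)
{
        if (*regs) {
                iounmap(*regs);
                *regs = NULL;           /* guard against a second iounmap() */
        }
}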
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return phys_to_virt(phys_addr);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS)
                return NULL;

        /*
         * Ok, go for it..
         */
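        /*
         * fix_to_virt() addresses grow downwards as the fixmap index
         * grows, so FIX_BTMAP_BEGIN is the lowest virtual address of the
         * window and decrementing idx walks upwards through consecutive
         * pages, matching the return value below.
         */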
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                return;
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
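bt_ioremap() and bt_iounmap() serve early boot code that runs before the
vmalloc machinery behind __ioremap() is usable; they borrow the
NR_FIX_BTMAPS fixmap slots instead. A sketch of the intended pattern,
with a hypothetical firmware-table address:

void __init demo_early_scan(void)
{
        /* hypothetical table at 0xfffd0000, 64 bytes, read once at boot */
        void *tbl = bt_ioremap(0xfffd0000UL, 64);

        if (tbl) {
                /* ... parse the table ... */
                bt_iounmap(tbl, 64);
        }
}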
