OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [arm/] [mm/] [ioremap.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *  linux/arch/arm/mm/ioremap.c
3
 *
4
 * Re-map IO memory to kernel address space so that we can access it.
5
 *
6
 * (C) Copyright 1995 1996 Linus Torvalds
7
 *
8
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
9
 * Hacked to allow all architectures to build, and various cleanups
10
 * by Russell King
11
 *
12
 * This allows a driver to remap an arbitrary region of bus memory into
13
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
14
 * so on with such remapped areas.
15
 *
16
 * ioremap support tweaked to allow support for large page mappings.  We
17
 * have several issues that needs to be resolved first however:
18
 *
19
 *  1. We need set_pte, or something like set_pte to understand large
20
 *     page mappings.
21
 *
22
 *  2. we need the unmap_* functions to likewise understand large page
23
 *     mappings.
24
 */
25
#include <linux/errno.h>
26
#include <linux/mm.h>
27
#include <linux/vmalloc.h>
28
 
29
#include <asm/page.h>
30
#include <asm/pgalloc.h>
31
#include <asm/io.h>
32
 
33
/*
 * Fill one PTE table with I/O mappings for the range [address, address+size)
 * that falls inside a single PMD entry.
 *
 * @pte:     first PTE slot to fill
 * @address: virtual start address (only the offset within the PMD is used)
 * @size:    number of bytes requested (clamped to the end of this PMD)
 * @pfn:     page frame number of the first physical page to map
 * @pgprot:  page protection bits for every PTE written
 *
 * BUG()s if any target PTE is already in use: ioremap must never
 * silently overwrite an existing mapping.
 */
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
               unsigned long pfn, pgprot_t pgprot)
{
        unsigned long end;

        /* Reduce 'address' to its offset within this PMD-sized region. */
        address &= ~PMD_MASK;
        end = address + size;
        /* Never walk past the end of this PTE table. */
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        BUG_ON(address >= end);
        do {
                /* Refuse to clobber a live mapping. */
                if (!pte_none(*pte))
                        goto bad;

                set_pte(pte, pfn_pte(pfn, pgprot));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));   /* 'address' test guards address-space wraparound */
        return;

 bad:
        printk("remap_area_pte: page already exists\n");
        BUG();
}
59
 
60
/*
 * Populate the PMD entries covering [address, address+size) within one
 * PGDIR, allocating PTE tables as needed and delegating the per-page
 * work to remap_area_pte().
 *
 * @pmd:     first PMD entry to fill
 * @address: virtual start address (only the offset within the PGDIR is used)
 * @size:    number of bytes requested (clamped to the end of this PGDIR)
 * @pfn:     page frame number for the first page of the range
 * @flags:   extra L_PTE_ protection bits to OR into each PTE
 *
 * Returns 0 on success or -ENOMEM if a PTE table could not be allocated.
 */
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
               unsigned long pfn, unsigned long flags)
{
        unsigned long end;
        pgprot_t pgprot;

        /* Reduce 'address' to its offset within this PGDIR-sized region. */
        address &= ~PGDIR_MASK;
        end = address + size;

        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        /*
         * Bias pfn by the starting offset so that, inside the loop,
         * 'pfn + (address >> PAGE_SHIFT)' always yields the frame that
         * belongs at the current (advancing) address.
         */
        pfn -= address >> PAGE_SHIFT;
        BUG_ON(address >= end);

        pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
        do {
                pte_t * pte = pte_alloc(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, pfn + (address >> PAGE_SHIFT), pgprot);
                /* Advance to the start of the next PMD-sized region. */
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));   /* 'address' test guards wraparound */
        return 0;
}
87
 
88
/*
 * Top-level page-table walk for ioremap: map 'size' bytes of physical
 * memory starting at frame 'pfn' into kernel virtual space at 'address',
 * applying the given extra L_PTE_ 'flags'.
 *
 * Walks init_mm's PGD, allocating PMDs as needed, under
 * init_mm.page_table_lock.  Caches are flushed before the tables change
 * and the TLB is flushed afterwards so stale translations cannot survive.
 *
 * Returns 0 on success or -ENOMEM if a page-table allocation failed
 * (any entries already written are left in place; the caller unwinds
 * via vfree()).
 */
static int
remap_area_pages(unsigned long address, unsigned long pfn,
                 unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        /*
         * Bias pfn by the starting offset so that, in the loop below,
         * 'pfn + (address >> PAGE_SHIFT)' names the frame for the
         * current address (same trick as remap_area_pmd()).
         */
        pfn -= address >> PAGE_SHIFT;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        BUG_ON(address >= end);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                         pfn + (address >> PAGE_SHIFT), flags))
                        break;
                error = 0;
                /* Advance to the start of the next PGDIR-sized region. */
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));   /* 'address' test guards wraparound */
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}
118
 
119
/*
120
 * Remap an arbitrary physical address space into the kernel virtual
121
 * address space. Needed when the kernel wants to access high addresses
122
 * directly.
123
 *
124
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
125
 * have to convert them into an offset in a page-aligned mapping, but the
126
 * caller shouldn't need to know that small detail.
127
 *
128
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
129
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
130
 */
131
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 *
 * Returns a kernel virtual pointer covering [phys_addr, phys_addr+size),
 * or NULL on zero size, physical-address wraparound, vmalloc-space
 * exhaustion, or page-table allocation failure.  Undo with __iounmap().
 */
void * __ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        /*
         * Round the span up to whole pages.  Align last_addr + 1 (one
         * past the final byte): aligning last_addr itself dropped the
         * last page whenever the requested region ended exactly on a
         * page boundary (e.g. phys=0x1000, size=PAGE_SIZE+1 mapped one
         * page instead of two).
         */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr >> PAGE_SHIFT, size, flags)) {
                /* Partial mappings are torn down along with the area. */
                vfree(addr);
                return NULL;
        }
        /* Re-apply the sub-page offset the caller originally asked for. */
        return (void *) (offset + (char *)addr);
}
162
 
163
void __iounmap(void *addr)
164
{
165
        vfree((void *) (PAGE_MASK & (unsigned long) addr));
166
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.