OpenCores Subversion repository: or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

File: trunk/linux-2.6/linux-2.6.24/arch/powerpc/lib/dma-noncoherent.c (rev 3, author xianfeng)

/*
 *  PowerPC version derived from arch/arm/mm/consistent.c
 *    Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 *
 *  Copyright (C) 2000 Russell King
 *
 * Consistent memory allocators.  Used for DMA devices that want to
 * share uncached memory with the processor core.  The function return
 * is the virtual address and 'dma_handle' is the physical address.
 * Mostly stolen from the ARM port, with some changes for PowerPC.
 *                                              -- Dan
 *
 * Reorganized to get rid of the arch-specific consistent_* functions
 * and provide non-coherent implementations for the DMA API. -Matt
 *
 * Added in_interrupt() safe dma_alloc_coherent()/dma_free_coherent()
 * implementation. This is pulled straight from ARM and barely
 * modified. -Matt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>

#include <asm/tlbflush.h>

/*
 * This address range defaults to a value that is safe for all
 * platforms which currently set CONFIG_NOT_COHERENT_CACHE. It
 * can be further configured for specific applications under
 * the "Advanced Setup" menu. -Matt
 */
#define CONSISTENT_BASE (CONFIG_CONSISTENT_START)
#define CONSISTENT_END  (CONFIG_CONSISTENT_START + CONFIG_CONSISTENT_SIZE)
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
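
/*
 * Worked example (hypothetical values; the real ones come from the
 * "Advanced Setup" Kconfig entries above): with CONFIG_CONSISTENT_START
 * = 0xff100000 and CONFIG_CONSISTENT_SIZE = 0x00200000 (2MB), the pool
 * spans 0xff100000..0xff300000, and with 4KB pages
 *
 *      CONSISTENT_OFFSET(0xff104000) = (0xff104000 - 0xff100000) >> 12 = 4
 *
 * i.e. the fifth pte in the single page table that dma_alloc_init()
 * installs in consistent_pte below.
 */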

/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
};

static struct vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct vm_region *c, *new;

        new = kmalloc(sizeof(struct vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}
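
/*
 * Illustration (hypothetical layout): with [BASE, BASE+0x2000) and
 * [BASE+0x5000, BASE+0x6000) already on the list, a request for 0x3000
 * bytes walks past the first region (addr becomes its vm_end), then
 * fits the gap since BASE+0x2000 + 0x3000 <= BASE+0x5000, so
 * [BASE+0x2000, BASE+0x5000) is inserted before the second region.
 * First fit, with the list kept sorted by address.
 */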

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
        struct vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        struct vm_region *c;
        unsigned long order;
        u64 mask = 0x00ffffff, limit; /* ISA default */

        if (!consistent_pte) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) || size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big (requested %#x mask %#Lx)\n",
                       size, mask);
                return NULL;
        }

        order = get_order(size);

        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                flush_dcache_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                unsigned long vaddr = c->vm_start;
                pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
                struct page *end = page + (1 << order);

                split_page(page, order);

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_bus(page);

                do {
                        BUG_ON(!pte_none(*pte));

                        SetPageReserved(page);
                        set_pte_at(&init_mm, vaddr,
                                   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
                        page++;
                        pte++;
                        vaddr += PAGE_SIZE;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        return NULL;
}
EXPORT_SYMBOL(__dma_alloc_coherent);
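
/*
 * Usage sketch (illustrative, not part of this file): drivers do not
 * call __dma_alloc_coherent() directly; on CONFIG_NOT_COHERENT_CACHE
 * platforms the generic DMA API routes to it, e.g.:
 *
 *      dma_addr_t bus;
 *      void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 * 'cpu' is then an uncached kernel mapping of the buffer, 'bus' is the
 * address to program into the device, and the pair is later released
 * with dma_free_coherent(dev, PAGE_SIZE, cpu, bus), which ends up in
 * __dma_free_coherent() below.
 */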

/*
 * free a page as defined by the above mapping.
 */
void __dma_free_coherent(size_t size, void *vaddr)
{
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);

        c = vm_region_find(&consistent_head, (unsigned long)vaddr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);
                                ClearPageReserved(page);

                                __free_page(page);
                                continue;
                        }
                }

                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        list_del(&c->vm_list);

        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, vaddr);
        dump_stack();
}
EXPORT_SYMBOL(__dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init dma_alloc_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int ret = 0;

        do {
                pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
                pud = pud_alloc(&init_mm, pgd, CONSISTENT_BASE);
                pmd = pmd_alloc(&init_mm, pud, CONSISTENT_BASE);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte = pte;
        } while (0);

        return ret;
}

core_initcall(dma_alloc_init);
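
/*
 * Note: core_initcall() runs before the device and driver initcalls,
 * so consistent_pte is normally set up before the first driver asks
 * for a coherent buffer; the !consistent_pte check in
 * __dma_alloc_coherent() only catches callers that run even earlier.
 */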

/*
 * make an area consistent.
 */
void __dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;

        switch (direction) {
        case DMA_NONE:
                BUG();
        case DMA_FROM_DEVICE:   /* invalidate only */
                invalidate_dcache_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_range(start, end);
                break;
        case DMA_BIDIRECTIONAL: /* writeback and invalidate */
                flush_dcache_range(start, end);
                break;
        }
}
EXPORT_SYMBOL(__dma_sync);
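
/*
 * Sketch of typical callers (illustrative): the streaming DMA API on
 * noncoherent platforms maintains coherency with calls of this shape:
 *
 *      __dma_sync(buf, len, DMA_TO_DEVICE);    writeback, so the
 *                                              device reads fresh data
 *      __dma_sync(buf, len, DMA_FROM_DEVICE);  invalidate, so the CPU
 *                                              re-reads what the
 *                                              device wrote
 */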

#ifdef CONFIG_HIGHMEM
/*
 * __dma_sync_page() implementation for systems using highmem.
 * In this case, each page of a buffer must be kmapped/kunmapped
 * in order to have a virtual address for __dma_sync(). This must
 * not sleep so kmap_atomic()/kunmap_atomic() are used.
 *
 * Note: yes, it is possible and correct to have a buffer extend
 * beyond the first page.
 */
static inline void __dma_sync_page_highmem(struct page *page,
                unsigned long offset, size_t size, int direction)
{
        size_t seg_size = min((size_t)(PAGE_SIZE - offset), size);
        size_t cur_size = seg_size;
        unsigned long flags, start, seg_offset = offset;
        int nr_segs = 1 + ((size - seg_size) + PAGE_SIZE - 1)/PAGE_SIZE;
        int seg_nr = 0;

        local_irq_save(flags);

        do {
                start = (unsigned long)kmap_atomic(page + seg_nr,
                                KM_PPC_SYNC_PAGE) + seg_offset;

                /* Sync this buffer segment */
                __dma_sync((void *)start, seg_size, direction);
                kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
                seg_nr++;

                /* Calculate next buffer segment size */
                seg_size = min((size_t)PAGE_SIZE, size - cur_size);

                /* Add the segment size to our running total */
                cur_size += seg_size;
                seg_offset = 0;
        } while (seg_nr < nr_segs);

        local_irq_restore(flags);
}
#endif /* CONFIG_HIGHMEM */
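
/*
 * Worked example for the highmem path above (hypothetical numbers,
 * 4KB pages): size = 6000 at offset = 1000 into the first page gives
 * seg_size = min(4096 - 1000, 6000) = 3096 and
 * nr_segs = 1 + (6000 - 3096 + 4095) / 4096 = 2, so the loop syncs
 * 3096 bytes from the first page and the remaining 2904 bytes from
 * offset 0 of the second.
 */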

/*
 * __dma_sync_page makes memory consistent. Identical to __dma_sync, but
 * takes a struct page instead of a virtual address.
 */
void __dma_sync_page(struct page *page, unsigned long offset,
        size_t size, int direction)
{
#ifdef CONFIG_HIGHMEM
        __dma_sync_page_highmem(page, offset, size, direction);
#else
        unsigned long start = (unsigned long)page_address(page) + offset;
        __dma_sync((void *)start, size, direction);
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
