OpenCores Subversion repository: or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

File: tags/linux-2.6/linux-2.6.24_or32_unified_v2.3/lib/swiotlb.c (rev 8)

/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm      Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm      Rename to swiotlb.c and add mark_clean() to avoid
 *                      unnecessary i-cache flushing.
 * 04/07/.. ak          Better overflow handling. Assorted fixes.
 * 05/09/10 linville    Add support for syncing ranges, support syncing for
 *                      DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>

#define OFFSET(val,align) ((unsigned long)      \
                           ( (val) & ( (align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

/*
 * Maximum allowable number of contiguous slabs to map; must be a power
 * of 2.  What is the appropriate value?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE  128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
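
/*
 * For illustration (worked arithmetic, not part of the original source):
 * with IO_TLB_SHIFT = 11, each slab covers 2 KiB, so IO_TLB_MIN_SLABS
 * works out to (1 << 20) >> 11 = 512 slabs for the 1 MB minimum, and the
 * 64 MB default used by swiotlb_init() below is (64 << 20) >> 11 = 32768
 * slabs.  One IO_TLB_SEGSIZE segment of 128 slabs caps a single mapping
 * at 128 * 2 KiB = 256 KiB of contiguous bounce space.
 */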

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_unmap_single and
 * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
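
/*
 * For illustration, the parser above accepts a slab count, the "force"
 * flag, or both; the counts here are hypothetical examples:
 *
 *     swiotlb=65536           reserve 65536 slabs (128 MB at 2 KiB each)
 *     swiotlb=force           bounce-buffer every streaming mapping
 *     swiotlb=65536,force     both
 *
 * The slab count is rounded up to a multiple of IO_TLB_SEGSIZE.
 */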

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /*
         * Get IO TLB memory from the low pages
         */
        io_tlb_start = alloc_bootmem_low_pages(bytes);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}

void __init
swiotlb_init(void)
{
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
}
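
/*
 * For illustration: after initialization io_tlb_list holds, per slab, the
 * number of contiguous free slabs from that index to the end of its
 * 128-slab segment:
 *
 *     index:  0    1    2   ...  127  128  129  ...  255  ...
 *     value:  128  127  126 ...  1    128  127  ...  1    ...
 *
 * so a single lookup at an index tells the allocator whether a request of
 * up to IO_TLB_SEGSIZE slabs fits there, without scanning forward.
 */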

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
        unsigned int order;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                        order);
                if (io_tlb_start)
                        break;
                order--;
        }

        if (!io_tlb_start)
                goto cleanup1;

        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
                bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        }
        io_tlb_end = io_tlb_start + bytes;
        memset(io_tlb_start, 0, bytes);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup2;

        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;

        io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
                                   get_order(io_tlb_nslabs * sizeof(char *)));
        if (!io_tlb_orig_addr)
                goto cleanup3;

        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));

        /*
         * Get the overflow emergency buffer
         */
        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                                  get_order(io_tlb_overflow));
        if (!io_tlb_overflow_buffer)
                goto cleanup4;

        printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
               "0x%lx\n", bytes >> 20,
               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
                                                              sizeof(char *)));
        io_tlb_orig_addr = NULL;
cleanup3:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
cleanup2:
        io_tlb_end = NULL;
        free_pages((unsigned long)io_tlb_start, order);
        io_tlb_start = NULL;
cleanup1:
        io_tlb_nslabs = req_nslabs;
        return -ENOMEM;
}

static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
        dma_addr_t mask = 0xffffffff;
        /* If the device has a mask, use it, otherwise default to 32 bits */
        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
        return (addr & ~mask) != 0;
}

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find a suitable number of IO TLB entries that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                wrap = index = ALIGN(io_tlb_index, stride);

                if (index >= io_tlb_nslabs)
                        wrap = index = 0;

                do {
                        /*
                         * If we find a slot that indicates we have 'nslots'
                         * number of contiguous buffers, we allocate the
                         * buffers from that slot and mark the entries as '0'
                         * indicating unavailable.
                         */
                        if (io_tlb_list[index] >= nslots) {
                                int count = 0;

                                for (i = index; i < (int) (index + nslots); i++)
                                        io_tlb_list[i] = 0;
                                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                        io_tlb_list[i] = ++count;
                                dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                                /*
                                 * Update the indices to avoid searching in
                                 * the next round.
                                 */
                                io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                                ? (index + nslots) : 0);

                                goto found;
                        }
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                } while (index != wrap);

                spin_unlock_irqrestore(&io_tlb_lock, flags);
                return NULL;
        }
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                memcpy(dma_addr, buffer, size);

        return dma_addr;
}
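
/*
 * Worked example (illustrative, assuming 4 KiB pages): a 9 KiB request
 * needs nslots = ALIGN(9216, 2048) >> 11 = 5 slabs and, being larger
 * than a page, is searched with a stride of 1 << (12 - 11) = 2 slabs so
 * the bounce buffer starts page-aligned.  The do/while above probes
 * io_tlb_list at each stride until an entry >= 5 is found, wrapping once
 * around the pool before giving up and returning NULL.
 */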

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                /*
                 * bounce... copy the data back into the original buffer and
                 * delete the bounce buffer.
                 */
                memcpy(buffer, dma_addr, size);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
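
/*
 * Worked example (illustrative): freeing slabs 10..14 when io_tlb_list[15]
 * already records 3 free slabs starts count at 3, so Step 1 rewrites
 * indices 14,13,12,11,10 to 4,5,6,7,8; Step 2 then walks backwards through
 * any free predecessors so every earlier free slab also sees the enlarged
 * run.  The OFFSET() test stops the backward walk at a 128-slab segment
 * boundary, keeping free runs within one IO_TLB_SEGSIZE segment.
 */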

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        char *buffer = io_tlb_orig_addr[index];

        buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        memcpy(buffer, dma_addr, size);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        memcpy(dma_addr, buffer, size);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        dma_addr_t dev_addr;
        void *ret;
        int order = get_order(size);

        /*
         * XXX fix me: the DMA API should pass us an explicit DMA mask
         * instead, or use ZONE_DMA32 (ia64 overloads ZONE_DMA to be a ~32
         * bit range instead of a 16MB one).
         */
        flags |= GFP_DMA;

        ret = (void *)__get_free_pages(flags, order);
        if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
                /*
                 * The allocated memory isn't reachable by the device.
                 * Fall back on swiotlb_map_single().
                 */
                free_pages((unsigned long) ret, order);
                ret = NULL;
        }
        if (!ret) {
                /*
                 * We are either out of memory or the device can't DMA
                 * to GFP_DMA memory; fall back on
                 * swiotlb_map_single(), which will grab memory from
                 * the lowest available address range.
                 */
                dma_addr_t handle;
                handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
                if (swiotlb_dma_mapping_error(handle))
                        return NULL;

                ret = bus_to_virt(handle);
        }

        memset(ret, 0, size);
        dev_addr = virt_to_bus(ret);

        /* Confirm address can be DMA'd by device */
        if (address_needs_mapping(hwdev, dev_addr)) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)*hwdev->dma_mask,
                       (unsigned long long)dev_addr);
                panic("swiotlb_alloc_coherent: allocated memory is out of "
                      "range for device");
        }
        *dma_handle = dev_addr;
        return ret;
}

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dma_handle)
{
        WARN_ON(irqs_disabled());
        if (!(vaddr >= (void *)io_tlb_start
              && vaddr < (void *)io_tlb_end))
                free_pages((unsigned long) vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                swiotlb_unmap_single(hwdev, dma_handle, size, DMA_TO_DEVICE);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev->bus_id : "?");

        if (size > io_tlb_overflow && do_panic) {
                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Memory would be corrupted\n");
                if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                        panic("DMA: Random memory would be DMAed\n");
        }
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
 */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
        dma_addr_t dev_addr = virt_to_bus(ptr);
        void *map;

        BUG_ON(dir == DMA_NONE);
        /*
         * If the pointer passed in happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = map_single(hwdev, ptr, size, dir);
        if (!map) {
                swiotlb_full(hwdev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = virt_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (address_needs_mapping(hwdev, dev_addr))
                panic("map_single: bounce buffer is not DMA'ble");

        return dev_addr;
}
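
/*
 * Illustrative driver-side usage (a sketch only; "dev", "buf" and "len"
 * are hypothetical, and real drivers typically reach this code through
 * the generic dma_map_single()/dma_unmap_single() wrappers):
 *
 *     dma_addr_t handle;
 *
 *     handle = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
 *     if (swiotlb_dma_mapping_error(handle))
 *             return -ENOMEM;   // pool exhausted, got the overflow buffer
 *     // ... point the device at "handle" and run the transfer ...
 *     swiotlb_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */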

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_single call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
                     int dir)
{
        char *dma_addr = bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                unmap_single(hwdev, dma_addr, size, dir);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
{
        char *dma_addr = bus_to_virt(dev_addr);

        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, int dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, int dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
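
/*
 * Illustrative CPU-access pattern for a long-lived mapping (a sketch;
 * "dev", "handle" and "len" are hypothetical):
 *
 *     swiotlb_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *     // ... CPU reads the received data through the original buffer ...
 *     swiotlb_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *     // ... the device may now DMA into the buffer again ...
 */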

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
{
        char *dma_addr = bus_to_virt(dev_addr) + offset;

        BUG_ON(dir == DMA_NONE);
        if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
                sync_single(hwdev, dma_addr, size, dir, target);
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                  unsigned long offset, size_t size, int dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                     unsigned long offset, size_t size, int dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_DEVICE);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_single
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are the
 * same here.
 */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               int dir)
{
        struct scatterlist *sg;
        void *addr;
        dma_addr_t dev_addr;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                addr = SG_ENT_VIRT_ADDRESS(sg);
                dev_addr = virt_to_bus(addr);
                if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
                        void *map = map_single(hwdev, addr, sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg(hwdev, sgl, i, dir);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = virt_to_bus(map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
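
/*
 * Illustrative scatter-gather usage (a sketch; "dev", "sgl" and "nents"
 * are hypothetical):
 *
 *     int mapped = swiotlb_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *     if (!mapped)
 *             return -ENOMEM;   // partial mappings were already unwound
 *     // program the device from sg_dma_address()/sg_dma_len() of each entry
 *     swiotlb_unmap_sg(dev, sgl, mapped, DMA_TO_DEVICE);
 */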

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_single() above.
 */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 int dir)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        unmap_single(hwdev, bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
        }
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, int dir, int target)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
                        sync_single(hwdev, bus_to_virt(sg->dma_address),
                                    sg->dma_length, dir, target);
                else if (dir == DMA_FROM_DEVICE)
                        dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
        }
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, int dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, int dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
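
/*
 * Note (illustrative): the error sentinel is simply the bus address of
 * the emergency overflow buffer, which swiotlb_map_single() hands out
 * when the pool is exhausted, so a failed streaming mapping is detected
 * by comparing the returned handle against that one well-known address.
 */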

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return virt_to_bus(io_tlb_end - 1) <= mask;
}

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
