OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories: test_project

File: trunk/linux_sd_driver/include/asm-or32/dma-mapping.h (rev 82)
#ifndef __OR32_DMA_MAPPING_H__
#define __OR32_DMA_MAPPING_H__

//#warning "__PHX__ DMA mapping is disabled, change & fix here to enable it"
//#include <asm-generic/dma-mapping-broken.h>

#include <asm/scatterlist.h>
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>
/* kmalloc()/kfree() for the non-PCI fallback paths below */
#include <linux/slab.h>

/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */
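
/* Usage sketch (illustrative only, not part of the original file): a PCI
 * driver with a hypothetical `struct pci_dev *pdev' would call these
 * wrappers exactly like the generic DMA API, e.g.
 *
 *      dma_addr_t handle;
 *      void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &handle,
 *                                     GFP_KERNEL);
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, handle);
 *
 * which lands in pci_alloc_consistent()/pci_free_consistent() below when
 * CONFIG_PCI is set.
 */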

#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */

static inline int
dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t dma_handle)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
                                    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
                                       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        BUG_ON(dev->bus != &pci_bus_type);

        pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return pci_dma_mapping_error(dma_addr);
}

#else

static inline int
dma_supported(struct device *dev, u64 mask)
{
        return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG();
        return 0;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t flag)
{
        void *virt;

        /* fallback: hand out ordinary kernel memory and report its
         * bus address; kmalloc() keeps the buffer physically
         * contiguous */
        virt = kmalloc(size, flag);
        if (!virt)
                return NULL;

        *dma_handle = virt_to_bus(virt);
        return virt;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t dma_handle)
{
        kfree(cpu_addr);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
               enum dma_data_direction direction)
{
        BUG();
        return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG();
        return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sglist[0].length == 0);

        for_each_sg(sglist, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg->dma_address = sg_phys(sg);
        }

        //flush_write_buffers();
        return nents;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        //XXX:BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        BUG();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
        return 0;
}

#endif /* CONFIG_PCI */

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all processors, so return
         * the maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
}

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        /* just sync everything, that's all the pci API can do */
        dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        /* just sync everything, that's all the pci API can do */
        dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        /* could define this in terms of the dma_cache ... operations,
         * but if you get this on a platform, you should convert the platform
         * to using the generic device DMA API */
        BUG();
}

#endif /* __OR32_DMA_MAPPING_H__ */
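
A minimal driver-side usage sketch (not part of this header; the device,
buffer, and transfer length are hypothetical): with CONFIG_PCI enabled, a
streaming mapping made through these wrappers is forwarded to the matching
pci_* call, and dma_mapping_error() here takes the bare handle.

/* sketch: map a driver buffer for device reads, then tear the mapping down */
static int example_tx(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* map the buffer for a CPU -> device transfer */
        handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(handle))
                return -ENOMEM;

        /* ... program the device with `handle', run the transfer ... */

        dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
        return 0;
}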