or1k/trunk/linux/linux-2.4/include/asm-x86_64/pci.h (rev 1765)

#ifndef __x8664_PCI_H
#define __x8664_PCI_H

#include <linux/config.h>
#include <asm/io.h>

#ifdef __KERNEL__

extern dma_addr_t bad_dma_address;

/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */

#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
#define pcibios_assign_all_busses()     0
#endif
#define pcibios_scan_all_fns()          0

extern unsigned long pci_mem_start;
#define PCIBIOS_MIN_IO          0x1000
#define PCIBIOS_MIN_MEM         (pci_mem_start)

void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);

void pcibios_set_master(struct pci_dev *dev);
void pcibios_penalize_isa_irq(int irq);
struct irq_routing_table *pcibios_get_irq_routing_table(void);
int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/mmzone.h>

struct pci_dev;
extern int force_mmu;

/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be a valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns a non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_handle to the pci side dma address as well, else *dma_handle
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                  dma_addr_t *dma_handle);

/* Free and unmap a consistent DMA buffer.
 * vaddr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_handle must be the same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with vaddr/dma_handle
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
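
/* Illustrative sketch (not part of the original header): a hypothetical
 * driver allocating and freeing a small consistent-DMA descriptor ring
 * with the two functions above.  struct my_desc and my_ring_init() are
 * made up for illustration.
 */
#if 0 /* example only */
struct my_desc {
        u32 status;
        u32 addr;
};

static int my_ring_init(struct pci_dev *pdev)
{
        dma_addr_t ring_dma;
        struct my_desc *ring;

        /* CPU pointer comes back; the bus address lands in ring_dma */
        ring = pci_alloc_consistent(pdev, 64 * sizeof(*ring), &ring_dma);
        if (ring == NULL)
                return -ENOMEM;

        /* ... hand ring_dma to the device, touch ring from the CPU ... */

        pci_free_consistent(pdev, 64 * sizeof(*ring), ring, ring_dma);
        return 0;
}
#endif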

#ifdef CONFIG_SWIOTLB
extern int swiotlb;
extern dma_addr_t swiotlb_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
                                     int dir);
extern void swiotlb_unmap_single(struct pci_dev *hwdev, dma_addr_t dev_addr,
                                 size_t size, int dir);
extern void swiotlb_sync_single(struct pci_dev *hwdev, dma_addr_t dev_addr,
                                size_t size, int dir);
extern void swiotlb_sync_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nelems,
                            int dir);
#endif

#ifdef CONFIG_GART_IOMMU

/* Map a single buffer of the indicated size for DMA in streaming mode.
 * The 32-bit bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either pci_unmap_single or pci_dma_sync_single is performed.
 */
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
                                 size_t size, int direction);

void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t addr,
                      size_t size, int direction);
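
/* Illustrative sketch (not part of the original header): the usual
 * streaming-DMA life cycle with the two functions above.  pdev, buf,
 * len and the hypothetical hw_start_dma() are for illustration.
 */
#if 0 /* example only */
{
        dma_addr_t bus;

        bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
        hw_start_dma(bus, len);         /* the device owns the buffer now */
        /* ... wait for completion ... */
        pci_unmap_single(pdev, bus, len, PCI_DMA_TODEVICE);
        /* the CPU owns the buffer again */
}
#endif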

/*
 * pci_{map,unmap}_page map a kernel page to a dma_addr_t.  Identical
 * to pci_{map,unmap}_single, but take a struct page instead of a
 * virtual address.
 */

#define pci_map_page(dev,page,offset,size,dir) \
        pci_map_single((dev), page_address(page)+(offset), (size), (dir))
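
/* Illustrative sketch (not part of the original header): mapping a page
 * fragment, e.g. for zero-copy transmit of page-cache data.  pdev, page,
 * off and frag_len are hypothetical.
 */
#if 0 /* example only */
{
        dma_addr_t bus = pci_map_page(pdev, page, off, frag_len,
                                      PCI_DMA_TODEVICE);
        /* ... */
        pci_unmap_page(pdev, bus, frag_len, PCI_DMA_TODEVICE);
}
#endif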

#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)       \
        dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)         \
        __u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)                  \
        ((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)         \
        (((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)                    \
        ((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)           \
        (((PTR)->LEN_NAME) = (VAL))
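
/* Illustrative sketch (not part of the original header): the macros above
 * let a driver stash unmap information in its own structures at zero cost
 * on configurations where unmapping is a nop.  struct my_tx_slot and
 * my_map_example() are made up for illustration.
 */
#if 0 /* example only */
struct my_tx_slot {
        struct sk_buff *skb;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        DECLARE_PCI_UNMAP_LEN(len)
};

static void my_map_example(struct pci_dev *pdev, struct my_tx_slot *slot,
                           void *buf, size_t length)
{
        dma_addr_t bus = pci_map_single(pdev, buf, length, PCI_DMA_TODEVICE);

        pci_unmap_addr_set(slot, mapping, bus);
        pci_unmap_len_set(slot, len, length);

        /* ... later, at completion time ... */
        pci_unmap_single(pdev, pci_unmap_addr(slot, mapping),
                         pci_unmap_len(slot, len), PCI_DMA_TODEVICE);
}
#endif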

static inline void pci_dma_sync_single(struct pci_dev *hwdev,
                                       dma_addr_t dma_handle,
                                       size_t size, int direction)
{
#ifdef CONFIG_SWIOTLB
        if (swiotlb)
                return swiotlb_sync_single(hwdev, dma_handle, size, direction);
#endif
        BUG_ON(direction == PCI_DMA_NONE);
}

static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
                                   struct scatterlist *sg,
                                   int nelems, int direction)
{
        BUG_ON(direction == PCI_DMA_NONE);
#ifdef CONFIG_SWIOTLB
        if (swiotlb)
                return swiotlb_sync_sg(hwdev, sg, nelems, direction);
#endif
}

/* With the GART IOMMU the PCI address space does not equal the
 * physical memory address space.  The networking and block device
 * layers use this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS     (0)

#else
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
                                        size_t size, int direction)
{
        dma_addr_t addr;

        if (direction == PCI_DMA_NONE)
                out_of_line_bug();
        addr = virt_to_bus(ptr);

        /*
         * This is gross, but what else can we do?
         * Unfortunately drivers do not check the return value of this.
         */
        if ((addr+size) & ~hwdev->dma_mask)
                out_of_line_bug();
        return addr;
}

static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
                                    size_t size, int direction)
{
        if (direction == PCI_DMA_NONE)
                out_of_line_bug();
        /* Nothing to do */
}

static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
                                      unsigned long offset, size_t size, int direction)
{
        dma_addr_t addr;

        if (direction == PCI_DMA_NONE)
                out_of_line_bug();
        addr = page_to_pfn(page) * PAGE_SIZE + offset;
        if ((addr+size) & ~hwdev->dma_mask)
                out_of_line_bug();
        return addr;
}

/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)          (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)            (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)

/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 *
 * If you perform a pci_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, the
 * device again owns the buffer.
 */
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
                                       dma_addr_t dma_handle,
                                       size_t size, int direction)
{
        if (direction == PCI_DMA_NONE)
                out_of_line_bug();
        flush_write_buffers();
}

/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
                                   struct scatterlist *sg,
                                   int nelems, int direction)
{
        if (direction == PCI_DMA_NONE)
                out_of_line_bug();
        flush_write_buffers();
}
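
/* Illustrative sketch (not part of the original header): inspecting a
 * streaming mapping from the CPU without tearing it down.  pdev, bus,
 * len, desc and MY_DESC_DONE are hypothetical.
 */
#if 0 /* example only */
{
        /* the device has written into the buffer; resync before reading */
        pci_dma_sync_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
        if (desc->status & MY_DESC_DONE)
                handle_packet(desc);
        /* handing bus back to the device transfers ownership again */
}
#endif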

#define PCI_DMA_BUS_IS_PHYS     1

#endif /* CONFIG_GART_IOMMU */

extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                      int nents, int direction);
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                         int nents, int direction);

#define pci_unmap_page pci_unmap_single

/* Return whether the given PCI device DMA address mask can
 * be supported properly.  For example, if your device can
 * only drive the low 24 bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
        /*
         * We fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}
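
/* Illustrative sketch (not part of the original header): typical probe-time
 * use in a 2.4-era driver.  pdev and the 32-bit mask are examples only.
 */
#if 0 /* example only */
{
        if (!pci_dma_supported(pdev, 0xffffffff))
                return -EIO;            /* device can't reach usable memory */
        pdev->dma_mask = 0xffffffff;    /* record the mask the device drives */
}
#endif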

/* This is always fine. */
#define pci_dac_dma_supported(pci_dev, mask)    (1)

static __inline__ dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
{
        return ((dma64_addr_t) page_to_bus(page) +
                (dma64_addr_t) offset);
}

static __inline__ struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
        return pfn_to_page(phys_to_pfn(dma_addr));
}

static __inline__ unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
        return (dma_addr & ~PAGE_MASK);
}

static __inline__ void
pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
{
        flush_write_buffers();
}

/* These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns.
 */
#define sg_dma_address(sg)      ((sg)->dma_address)
#define sg_dma_len(sg)          ((sg)->length)
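
/* Illustrative sketch (not part of the original header): walking only the
 * entries pci_map_sg() actually produced.  pdev, sg, nents and the
 * hypothetical hw_queue_segment() are for illustration.
 */
#if 0 /* example only */
{
        int i, count;

        count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
        for (i = 0; i < count; i++)     /* count, not nents! */
                hw_queue_segment(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
        /* ... after the transfer, unmap with the original nents ... */
        pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
}
#endif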

/* Return the index of the PCI controller for device. */
static inline int pci_controller_num(struct pci_dev *dev)
{
        return 0;
}

#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                               enum pci_mmap_state mmap_state, int write_combine);

#endif /* __KERNEL__ */

#endif /* __x8664_PCI_H */