#ifndef __ALPHA_PCI_H
#define __ALPHA_PCI_H

#ifdef __KERNEL__

#include <linux/spinlock.h>
#include <asm/scatterlist.h>
#include <asm/machvec.h>

/*
 * The following structure is used to manage multiple PCI busses.
 */

struct pci_dev;
struct pci_bus;
struct resource;
struct pci_iommu_arena;
struct page;

/* A controller.  Used to manage multiple PCI busses.  */

struct pci_controller {
	struct pci_controller *next;
	struct pci_bus *bus;
	struct resource *io_space;
	struct resource *mem_space;

	/* The following are for reporting to userland.  The invariant is
	   that if we report a BWX-capable dense memory, we do not report
	   a sparse memory at all, even if it exists.  */
	unsigned long sparse_mem_base;
	unsigned long dense_mem_base;
	unsigned long sparse_io_base;
	unsigned long dense_io_base;

	/* This one's for the kernel only.  It's in KSEG somewhere.  */
	unsigned long config_space_base;

	unsigned int index;
	unsigned int first_busno;
	unsigned int last_busno;

	struct pci_iommu_arena *sg_pci;
	struct pci_iommu_arena *sg_isa;

	void *sysdata;
};

/* Override the logic in pci_scan_bus for skipping already-configured
   bus numbers.  */

#define pcibios_assign_all_busses()	1
#define pcibios_scan_all_fns()		0

#define PCIBIOS_MIN_IO		alpha_mv.min_io_address
#define PCIBIOS_MIN_MEM		alpha_mv.min_mem_address

extern void pcibios_set_master(struct pci_dev *dev);

extern inline void pcibios_penalize_isa_irq(int irq)
{
	/* We don't do dynamic PCI IRQ allocation.  */
}

/* IOMMU controls.  */

/* The PCI address space does not equal the physical memory address space.
   The networking and block device layers use this boolean for bounce buffer
   decisions.  */
#define PCI_DMA_BUS_IS_PHYS	0

/* Allocate and map a kernel buffer using consistent mode DMA for a PCI
   device.  Returns a non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

extern void *pci_alloc_consistent(struct pci_dev *, size_t, dma_addr_t *);

/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

extern void pci_free_consistent(struct pci_dev *, size_t, void *, dma_addr_t);

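/* Illustrative sketch only, not part of this interface: a hypothetical
   driver might pair the two calls like this, where "pdev" is its
   struct pci_dev and a one-page buffer is assumed.

	void *cpu_addr;
	dma_addr_t bus_addr;

	cpu_addr = pci_alloc_consistent(pdev, PAGE_SIZE, &bus_addr);
	if (cpu_addr) {
		... touch cpu_addr from the cpu, hand bus_addr
		    to the device ...
		pci_free_consistent(pdev, PAGE_SIZE, cpu_addr, bus_addr);
	}
*/
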
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

extern dma_addr_t pci_map_single(struct pci_dev *, void *, size_t, int);

/* Likewise, but for a page instead of an address.  */
extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
			       unsigned long, size_t, int);

/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */

extern void pci_unmap_single(struct pci_dev *, dma_addr_t, size_t, int);
extern void pci_unmap_page(struct pci_dev *, dma_addr_t, size_t, int);

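/* Sketch of the expected pairing (hypothetical driver code; pdev, buf
   and len are assumed).  The direction argument is one of the
   PCI_DMA_* values from <linux/pci.h>.

	dma_addr_t bus_addr;

	bus_addr = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	... start the device on bus_addr, wait for completion ...
	pci_unmap_single(pdev, bus_addr, len, PCI_DMA_TODEVICE);
*/
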
/* pci_unmap_{single,page} is not a nop, thus... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)		\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)		\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))

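/* Sketch: a hypothetical driver stashes the unmap cookie in its own
   per-buffer state via these macros, so portable driver code compiles
   the fields away on platforms where unmapping really is a nop (the
   struct and variable names here are invented for illustration).

	struct rx_slot {
		struct sk_buff *skb;
		DECLARE_PCI_UNMAP_ADDR(mapping)
		DECLARE_PCI_UNMAP_LEN(maplen)
	};

	pci_unmap_addr_set(slot, mapping, bus_addr);
	pci_unmap_len_set(slot, maplen, len);
	...
	pci_unmap_single(pdev, pci_unmap_addr(slot, mapping),
			 pci_unmap_len(slot, maplen), PCI_DMA_FROMDEVICE);
*/
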
/* Map a set of buffers described by scatterlist in streaming mode for
   PCI DMA.  This is the scatter-gather version of the above
   pci_map_single interface.  Here the scatter gather list elements
   are each tagged with the appropriate PCI dma address and length.
   They are obtained via sg_dma_{address,length}(SG).

   NOTE: An implementation may be able to use a smaller number of DMA
   address/length pairs than there are SG table elements (for example
   via virtual mapping capabilities).  The routine returns the number
   of addr/length pairs actually used, at most nents.

   Device ownership issues as mentioned above for pci_map_single are
   the same here.  */

extern int pci_map_sg(struct pci_dev *, struct scatterlist *, int, int);

/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

extern void pci_unmap_sg(struct pci_dev *, struct scatterlist *, int, int);

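/* Sketch (hypothetical usage; pdev, sg and nents assumed): program the
   device from the possibly-coalesced entries actually returned, but
   unmap with the original nents.

	int i, count;

	count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
	for (i = 0; i < count; i++)
		... feed sg_dma_address(&sg[i]) and sg_dma_length(&sg[i])
		    to the device ...
	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
*/
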
/* Make physical memory consistent for a single streaming mode DMA
   translation after a transfer.

   If you perform a pci_map_single() but wish to interrogate the
   buffer using the cpu, yet do not wish to tear down the PCI dma
   mapping, you must call this function before doing so.  At the next
   point you give the PCI dma address back to the card, the device
   again owns the buffer.  */

static inline void
pci_dma_sync_single(struct pci_dev *dev, dma_addr_t dma_addr, long size,
		    int direction)
{
	/* Nothing to do.  */
}

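/* Sketch: peeking at a live streaming mapping between transfers
   (hypothetical driver code; the mapping is assumed to have been made
   with PCI_DMA_FROMDEVICE).

	pci_dma_sync_single(pdev, bus_addr, len, PCI_DMA_FROMDEVICE);
	... read the buffer with the cpu ...
	... hand bus_addr back to the device; it owns the buffer again ...
*/
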
/* Make physical memory consistent for a set of streaming mode DMA
   translations after a transfer.  The same as pci_dma_sync_single but
   for a scatter-gather list, same rules and usage.  */

static inline void
pci_dma_sync_sg(struct pci_dev *dev, struct scatterlist *sg, int nents,
		int direction)
{
	/* Nothing to do.  */
}

/* Return whether the given PCI device DMA address mask can
   be supported properly.  For example, if your device can
   only drive the low 24 bits during PCI bus mastering, then
   you would pass 0x00ffffff as the mask to this function.  */

extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);

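/* Sketch: a hypothetical probe routine for a device that can only
   drive 24 address bits would bail out early if the mask cannot be
   satisfied.

	if (!pci_dma_supported(pdev, 0x00ffffff))
		return -EIO;
*/
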
/* True if the machine supports DAC addressing, and DEV can
   make use of it given MASK.  */
extern int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask);

/* Convert to/from DAC dma address and struct page.  */
extern dma64_addr_t pci_dac_page_to_dma(struct pci_dev *, struct page *,
					unsigned long, int);
extern struct page *pci_dac_dma_to_page(struct pci_dev *, dma64_addr_t);
extern unsigned long pci_dac_dma_to_offset(struct pci_dev *, dma64_addr_t);

static __inline__ void
pci_dac_dma_sync_single(struct pci_dev *pdev, dma64_addr_t dma_addr,
			size_t len, int direction)
{
	/* Nothing to do.  */
}

/* Return the index of the PCI controller for device PDEV.  */
extern int pci_controller_num(struct pci_dev *pdev);
#endif /* __KERNEL__ */

/* Values for the `which' argument to sys_pciconfig_iobase.  */
#define IOBASE_HOSE		0
#define IOBASE_SPARSE_MEM	1
#define IOBASE_DENSE_MEM	2
#define IOBASE_SPARSE_IO	3
#define IOBASE_DENSE_IO		4
#define IOBASE_ROOT_BUS		5
#define IOBASE_FROM_HOSE	0x10000

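/* Sketch (hypothetical userland code): the syscall has no glibc
   wrapper, so a program would invoke it via syscall(2); bus and devfn
   identify the device whose hose is queried.

	long dense_base = syscall(__NR_pciconfig_iobase,
				  IOBASE_DENSE_MEM, bus, devfn);
*/
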
#endif /* __ALPHA_PCI_H */