/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/config.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/byteorder.h>

#ifdef CONFIG_MIPS_COBALT
#include <asm/cobalt/io.h>
#endif

#ifdef CONFIG_DECSTATION
#include <asm/dec/io.h>
#endif

#ifdef CONFIG_MIPS_ATLAS
#include <asm/mips-boards/io.h>
#endif

#ifdef CONFIG_MIPS_MALTA
#include <asm/mips-boards/io.h>
#endif

#ifdef CONFIG_MIPS_SEAD
#include <asm/mips-boards/io.h>
#endif

#ifdef CONFIG_SGI_IP22
#include <asm/sgi/io.h>
#endif

#ifdef CONFIG_SGI_IP27
#include <asm/sn/io.h>
#endif

#ifdef CONFIG_SIBYTE_SB1xxx_SOC
#include <asm/sibyte/io.h>
#endif

#ifdef CONFIG_SGI_IP27
extern unsigned long bus_to_baddr[256];

#define bus_to_baddr(bus, addr)	(bus_to_baddr[(bus)->number] + (addr))
#define baddr_to_bus(bus, addr)	((addr) - bus_to_baddr[(bus)->number])
#define __swizzle_addr_w(port)	((port) ^ 2)
#else
#define bus_to_baddr(bus, addr)	(addr)
#define baddr_to_bus(bus, addr)	(addr)
#define __swizzle_addr_w(port)	(port)
#endif

/*
 * Slow down I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * Sane hardware offers swapping of I/O space accesses in hardware; less
 * sane hardware forces software to fiddle with this.  Totally insane hardware
 * introduces special cases like:
 *
 * IP22 seems braindead enough to swap 16-bit values in hardware, but not
 * 32-bit ones.  Go figure... Can't tell without documentation.
 *
 * We only do the swapping to keep the kernel config bits of bi-endian
 * machines a bit saner.
 */
#if defined(CONFIG_SWAP_IO_SPACE_W) && defined(__MIPSEB__)
#define __ioswab16(x) swab16(x)
#else
#define __ioswab16(x) (x)
#endif
#if defined(CONFIG_SWAP_IO_SPACE_L) && defined(__MIPSEB__)
#define __ioswab32(x) swab32(x)
#else
#define __ioswab32(x) (x)
#endif
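
/*
 * Illustration (not part of the original header): on a big-endian
 * (__MIPSEB__) kernel built with CONFIG_SWAP_IO_SPACE_W, 16-bit values
 * read from a little-endian bus are byte-swapped on the way in, e.g.
 *
 *	u16 raw = 0x3412;		(hypothetical value on the bus)
 *	u16 val = __ioswab16(raw);	yields 0x1234 with swapping,
 *					0x3412 when it compiles away
 *
 * so the readw()/writew() accessors below hand drivers values in CPU
 * byte order.
 */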

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	PAGE_TO_PA(page)

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
static inline void * ioremap(unsigned long offset, unsigned long size)
{
	return (void *) (IO_SPACE_BASE | offset);
}

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
static inline void * ioremap_nocache (unsigned long offset, unsigned long size)
{
	return (void *) (IO_SPACE_BASE | offset);
}

static inline void iounmap(void *addr)
{
}
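
/*
 * Sketch of typical driver usage (illustrative only; the base address
 * and register offset below are made up):
 *
 *	void *regs = ioremap_nocache(0x1f000000, 0x1000);
 *	u32 status = readl(regs + 0x08);	read a status register
 *	writel(status | 0x1, regs + 0x08);	set an enable bit
 *	iounmap(regs);
 *
 * On this implementation the mapping is a simple offset into
 * IO_SPACE_BASE, but drivers should not rely on that.
 */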

/*
 * XXX We need system specific versions of these to handle EISA address bits
 * 24-31 on SNI.
 * XXX more SNI hacks.
 */
#define readb(addr)		(*(volatile unsigned char *)(addr))
#define readw(addr)		__ioswab16((*(volatile unsigned short *)(addr)))
#define readl(addr)		__ioswab32((*(volatile unsigned int *)(addr)))

#define __raw_readb(addr)	(*(volatile unsigned char *)(addr))
#define __raw_readw(addr)	(*(volatile unsigned short *)(addr))
#define __raw_readl(addr)	(*(volatile unsigned int *)(addr))

#define writeb(b,addr)		((*(volatile unsigned char *)(addr)) = (b))
#define writew(b,addr)		((*(volatile unsigned short *)(addr)) = (__ioswab16(b)))
#define writel(b,addr)		((*(volatile unsigned int *)(addr)) = (__ioswab32(b)))

#define __raw_writeb(b,addr)	((*(volatile unsigned char *)(addr)) = (b))
#define __raw_writew(w,addr)	((*(volatile unsigned short *)(addr)) = (w))
#define __raw_writel(l,addr)	((*(volatile unsigned int *)(addr)) = (l))

/*
 * TODO: Should use variants that don't do prefetching.
 */
#define memset_io(a,b,c)	memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.  This implies the assumption that there is only
 * one of these busses.
 */
extern unsigned long isa_slot_offset;

/*
 * ISA space is 'always mapped' on currently supported MIPS systems, no need
 * to explicitly ioremap() it.  The fact that the ISA IO space is mapped
 * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
 * are physical addresses.  The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI is quite close):
 */
#define __ISA_IO_base ((char *)(isa_slot_offset))

#define isa_readb(a) readb(__ISA_IO_base + (a))
#define isa_readw(a) readw(__ISA_IO_base + (a))
#define isa_readl(a) readl(__ISA_IO_base + (a))
#define isa_writeb(b,a) writeb(b,__ISA_IO_base + (a))
#define isa_writew(w,a) writew(w,__ISA_IO_base + (a))
#define isa_writel(l,a) writel(l,__ISA_IO_base + (a))
#define isa_memset_io(a,b,c)		memset_io(__ISA_IO_base + (a),(b),(c))
#define isa_memcpy_fromio(a,b,c)	memcpy_fromio((a),__ISA_IO_base + (b),(c))
#define isa_memcpy_toio(a,b,c)		memcpy_toio(__ISA_IO_base + (a),(b),(c))
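
/*
 * Example (illustrative only): ISA bus memory is addressed by its bus
 * offset, not by a physical or virtual address, so reading the first
 * byte of the legacy VGA text buffer at ISA address 0xb8000 would be:
 *
 *	unsigned char c = isa_readb(0xb8000);
 */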

/*
 * We don't have csum_partial_copy_fromio() yet, so we cheat here and
 * just copy it.  The net code will then do the checksum later.
 */
#define eth_io_copy_and_sum(skb,src,len,unused) memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(a,b,c,d) eth_copy_and_sum((a),(b),(c),(d))

/*
 * check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the mmio address io_addr.  This
 * address should have been obtained by ioremap.
 * Returns 1 on a match.
 */
static inline int check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
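
/*
 * Usage sketch (illustrative; the address and signature are made up):
 *
 *	unsigned long bios = (unsigned long) ioremap(0x1fc00000, 0x100);
 *	if (check_signature(bios, "EXAMPLE", 7))
 *		printk("adapter BIOS found\n");
 *
 * Note the comparison stops at the first mismatching byte and does not
 * treat the signature as a NUL-terminated string.
 */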

/*
 * isa_check_signature - find BIOS signatures
 * @io_addr: mmio address to check
 * @signature: signature block
 * @length: length of signature
 *
 * Perform a signature comparison with the ISA mmio address io_addr.
 * Returns 1 on a match.
 *
 * This function is deprecated.  New drivers should use ioremap and
 * check_signature.
 */
static inline int isa_check_signature(unsigned long io_addr,
	const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (isa_readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}

/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}

/*
 * I/O bus memory addresses are also 1:1 with the physical address.
 */
static inline unsigned long virt_to_bus(volatile void * address)
{
	return (unsigned long)address - PAGE_OFFSET;
}

static inline void * bus_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET);
}
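
/*
 * Example (illustrative): for directly mapped kernel memory the two
 * conversions are exact inverses, so for a kmalloc()ed buffer
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 * phys_to_virt(pa) returns buf again, and with the 1:1 bus mapping
 * above virt_to_bus(buf) yields the same value as pa.
 */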

/* This is too simpleminded for more sophisticated than dumb hardware ...  */
#define page_to_bus page_to_phys

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

#define set_io_port_base(base)	\
	do { * (unsigned long *) &mips_io_port_base = (base); } while (0)
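
/*
 * Illustration (the address is hypothetical): platform setup code picks
 * the uncached window through which its I/O ports are reached, e.g.
 *
 *	set_io_port_base(KSEG1ADDR(0x1fd00000));
 *
 * The cast through a non-const pointer is what allows this one write to
 * an otherwise const variable.
 */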

#define __SLOW_DOWN_IO \
	__asm__ __volatile__( \
		"sb\t$0,0x80(%0)" \
		: : "r" (mips_io_port_base));

#ifdef CONF_SLOWDOWN_IO
#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif
#else
#define SLOW_DOWN_IO
#endif

#define outb(val,port)							\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = (val);		\
} while(0)

#define outw(val,port)							\
do {									\
	*(volatile u16 *)(mips_io_port_base + __swizzle_addr_w(port)) =\
		__ioswab16(val);					\
} while(0)

#define outl(val,port)							\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
} while(0)

#define outb_p(val,port)						\
do {									\
	*(volatile u8 *)(mips_io_port_base + (port)) = (val);		\
	SLOW_DOWN_IO;							\
} while(0)

#define outw_p(val,port)						\
do {									\
	*(volatile u16 *)(mips_io_port_base + __swizzle_addr_w(port)) =\
		__ioswab16(val);					\
	SLOW_DOWN_IO;							\
} while(0)

#define outl_p(val,port)						\
do {									\
	*(volatile u32 *)(mips_io_port_base + (port)) = __ioswab32(val);\
	SLOW_DOWN_IO;							\
} while(0)

static inline unsigned char inb(unsigned long port)
{
	return *(volatile u8 *)(mips_io_port_base + port);
}

static inline unsigned short inw(unsigned long port)
{
	port = __swizzle_addr_w(port);

	return __ioswab16(*(volatile u16 *)(mips_io_port_base + port));
}

static inline unsigned int inl(unsigned long port)
{
	return __ioswab32(*(volatile u32 *)(mips_io_port_base + port));
}

static inline unsigned char inb_p(unsigned long port)
{
	u8 __val;

	__val = *(volatile u8 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;

	return __val;
}

static inline unsigned short inw_p(unsigned long port)
{
	u16 __val;

	port = __swizzle_addr_w(port);
	__val = *(volatile u16 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;

	return __ioswab16(__val);
}

static inline unsigned int inl_p(unsigned long port)
{
	u32 __val;

	__val = *(volatile u32 *)(mips_io_port_base + port);
	SLOW_DOWN_IO;
	return __ioswab32(__val);
}
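
/*
 * Example (illustrative; 0x3f8 is the conventional base of the first
 * PC-style 16550 UART): since MIPS port I/O is just a load/store at
 * mips_io_port_base + port, polling a register looks the same as on x86:
 *
 *	while (!(inb(0x3f8 + 5) & 0x20))
 *		;			wait for transmit holding empty
 *	outb('A', 0x3f8);		then send one character
 */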

static inline void __outsb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outb(*(u8 *)addr, port);
		addr++;
	}
}

static inline void __insb(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u8 *)addr = inb(port);
		addr++;
	}
}

static inline void __outsw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outw(*(u16 *)addr, port);
		addr += 2;
	}
}

static inline void __insw(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u16 *)addr = inw(port);
		addr += 2;
	}
}

static inline void __outsl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		outl(*(u32 *)addr, port);
		addr += 4;
	}
}

static inline void __insl(unsigned long port, void *addr, unsigned int count)
{
	while (count--) {
		*(u32 *)addr = inl(port);
		addr += 4;
	}
}

#define outsb(port, addr, count) __outsb(port, addr, count)
#define insb(port, addr, count) __insb(port, addr, count)
#define outsw(port, addr, count) __outsw(port, addr, count)
#define insw(port, addr, count) __insw(port, addr, count)
#define outsl(port, addr, count) __outsl(port, addr, count)
#define insl(port, addr, count) __insl(port, addr, count)
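
/*
 * Example (illustrative; 0x1f0 is the data port of a legacy IDE
 * controller): string I/O repeats the access on a single port while
 * walking the memory buffer, so reading a 512-byte sector would be
 *
 *	u16 buf[256];
 *	insw(0x1f0, buf, 256);		256 16-bit transfers
 */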

/*
 * The caches on some architectures aren't DMA-coherent and need to
 * handle this in software.  There are three types of operations that
 * can be applied to DMA buffers.
 *
 * - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *   by writing the content of the caches back to memory, if necessary.
 *   The function also invalidates the affected part of the caches as
 *   necessary before DMA transfers from outside to memory.
 * - dma_cache_wback(start, size) makes caches and memory coherent by
 *   writing the content of the caches back to memory, if necessary.
 * - dma_cache_inv(start, size) invalidates the affected parts of the
 *   caches.  Dirty lines of the caches may be written back or simply
 *   be discarded.  This operation is necessary before dma operations
 *   to the memory.
 */
#ifdef CONFIG_NONCOHERENT_IO

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start,size)	_dma_cache_wback_inv(start,size)
#define dma_cache_wback(start,size)	_dma_cache_wback(start,size)
#define dma_cache_inv(start,size)	_dma_cache_inv(start,size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_NONCOHERENT_IO */
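
/*
 * Usage sketch for noncoherent systems (buf and len are hypothetical):
 *
 *	dma_cache_wback((unsigned long) buf, len);
 *		flush CPU writes before the device reads the buffer
 *	dma_cache_inv((unsigned long) buf, len);
 *		discard stale lines before the CPU reads device-written data
 *
 * On coherent ("sane") hardware all three expand to no-ops, so drivers
 * can call them unconditionally.
 */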

#endif /* _ASM_IO_H */