#ifndef __ASM_SH64_IO_H
#define __ASM_SH64_IO_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/io.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 Paul Mundt
 *
 */

/*
 * Convention:
 *    read{b,w,l}/write{b,w,l} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA.
 * These may (will) be platform-specific functions.
 *
 * In addition, we have ctrl_in{b,w,l}/ctrl_out{b,w,l} for SuperH-specific
 * I/O, which are processor specific. The address should be the result of
 * onchip_remap().
 */
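
/*
 * For illustration only (not part of the original header): a hypothetical
 * driver snippet showing the convention above. The port number, pci_membase
 * and onchip_vaddr are invented placeholders; pci_membase would come from
 * ioremap() and onchip_vaddr from onchip_remap(), both declared below.
 *
 *	unsigned char lsr = inb(0x3f8 + 5);		// ISA port I/O
 *	writel(0xdeadbeef, pci_membase + 0x10);		// PCI memory-mapped I/O
 *	unsigned long id = ctrl_inl(onchip_vaddr);	// on-chip SuperH register
 */
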
#include <asm/cache.h>
#include <asm/system.h>

#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
#define page_to_bus page_to_phys

/*
 * Nothing overly special here: instead of doing the same thing
 * over and over again, we just define a set of sh64_in/out functions
 * with an implicit size. The traditional read{b,w,l}/write{b,w,l}
 * mess is wrapped onto these, as are the SH-specific ctrl_in/out routines.
 */
static inline unsigned char sh64_in8(unsigned long addr)
{
	return *(volatile unsigned char *)addr;
}

static inline unsigned short sh64_in16(unsigned long addr)
{
	return *(volatile unsigned short *)addr;
}

static inline unsigned long sh64_in32(unsigned long addr)
{
	return *(volatile unsigned long *)addr;
}

static inline unsigned long long sh64_in64(unsigned long addr)
{
	return *(volatile unsigned long long *)addr;
}

static inline void sh64_out8(unsigned char b, unsigned long addr)
{
	*(volatile unsigned char *)addr = b;
	wmb();
}

static inline void sh64_out16(unsigned short b, unsigned long addr)
{
	*(volatile unsigned short *)addr = b;
	wmb();
}

static inline void sh64_out32(unsigned long b, unsigned long addr)
{
	*(volatile unsigned long *)addr = b;
	wmb();
}

static inline void sh64_out64(unsigned long long b, unsigned long addr)
{
	*(volatile unsigned long long *)addr = b;
	wmb();
}

#define readb(addr) sh64_in8(addr)
#define readw(addr) sh64_in16(addr)
#define readl(addr) sh64_in32(addr)

#define writeb(b, addr) sh64_out8(b, addr)
#define writew(b, addr) sh64_out16(b, addr)
#define writel(b, addr) sh64_out32(b, addr)

#define ctrl_inb(addr) sh64_in8(addr)
#define ctrl_inw(addr) sh64_in16(addr)
#define ctrl_inl(addr) sh64_in32(addr)

#define ctrl_outb(b, addr) sh64_out8(b, addr)
#define ctrl_outw(b, addr) sh64_out16(b, addr)
#define ctrl_outl(b, addr) sh64_out32(b, addr)

unsigned long inb(unsigned long port);
unsigned long inw(unsigned long port);
unsigned long inl(unsigned long port);
void outb(unsigned long value, unsigned long port);
void outw(unsigned long value, unsigned long port);
void outl(unsigned long value, unsigned long port);

#ifdef __KERNEL__

#define IO_SPACE_LIMIT 0xffffffff

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/SuperH mapping.
 */
extern __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return __pa(address);
}

extern __inline__ void *phys_to_virt(unsigned long address)
{
	return __va(address);
}

extern void *__ioremap(unsigned long phys_addr, unsigned long size,
		       unsigned long flags);

extern __inline__ void *ioremap(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 1);
}

extern __inline__ void *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}

extern void iounmap(void *addr);

unsigned long onchip_remap(unsigned long addr, unsigned long size, const char *name);
extern void onchip_unmap(unsigned long vaddr);
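
/*
 * Usage sketch (illustrative only; DEV_PHYS_BASE, DEV_REG_SIZE and
 * ONCHIP_PHYS are invented placeholders, not constants defined anywhere):
 * map a device register window uncached, poke it, and tear the mapping
 * down again.
 *
 *	void *regs = ioremap_nocache(DEV_PHYS_BASE, DEV_REG_SIZE);
 *	if (regs) {
 *		unsigned long id = readl((unsigned long)regs);
 *		iounmap(regs);
 *	}
 *
 * On-chip peripherals go through onchip_remap() instead, and the returned
 * address is then used with the ctrl_in/out accessors:
 *
 *	unsigned long vaddr = onchip_remap(ONCHIP_PHYS, 0x1000, "demo");
 *	ctrl_outl(0x1, vaddr);
 *	onchip_unmap(vaddr);
 */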

static __inline__ int check_signature(unsigned long io_addr,
				      const unsigned char *signature, int length)
{
	int retval = 0;
	do {
		if (readb(io_addr) != *signature)
			goto out;
		io_addr++;
		signature++;
		length--;
	} while (length);
	retval = 1;
out:
	return retval;
}
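
/*
 * Example (hypothetical; rom_vaddr is an invented placeholder that would
 * normally come from ioremap() of a ROM's physical address): probe for an
 * ASCII signature before claiming the device.
 *
 *	static const unsigned char sig[] = "PCIR";
 *	if (check_signature(rom_vaddr, sig, 4))
 *		;	// signature matched, continue probing
 */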

/*
 * The caches on some architectures aren't dma-coherent, so coherency
 * has to be handled in software. There are three types of operations
 * that can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
 *    writing the contents of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches. Dirty lines of the caches may be written back or simply
 *    be discarded. This operation is necessary before DMA operations
 *    to the memory.
 *  - dma_cache_wback(start, size) writes back any dirty lines but does
 *    not invalidate the cache. This can be used before DMA reads from
 *    memory.
 */

/*
 * These are implemented even though DMA is not yet supported on ST50.
 *
 * Note that PCI DMA is supposed to be cache coherent, so these should
 * not be used by PCI device drivers.
 */

static __inline__ void dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_inv(unsigned long start, unsigned long size)
{
	/*
	 * Note that the caller has to be careful with overzealous
	 * invalidation should there be partial cache lines at the
	 * extremities of the specified range.
	 */
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbi %0, 0" : : "r" (s));
}

static __inline__ void dma_cache_wback(unsigned long start, unsigned long size)
{
	unsigned long s = start & L1_CACHE_ALIGN_MASK;
	unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbwb %0, 0" : : "r" (s));
}
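
/*
 * A minimal sketch of the intended ordering (illustrative only; buf, len
 * and the start_dma_* helpers are invented placeholders, since DMA is not
 * yet supported here):
 *
 *	dma_cache_wback(buf, len);	// CPU filled buf; flush before the device reads it
 *	start_dma_to_device(buf, len);
 *
 *	dma_cache_inv(buf, len);	// drop stale lines before the device writes buf
 *	start_dma_from_device(buf, len);
 */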

#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IO_H */