OpenCores Subversion repository or1k_old
URL: https://opencores.org/ocsvn/or1k_old/or1k_old/trunk
File: or1k_old/trunk/rc203soc/sw/uClinux/include/asm-m68knommu/io.h (rev 1782; all lines last changed in rev 1633 by jcastillo)

#ifndef _M68K_IO_H
#define _M68K_IO_H

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the m68k architecture, we just read/write the
 * memory location directly.
 */
/* ++roman: The assignments to temp. vars avoid gcc sometimes generating
 * two accesses to memory, which may be undesirable for some devices.
 */
#define readb(addr) \
    ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
#define readw(addr) \
    ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
#define readl(addr) \
    ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })

#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

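/*
 * Illustrative sketch: readb()/writeb() take the device register address
 * directly, so a driver typically wraps them around a known base address.
 * The register addresses and bit mask below are hypothetical.
 */
#if 0
#define EXAMPLE_UART_DATA    0xfffff900         /* made-up data register   */
#define EXAMPLE_UART_STATUS  0xfffff904         /* made-up status register */
#define EXAMPLE_RX_READY     0x01               /* made-up data-ready bit  */

static unsigned char example_uart_getc(void)
{
        /* Busy-wait until the (hypothetical) data-ready bit is set,
           then read the received byte. */
        while (!(readb(EXAMPLE_UART_STATUS) & EXAMPLE_RX_READY))
                ;
        return readb(EXAMPLE_UART_DATA);
}
#endif
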
#if 0

/* There is no difference between I/O and memory on 68k, these are the same */
#define inb(addr) \
    ({ unsigned char __v = (*(volatile unsigned char *) (addr)); printk("inb(%x)=%02x\n", (addr), __v); __v; })
#define inw(addr) \
    ({ unsigned short __v = (*(volatile unsigned short *) (addr)); printk("inw(%x)=%04x\n", (addr), __v); __v; })
#define inl(addr) \
    ({ unsigned int __v = (*(volatile unsigned int *) (addr)); printk("inl(%x)=%08x\n", (addr), __v); __v; })

#define outb(b,addr) { ((*(volatile unsigned char *) (addr)) = (b)) ; printk("outb(%x)=%02x\n", (addr), (b)); }
#define outw(b,addr) { ((*(volatile unsigned short *) (addr)) = (b)) ; printk("outw(%x)=%04x\n", (addr), (b)); }
#define outl(b,addr) { ((*(volatile unsigned int *) (addr)) = (b)) ; printk("outl(%x)=%08x\n", (addr), (b)); }

#else

/* There is no difference between I/O and memory on 68k, these are the same */
#define inb(addr) \
    ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
#define inw(addr) \
    ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
#define inl(addr) \
    ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })

#define outb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define outw(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define outl(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

#endif

#ifdef CONFIG_COLDFIRE

#define inb_p   inb
#define inw_p   inw
#define outb_p  outb
#define outw_p  outw


static inline void outsb(void *addr, void *buf, int len)
{
        volatile unsigned char *ap = (volatile unsigned char *) addr;
        unsigned char *bp = (unsigned char *) buf;
        while (len--)
                *ap = *bp++;
}

static inline void outsw(void *addr, void *buf, int len)
{
        volatile unsigned short *ap = (volatile unsigned short *) addr;
        unsigned short *bp = (unsigned short *) buf;
        while (len--)
                *ap = *bp++;
}

static inline void outsl(void *addr, void *buf, int len)
{
        volatile unsigned int *ap = (volatile unsigned int *) addr;
        unsigned int *bp = (unsigned int *) buf;
        while (len--)
                *ap = *bp++;
}

static inline void insb(void *addr, void *buf, int len)
{
        volatile unsigned char *ap = (volatile unsigned char *) addr;
        unsigned char *bp = (unsigned char *) buf;
        while (len--)
                *bp++ = *ap;
}

static inline void insw(void *addr, void *buf, int len)
{
        volatile unsigned short *ap = (volatile unsigned short *) addr;
        unsigned short *bp = (unsigned short *) buf;
        while (len--)
                *bp++ = *ap;
}

static inline void insl(void *addr, void *buf, int len)
{
        volatile unsigned int *ap = (volatile unsigned int *) addr;
        unsigned int *bp = (unsigned int *) buf;
        while (len--)
                *bp++ = *ap;
}

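/*
 * Illustrative sketch: each string helper above copies len items between a
 * memory buffer and one fixed device register. A driver draining a
 * hypothetical 16-bit data FIFO register into a local buffer might do:
 */
#if 0
static void example_drain_fifo(void *fifo_reg, unsigned short *dst, int nwords)
{
        insw(fifo_reg, dst, nwords);    /* nwords reads of the same register */
}
#endif
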
#else

/* These try and unroll 64 transfers, then 8, then 1 at a time */
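/* Each pass of the first loop below moves 32 words (eight movem.w chunks of
 * four words each). The rol.w #8 instructions swap the two bytes of every
 * 16-bit word, matching the byte-swapping fallback loop at the end of each
 * routine.
 */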
static inline void outsw(void *addr,void *buf,int len)
{
   unsigned short * __e = (unsigned short *)(buf) + (len);
   unsigned short * __p = (unsigned short *)(buf);
   while (__p + 32 < __e) {
      asm volatile ("
       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       "
       : "=a" (__p)
       : "0" (__p) , "a" (addr)
       : "d4", "d5", "d6", "d7");
    }
   while (__p + 8 < __e) {
      asm volatile ("
       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;

       movem.w %0@+, %%d4-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d5, %2@;
       movem.w %%d6-%%d7, %2@;
       "
       : "=a" (__p)
       : "0" (__p) , "a" (addr)
       : "d4", "d5", "d6", "d7");
    }
    while (__p < __e) {
       *(volatile unsigned short *)(addr) =
         (((*__p) & 0xff) << 8) | ((*__p) >> 8);
       __p++;
    }
}

static inline void insw(void *addr,void *buf,int len)
{
   unsigned short * __e = (unsigned short *)(buf) + (len);
   unsigned short * __p = (unsigned short *)(buf);
   unsigned short __v;
   while (__p + 32 < __e) {
      asm volatile ("
       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       "
       : "=a" (__p)
       : "0" (__p) , "a" (addr)
       : "d4", "d5", "d6", "d7");
    }
    while (__p + 8 < __e) {
      asm volatile ("
       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;

       movem.w %2@, %%d4-%%d5;
       movem.w %2@, %%d6-%%d7;
       rol.w #8, %%d4;
       rol.w #8, %%d5;
       rol.w #8, %%d6;
       rol.w #8, %%d7;
       movem.w %%d4-%%d7, %0@;
       addq #8, %0;
       "
       : "=a" (__p)
       : "0" (__p) , "a" (addr)
       : "d4", "d5", "d6", "d7");
    }
    while (__p < __e) {
       __v = *(volatile unsigned short *)(addr);
       *(__p++) = ((__v & 0xff) << 8) | (__v >> 8);
    }
}

#define inb_p(addr) get_user_byte_io((char *)(addr))
#define outb_p(x,addr) put_user_byte_io((x),(char *)(addr))

#endif /* CONFIG_COLDFIRE */

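/*
 * Single-byte accessors built on an explicit move.b instruction; the
 * volatile asm makes sure the access is really performed. They back the
 * inb_p()/outb_p() macros defined in the non-ColdFire branch above.
 */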
static inline unsigned char get_user_byte_io(const char * addr)
{
        register unsigned char _v;

        __asm__ __volatile__ ("moveb %1,%0":"=dm" (_v):"m" (*addr));
        return _v;
}

static inline void put_user_byte_io(char val,char *addr)
{
        __asm__ __volatile__ ("moveb %0,%1"
                              : /* no outputs */
                              :"idm" (val),"m" (*addr)
                              : "memory");
}

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/i386 mapping (but if we ever
 * make the kernel segment mapped at 0, we need to do translation
 * on the i386 as well)
 */
extern unsigned long mm_vtop(unsigned long addr);
extern unsigned long mm_ptov(unsigned long addr);

extern inline unsigned long virt_to_phys(volatile void * address)
{
        return (unsigned long) mm_vtop((unsigned long)address);
}

extern inline void * phys_to_virt(unsigned long address)
{
        return (void *) mm_ptov(address);
}

/*
 * IO bus memory addresses are also 1:1 with the physical address
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
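
/*
 * Illustrative sketch: because bus addresses equal physical addresses here,
 * a driver programming a hypothetical DMA engine can hand the bus address of
 * a kernel buffer straight to the hardware. EXAMPLE_DMA_ADDR_REG below is a
 * made-up register address.
 */
#if 0
#define EXAMPLE_DMA_ADDR_REG 0xfffffa00         /* made-up register address */

static void example_start_dma(void *buffer)
{
        writel(virt_to_bus(buffer), EXAMPLE_DMA_ADDR_REG);
}
#endif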
 
#endif /* _M68K_IO_H */
