or1k/trunk/linux/linux-2.4/include/asm-mips/system.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

__asm__ (
        ".macro\t__sti\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,0x1f\n\t"
        "xori\t$1,0x1e\n\t"
        "mtc0\t$1,$12\n\t"
        ".set\tpop\n\t"
        ".endm");

static __inline__ void
__sti(void)
{
        __asm__ __volatile__(
                "__sti"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
__asm__ (
        ".macro\t__cli\n\t"
        ".set\tpush\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1,$12\n\t"
        "ori\t$1,1\n\t"
        "xori\t$1,1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1,$12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

static __inline__ void
__cli(void)
{
        __asm__ __volatile__(
                "__cli"
                : /* no outputs */
                : /* no inputs */
                : "memory");
}

__asm__ (
        ".macro\t__save_flags flags\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        "mfc0\t\\flags, $12\n\t"
        ".set\tpop\n\t"
        ".endm");

#define __save_flags(x)                                                 \
__asm__ __volatile__(                                                   \
        "__save_flags %0"                                               \
        : "=r" (x))

__asm__ (
        ".macro\t__save_and_cli result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"
        "ori\t$1, \\result, 1\n\t"
        "xori\t$1, 1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tpop\n\t"
        ".endm");

#define __save_and_cli(x)                                               \
__asm__ __volatile__(                                                   \
        "__save_and_cli\t%0"                                            \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

__asm__ (
        ".macro\t__save_and_sti result\n\t"
        ".set\tpush\n\t"
        ".set\treorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t\\result, $12\n\t"
        "ori\t$1, \\result, 1\n\t"
        ".set\tnoreorder\n\t"
        "mtc0\t$1, $12\n\t"
        ".set\tpop\n\t"
        ".endm");

#define __save_and_sti(x)                                               \
__asm__ __volatile__(                                                   \
        "__save_and_sti\t%0"                                            \
        : "=r" (x)                                                      \
        : /* no inputs */                                               \
        : "memory")

__asm__(".macro\t__restore_flags flags\n\t"
        ".set\tnoreorder\n\t"
        ".set\tnoat\n\t"
        "mfc0\t$1, $12\n\t"
        "andi\t\\flags, 1\n\t"
        "ori\t$1, 1\n\t"
        "xori\t$1, 1\n\t"
        "or\t\\flags, $1\n\t"
        "mtc0\t\\flags, $12\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        "sll\t$0, $0, 1\t\t\t# nop\n\t"
        ".set\tat\n\t"
        ".set\treorder\n\t"
        ".endm");

#define __restore_flags(flags)                                          \
do {                                                                    \
        unsigned long __tmp1;                                           \
                                                                        \
        __asm__ __volatile__(                                           \
                "__restore_flags\t%0"                                   \
                : "=r" (__tmp1)                                         \
                : "0" (flags)                                           \
                : "memory");                                            \
} while(0)

#ifdef CONFIG_SMP

extern void __global_sti(void);
extern void __global_cli(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#  define sti() __global_sti()
#  define cli() __global_cli()
#  define save_flags(x) do { x = __global_save_flags(); } while (0)
#  define restore_flags(x) __global_restore_flags(x)
#  define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#  define save_and_sti(x) do { save_flags(x); sti(); } while(0)

#else /* Single processor */

#  define sti() __sti()
#  define cli() __cli()
#  define save_flags(x) __save_flags(x)
#  define save_and_cli(x) __save_and_cli(x)
#  define restore_flags(x) __restore_flags(x)
#  define save_and_sti(x) __save_and_sti(x)

#endif /* SMP */

/* For spinlocks etc */
#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_set(x)        __save_and_sti(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()
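
/*
 * Illustrative usage sketch, not part of the original header: code that
 * must not race against its own interrupt handler typically brackets the
 * critical section with local_irq_save()/local_irq_restore() as defined
 * above.  The names example_event_count and example_note_event below are
 * hypothetical.
 */
#if 0   /* example only */
static int example_event_count;

static __inline__ void example_note_event(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* disable interrupts, remember old IE bit */
        example_event_count++;          /* cannot be interrupted on this CPU here */
        local_irq_restore(flags);       /* put the IE bit back as it was */
}
#endif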

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()                                \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                ".set   mips2\n\t"              \
                "sync\n\t"                      \
                ".set   pop"                    \
                : /* no output */               \
                : /* no input */                \
                : "memory")
#else
#define __sync()        do { } while(0)
#endif

#define __fast_iob()                            \
        __asm__ __volatile__(                   \
                ".set   push\n\t"               \
                ".set   noreorder\n\t"          \
                "lw     $0,%0\n\t"              \
                "nop\n\t"                       \
                ".set   pop"                    \
                : /* no output */               \
                : "m" (*(int *)KSEG1)           \
                : "memory")

#define fast_wmb()      __sync()
#define fast_rmb()      __sync()
#define fast_mb()       __sync()
#define fast_iob()                              \
        do {                                    \
                __sync();                       \
                __fast_iob();                   \
        } while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            wbflush()
#define iob()           wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()           fast_wmb()
#define rmb()           fast_rmb()
#define mb()            fast_mb()
#define iob()           fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
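
/*
 * Illustrative usage sketch, not part of the original header: the classic
 * producer/consumer pairing of the wmb()/rmb() barriers defined above.
 * The producer makes the data visible before the flag; the consumer orders
 * the flag read before the data read.  example_data, example_ready,
 * example_publish and example_consume are hypothetical names.
 */
#if 0   /* example only */
static int example_data;
static volatile int example_ready;

static __inline__ void example_publish(int value)
{
        example_data = value;
        wmb();                          /* data must be visible before the flag */
        example_ready = 1;
}

static __inline__ int example_consume(void)
{
        while (!example_ready)
                ;                       /* busy-wait for the flag */
        rmb();                          /* flag read ordered before the data read */
        return example_data;
}
#endif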

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern asmlinkage void *resume(void *last, void *next);

#define prepare_to_switch()     do { } while(0)

struct task_struct;

#define switch_to(prev,next,last) \
do { \
        (last) = resume(prev, next); \
} while(0)

/*
 * For 32 and 64 bit operands we can take advantage of ll and sc.
 * FIXME: This doesn't work for R3000 machines.
 */
static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
        unsigned long dummy;

        __asm__ __volatile__(
                ".set\tpush\t\t\t\t# xchg_u32\n\t"
                ".set\tnoreorder\n\t"
                ".set\tnomacro\n\t"
                "ll\t%0, %3\n"
                "1:\tmove\t%2, %z4\n\t"
                "sc\t%2, %1\n\t"
                "beqzl\t%2, 1b\n\t"
                " ll\t%0, %3\n\t"
                "sync\n\t"
                ".set\tpop"
                : "=&r" (val), "=m" (*m), "=&r" (dummy)
                : "R" (*m), "Jr" (val)
                : "memory");

        return val;
#else
        unsigned long flags, retval;

        local_irq_save(flags);
        retval = *m;
        *m = val;
        local_irq_restore(flags);       /* implies memory barrier  */
        return retval;
#endif /* Processor-dependent optimization */
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 4:
                        return xchg_u32(ptr, x);
        }
        return x;
}
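
/*
 * Illustrative usage sketch, not part of the original header: tas() above
 * is the classic building block of a test-and-set busy-wait lock.  The
 * names example_lock, example_acquire and example_release are hypothetical;
 * real code would use the kernel's spinlock primitives instead.
 */
#if 0   /* example only */
static volatile int example_lock;

static __inline__ void example_acquire(void)
{
        while (tas(&example_lock))
                ;                       /* spin while the old value was already 1 */
}

static __inline__ void example_release(void)
{
        example_lock = 0;               /* a real lock would issue wmb() before this store */
}
#endif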

extern void *set_except_vector(int n, void *addr);
extern void per_cpu_trap_init(void);

extern void __die(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
        const char *func, unsigned long line);

#define die(msg, regs)                                                  \
        __die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)                                        \
        __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)

#endif /* _ASM_SYSTEM_H */
