#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/config.h> /* get configuration macros */
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/entry.h>

#define prepare_to_switch()	do { } while(0)

#ifdef __KERNEL__

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * and so we might see unexpected behavior if a task returned with
 * unexpected register values.
 *
 * syscall stores these registers itself, and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) { \
  register void *_prev __asm__ ("a0") = (prev); \
  register void *_next __asm__ ("a1") = (next); \
  register void *_last __asm__ ("d1"); \
  __asm__ __volatile__("jbsr " SYMBOL_NAME_STR(resume) \
		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
		       : "0" (_prev), "1" (_next) \
		       : "d0", "d2", "d3", "d4", "d5"); \
  (last) = _last; \
}
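
/*
 * Usage sketch (illustrative, not part of the original header): the
 * scheduler core is expected to invoke this roughly as
 *
 *	switch_to(prev, next, last);
 *
 * where prev is the outgoing task, next is the incoming one, and
 * last receives the task we actually switched away from.
 */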

/* interrupt control.. */
#if 0
#define __sti() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#else
#include <asm/hardirq.h>
#define __sti() ({							      \
	if (MACH_IS_Q40 || !local_irq_count(smp_processor_id()))	      \
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#endif
#define __cli() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#define __save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define __restore_flags(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#define local_irq_save(x)	({ __save_flags(x); __cli(); })
#define local_irq_set(x)	({ __save_flags(x); __sti(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
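
/*
 * Illustrative sketch (not from the original file): a typical caller
 * protects data shared with an interrupt handler like
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	(touch the shared data)
 *	local_irq_restore(flags);
 *
 * saving the status register, masking interrupts, and restoring the
 * previous mask afterwards.
 */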

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0)
#define save_and_set(x) do { save_flags(x); sti(); } while(0)

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
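
/*
 * All of these reduce to barrier(), a compiler-level barrier only:
 * per the comment above, the CPU's memory ordering on m68k is already
 * strict enough that no synchronizing instruction is needed.
 */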

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
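
/*
 * Usage sketch (illustrative, not taken from this file): tas() returns
 * the previous value of the location, so a minimal test-and-set spin
 * lock could be built as
 *
 *	static volatile char lock;
 *
 *	while (tas(&lock))
 *		barrier();	(spin until our store of 1 wins)
 *	(critical section)
 *	lock = 0;
 */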

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
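
/*
 * The cast through an oversized dummy struct is a common gcc idiom: it
 * gives *__xg(ptr) a usable memory-operand type for the "m" constraints
 * below even though ptr is a void pointer, without telling the compiler
 * exactly how many bytes the asm may touch.
 */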

#ifndef CONFIG_RMW_INSNS
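/*
 * Without CONFIG_RMW_INSNS the cas instruction is avoided (not every
 * m68k machine can handle its indivisible read-modify-write bus
 * cycles), so the exchange is made atomic by masking interrupts around
 * a plain load/store pair instead.
 */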
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	save_flags(flags);
	cli();

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	restore_flags(flags);
	return tmp;
}
#else
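/*
 * With CONFIG_RMW_INSNS, use a cas loop instead: fetch the old value,
 * then compare-and-store the new one, retrying until no other access
 * has changed the location in between.  The old value ends up in x.
 */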
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif

#endif /* __KERNEL__ */

#endif /* _M68K_SYSTEM_H */