OpenCores Subversion repository: or1k
URL: https://opencores.org/ocsvn/or1k/or1k/trunk
File: or1k/trunk/linux/linux-2.4/include/asm-i386/system.h (rev 1774)

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;     /* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define prepare_to_switch()     do { } while(0)
#define switch_to(prev,next,last) do {                                  \
        asm volatile("pushl %%esi\n\t"                                  \
                     "pushl %%edi\n\t"                                  \
                     "pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %3,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %4\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
                     "popl %%edi\n\t"                                   \
                     "popl %%esi\n\t"                                   \
                     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
                      "=b" (last)                                       \
                     :"m" (next->thread.esp),"m" (next->thread.eip),    \
                      "a" (prev), "d" (next),                           \
                      "b" (prev));                                      \
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
        unsigned long __base;
        __asm__("movb %3,%%dh\n\t"
                "movb %2,%%dl\n\t"
                "shll $16,%%edx\n\t"
                "movw %1,%%dx"
                :"=&d" (__base)
                :"m" (*((addr)+2)),
                 "m" (*((addr)+4)),
                 "m" (*((addr)+7)));
        return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %0,%%" #seg "\n"          \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "pushl $0\n\t"                  \
                "popl %%" #seg "\n\t"           \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
                : :"m" (*(unsigned int *)&(value)))
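
/*
 * Illustrative sketch, not part of the original header: how a caller
 * might use loadsegment() to install a possibly bogus selector into
 * %fs. If the load faults, the .fixup section above silently loads the
 * null selector instead. The function name example_set_fs_selector and
 * its argument are hypothetical.
 */
static inline void example_set_fs_selector(unsigned int sel)
{
        loadsegment(fs, sel);   /* bad selectors end up as "pushl $0; popl %fs" */
}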

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__("movl %0,%%cr0": :"r" (x));

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr4(x) \
        __asm__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
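
/*
 * Illustrative sketch, not part of the original header: the lazy-FPU
 * convention these macros serve. stts() is used when switching away
 * from a task so that the next FPU instruction traps (#NM), and clts()
 * is used before handing the FPU back. The function names below are
 * hypothetical.
 */
static inline void example_fpu_defer(void)
{
        stts();         /* set CR0.TS: next FPU insn raises device-not-available */
}

static inline void example_fpu_grant(void)
{
        clts();         /* clear CR0.TS: FPU instructions run normally again */
}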

#endif  /* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64-bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                :       "D"(ptr),
                        "b"(low),
                        "c"(high)
                :       "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                                 unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)       *(((unsigned int*)&(x))+0)
#define ll_high(x)      *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                         unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
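
/*
 * Illustrative sketch, not part of the original header: publishing a
 * 64-bit value with set_64bit() so that a concurrent reader can never
 * observe a torn, half-written result. Assumes a CPU with cmpxchg8b
 * (Pentium or later); the function and parameter names are hypothetical.
 */
static inline void example_publish_u64(unsigned long long *slot,
                                       unsigned long long value)
{
        set_64bit(slot, value);         /* one atomic 64-bit store via lock cmpxchg8b */
}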

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *         strictly speaking the constraints are wrong, since *ptr is really
 *         an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}
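
/*
 * Illustrative sketch, not part of the original header: a minimal
 * test-and-set busy-wait lock built on tas()/xchg(). The real kernel
 * uses the spinlocks from <asm/spinlock.h>; the type and function
 * names below are hypothetical.
 */
struct example_tas_lock { volatile int slock; };

static inline void example_tas_acquire(struct example_tas_lock *l)
{
        while (tas(&l->slock))          /* atomically swap in 1; nonzero means it was held */
                while (l->slock)        /* spin read-only until it looks free, then retry  */
                        ;
}

static inline void example_tas_release(struct example_tas_lock *l)
{
        l->slock = 0;                   /* plain store; x86 does not reorder it before the
                                           stores made inside the critical section */
}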

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
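
/*
 * Illustrative sketch, not part of the original header: the usual
 * compare-and-exchange retry loop, here adding to an int with no other
 * locking. Assumes a CPU that has cmpxchg (i486 and later, cf.
 * CONFIG_X86_CMPXCHG above); the function and variable names are
 * hypothetical.
 */
static inline int example_atomic_add_return(volatile int *v, int delta)
{
        int old, new;

        do {
                old = *v;                       /* snapshot the current value      */
                new = old + delta;              /* value we would like to install  */
        } while (cmpxchg(v, old, new) != old);  /* somebody raced us: try again    */

        return new;
}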

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a nop for these.
 */

#define mb()    __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()   mb()

#ifdef CONFIG_X86_OOSTORE
#define wmb()   __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#else
#define wmb()   __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
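
/*
 * Illustrative sketch, not part of the original header: the usual
 * pairing of smp_wmb() on the producer side with smp_rmb() on the
 * consumer side. The variables example_data and example_ready and the
 * function names are hypothetical.
 */
extern int example_data, example_ready;

static inline void example_publish(int value)
{
        example_data = value;
        smp_wmb();                      /* data must be visible before the flag  */
        example_ready = 1;
}

static inline int example_consume(int *out)
{
        if (!example_ready)
                return 0;
        smp_rmb();                      /* don't read the data ahead of the flag */
        *out = example_data;
        return 1;
}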

/* interrupt control.. */
#define __save_flags(x)         __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x)      __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#define __cli()                 __asm__ __volatile__("cli": : :"memory")
#define __sti()                 __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti keeps interrupts masked for one more
   instruction, so none can be taken between the sti and the hlt */
#define safe_halt()             __asm__ __volatile__("sti; hlt": : :"memory")
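
/*
 * Illustrative sketch, not part of the original header: the idle-loop
 * pattern safe_halt() exists for. Work is checked with interrupts off;
 * if there is none, sti and hlt are issued back to back so a wakeup
 * interrupt cannot sneak in between them. example_need_resched is a
 * hypothetical stand-in for the real need_resched test.
 */
extern int example_need_resched;

static inline void example_idle_once(void)
{
        __cli();                        /* decide whether to sleep with interrupts off */
        if (example_need_resched) {
                __sti();                /* work pending: don't halt                    */
                return;
        }
        safe_halt();                    /* sti;hlt -- re-enable and halt atomically    */
}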

#define __save_and_cli(x)       do { __save_flags(x); __cli(); } while(0);
#define __save_and_sti(x)       do { __save_flags(x); __sti(); } while(0);

/* For spinlocks etc */
#if 0
#define local_irq_save(x)       __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_set(x)        __asm__ __volatile__("pushfl ; popl %0 ; sti":"=g" (x): /* no input */ :"memory")
#else
#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_set(x)        __save_and_sti(x)
#endif

#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()
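
/*
 * Illustrative sketch, not part of the original header: guarding a short
 * critical section against local interrupts with local_irq_save() and
 * local_irq_restore(). On SMP this protects only against code on the
 * same CPU; a spinlock would be needed as well. example_counter and the
 * function name are hypothetical.
 */
extern unsigned long example_counter;

static inline void example_bump_counter(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* save EFLAGS and disable interrupts    */
        example_counter++;              /* cannot be interrupted on this CPU     */
        local_irq_restore(flags);       /* put the interrupt flag back as found  */
}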

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
#define save_and_cli(x) do { save_flags(x); cli(); } while(0);
#define save_and_sti(x) do { save_flags(x); sti(); } while(0);

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)
#define save_and_sti(x) __save_and_sti(x)

#endif

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
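
/*
 * Illustrative sketch, not part of the original header: the intended
 * disable_hlt()/enable_hlt() bracket around I/O that must not have the
 * idle loop executing hlt underneath it (historically the floppy
 * driver). The function names example_guarded_io and example_do_io are
 * hypothetical.
 */
static inline void example_guarded_io(void (*example_do_io)(void))
{
        disable_hlt();                  /* keep the idle loop from halting the CPU */
        example_do_io();                /* timing-sensitive programmed I/O         */
        enable_hlt();                   /* hlt is allowed in the idle loop again   */
}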

extern unsigned long dmi_broken;
extern int is_sony_vaio_laptop;

#define BROKEN_ACPI_Sx          0x0001
#define BROKEN_INIT_AFTER_S1    0x0002
#define BROKEN_PNP_BIOS         0x0004

#endif
