/*
 * or1k/trunk/uclinux/uClinux-2.0.x/include/asm-i386/system.h  (rev 199)
 * Repository: https://opencores.org/ocsvn/or1k/or1k/trunk
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <asm/segment.h>

/*
 * Entry in the GDT where the first TSS is found. GDT layout:
 *   0 - null
 *   1 - not used
 *   2 - kernel code segment
 *   3 - kernel data segment
 *   4 - user code segment
 *   5 - user data segment
 * ...
 *   8 - TSS #0
 *   9 - LDT #0
 *  10 - TSS #1
 *  11 - LDT #1
 */
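/*
 * Each task owns two consecutive GDT slots (its TSS and its LDT), so the
 * _TSS()/_LDT() macros below step by 16 bytes (two 8-byte descriptors) per
 * task and shift the entry index left by 3 to turn it into a selector.
 */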
#define FIRST_TSS_ENTRY 8
#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
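/*
 * store_TR(n) is the inverse of load_TR(): it reads the task register with
 * str and converts the selector back into a task number.
 */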
#define store_TR(n) \
__asm__("str %%ax\n\t" \
        "subl %2,%%eax\n\t" \
        "shrl $4,%%eax" \
        :"=a" (n) \
        :"0" (0),"i" (FIRST_TSS_ENTRY<<3))

/* This special macro can be used to load a debugging register */

#define loaddebug(tsk,register) \
                __asm__("movl %0,%%edx\n\t" \
                        "movl %%edx,%%db" #register "\n\t" \
                        : /* no output */ \
                        :"m" (tsk->debugreg[register]) \
                        :"dx");


/*
 *      switch_to(n) should switch to task number n, first checking
 * that n isn't the current task, in which case it does nothing.
 * It also clears the TS flag if the task we switch to was the last
 * one to use the math co-processor.
 *
 * It also reloads the debug registers if necessary.
 */
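/*
 * The actual context switch is done by the "ljmp %0" in the macros below:
 * a far jump whose selector is the next task's TSS selector makes the CPU
 * perform a hardware task switch.  The "m" operand deliberately points 4
 * bytes before next->tss.tr so that the selector lands in the segment part
 * of the 6-byte far-pointer operand (the offset part is ignored).
 */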


#ifdef __SMP__
        /*
         *      Keep the lock depth straight. If we switch on an interrupt from
         *      kernel->user task we need to lose a depth, and if we switch the
         *      other way we need to gain a depth. Same layer switches come out
         *      the same.
         *
         *      We spot a switch in user mode because the kernel counter is the
         *      same as the interrupt counter depth. (We never switch during the
         *      message/invalidate IPI).
         *
         *      We fsave/fwait so that an exception goes off at the right time
         *      (as a call from the fsave or fwait in effect) rather than to
         *      the wrong process.
         */

#define switch_to(prev,next) do { \
        cli();\
        if(prev->flags&PF_USEDFPU) \
        { \
                __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
                __asm__ __volatile__("fwait"); \
                prev->flags&=~PF_USEDFPU;        \
        } \
        prev->lock_depth=syscall_count; \
        kernel_counter+=next->lock_depth-prev->lock_depth; \
        syscall_count=next->lock_depth; \
__asm__("pushl %%edx\n\t" \
        "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
        "movl 0x20(%%edx), %%edx\n\t" \
        "shrl $22,%%edx\n\t" \
        "and  $0x3C,%%edx\n\t" \
        "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
        "popl %%edx\n\t" \
        "ljmp %0\n\t" \
        "sti\n\t" \
        : /* no output */ \
        :"m" (*(((char *)&next->tss.tr)-4)), \
         "c" (next)); \
        /* Now maybe reload the debug registers */ \
        if(prev->debugreg[7]){ \
                loaddebug(prev,0); \
                loaddebug(prev,1); \
                loaddebug(prev,2); \
                loaddebug(prev,3); \
                loaddebug(prev,6); \
        } \
} while (0)

#else
#define switch_to(prev,next) do { \
__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
        "ljmp %0\n\t" \
        "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
        "jne 1f\n\t" \
        "clts\n" \
        "1:" \
        : /* no outputs */ \
        :"m" (*(((char *)&next->tss.tr)-4)), \
         "r" (prev), "r" (next)); \
        /* Now maybe reload the debug registers */ \
        if(prev->debugreg[7]){ \
                loaddebug(prev,0); \
                loaddebug(prev,1); \
                loaddebug(prev,2); \
                loaddebug(prev,3); \
                loaddebug(prev,6); \
        } \
} while (0)
#endif
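/*
 * _set_base() and _set_limit() patch the scattered base and limit fields of
 * an 8-byte segment descriptor in place: the base lives in bytes 2-4 and 7,
 * the limit in bytes 0-1 plus the low nibble of byte 6 (whose high nibble
 * holds flags and is preserved).  set_limit() converts a byte count into a
 * page-granular limit first.
 */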

#define _set_base(addr,base) \
__asm__("movw %%dx,%0\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%1\n\t" \
        "movb %%dh,%2" \
        : /* no output */ \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "d" (base) \
        :"dx")

#define _set_limit(addr,limit) \
__asm__("movw %%dx,%0\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %1,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%1" \
        : /* no output */ \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "d" (limit) \
        :"dx")

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
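/*
 * _get_base()/get_base() read the base back out of a descriptor.  get_limit()
 * uses the lsl instruction, which returns the byte-granular limit, so adding
 * one turns it into the segment size.
 */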

static inline unsigned long _get_base(char * addr)
{
        unsigned long __base;
        __asm__("movb %3,%%dh\n\t"
                "movb %2,%%dl\n\t"
                "shll $16,%%edx\n\t"
                "movw %1,%%dx"
                :"=&d" (__base)
                :"m" (*((addr)+2)),
                 "m" (*((addr)+4)),
                 "m" (*((addr)+7)));
        return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define stts() \
__asm__ __volatile__ ( \
        "movl %%cr0,%%eax\n\t" \
        "orl $8,%%eax\n\t" \
        "movl %%eax,%%cr0" \
        : /* no outputs */ \
        : /* no inputs */ \
        :"ax")


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
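/*
 * __xchg() below dispatches on the operand size.  On the x86 an xchg with a
 * memory operand is implicitly locked, so xchg()/tas() are atomic even on
 * SMP.  The __xchg_dummy cast only exists to make gcc treat ptr as a genuine
 * memory operand for the "m" constraint.
 */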
194
 
195
struct __xchg_dummy { unsigned long a[100]; };
196
#define __xg(x) ((struct __xchg_dummy *)(x))
197
 
198
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
199
{
200
        switch (size) {
201
                case 1:
202
                        __asm__("xchgb %b0,%1"
203
                                :"=q" (x)
204
                                :"m" (*__xg(ptr)), "0" (x)
205
                                :"memory");
206
                        break;
207
                case 2:
208
                        __asm__("xchgw %w0,%1"
209
                                :"=r" (x)
210
                                :"m" (*__xg(ptr)), "0" (x)
211
                                :"memory");
212
                        break;
213
                case 4:
214
                        __asm__("xchgl %0,%1"
215
                                :"=r" (x)
216
                                :"m" (*__xg(ptr)), "0" (x)
217
                                :"memory");
218
                        break;
219
        }
220
        return x;
221
}
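/*
 * Interrupt and barrier primitives.  mb() is a compiler-level barrier only
 * (an empty asm with a "memory" clobber); sti()/cli() enable and disable
 * interrupts; save_flags()/restore_flags() preserve EFLAGS, and with it the
 * interrupt flag, across a critical section via pushfl/popfl.
 */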

#define mb()  __asm__ __volatile__ (""   : : :"memory")
#define sti() __asm__ __volatile__ ("sti": : :"memory")
#define cli() __asm__ __volatile__ ("cli": : :"memory")

#define save_flags(x) \
__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")

#define restore_flags(x) \
__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")

#define iret() __asm__ __volatile__ ("iret": : :"memory")

#define _set_gate(gate_addr,type,dpl,addr) \
__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
        "movw %2,%%dx\n\t" \
        "movl %%eax,%0\n\t" \
        "movl %%edx,%1" \
        :"=m" (*((long *) (gate_addr))), \
         "=m" (*(1+(long *) (gate_addr))) \
        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
         "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
        :"ax","dx")

#define set_intr_gate(n,addr) \
        _set_gate(&idt[n],14,0,addr)

#define set_trap_gate(n,addr) \
        _set_gate(&idt[n],15,0,addr)

#define set_system_gate(n,addr) \
        _set_gate(&idt[n],15,3,addr)

#define set_call_gate(a,addr) \
        _set_gate(a,12,3,addr)
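/*
 * _set_seg_desc() builds an ordinary code/data descriptor: the constant
 * 0x00408000 sets the present bit and the 32-bit default-operation-size bit.
 */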

#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
        *((gate_addr)+1) = ((base) & 0xff000000) | \
                (((base) & 0x00ff0000)>>16) | \
                ((limit) & 0xf0000) | \
                ((dpl)<<13) | \
                (0x00408000) | \
                ((type)<<8); \
        *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
                ((limit) & 0x0ffff); }

#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
        "movw %%ax,%2\n\t" \
        "rorl $16,%%eax\n\t" \
        "movb %%al,%3\n\t" \
        "movb $" type ",%4\n\t" \
        "movb $0x00,%5\n\t" \
        "movb %%ah,%6\n\t" \
        "rorl $16,%%eax" \
        : /* no output */ \
        :"a" (addr+__PAGE_OFFSET), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
         "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
        )
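/*
 * set_tss_desc()/set_ldt_desc() install TSS and LDT descriptors in the GDT:
 * type byte 0x89 marks an available 32-bit TSS, 0x82 an LDT.
 */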

#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
#define set_ldt_desc(n,addr,size) \
        _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")

/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt;

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
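/*
 * Model-specific register, time-stamp counter and performance-counter
 * accessors.  The "=A" constraint returns the 64-bit result in the EDX:EAX
 * pair, which is where rdmsr/rdtsc/rdpmc deliver it; none of these
 * instructions exist on 386/486-class CPUs.
 */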

static __inline__ unsigned long long rdmsr(unsigned int msr)
{
        unsigned long long ret;
        __asm__ __volatile__("rdmsr"
                            : "=A" (ret)
                            : "c" (msr));
        return ret;
}

static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
{
        __asm__ __volatile__("wrmsr"
                            : /* no Outputs */
                            : "c" (msr), "A" (val));
}


static __inline__ unsigned long long rdtsc(void)
{
        unsigned long long ret;
        __asm__ __volatile__("rdtsc"
                            : "=A" (ret)
                            : /* no inputs */);
        return ret;
}

static __inline__ unsigned long long rdpmc(unsigned int counter)
{
        unsigned long long ret;
        __asm__ __volatile__("rdpmc"
                            : "=A" (ret)
                            : "c" (counter));
        return ret;
}

#endif
