OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [include/] [asm-x86_64/] [processor.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * include/asm-x86_64/processor.h
3
 *
4
 * Copyright (C) 1994 Linus Torvalds
5
 */
6
 
7
#ifndef __ASM_X86_64_PROCESSOR_H
8
#define __ASM_X86_64_PROCESSOR_H
9
 
10
#include <asm/segment.h>
11
#include <asm/page.h>
12
#include <asm/types.h>
13
#include <asm/sigcontext.h>
14
#include <asm/cpufeature.h>
15
#include <linux/config.h>
16
#include <linux/threads.h>
17
#include <asm/msr.h>
18
#include <asm/current.h>
19
#include <asm/system.h>
20
#include <asm/cpufeature.h>
21
 
22
#define TF_MASK         0x00000100
23
#define IF_MASK         0x00000200
24
#define IOPL_MASK       0x00003000
25
#define NT_MASK         0x00004000
26
#define VM_MASK         0x00020000
27
#define AC_MASK         0x00040000
28
#define VIF_MASK        0x00080000      /* virtual interrupt flag */
29
#define VIP_MASK        0x00100000      /* virtual interrupt pending */
30
#define ID_MASK         0x00200000
31
 
32
/*
33
 * Default implementation of macro that returns current
34
 * instruction pointer ("program counter").
35
 */
36
#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
37
 
38
/*
39
 *  CPU type and hardware bug flags. Kept separately for each CPU.
40
 */
41
 
42
struct cpuinfo_x86 {
43
        __u8    x86;            /* CPU family */
44
        __u8    x86_vendor;     /* CPU vendor */
45
        __u8    x86_model;
46
        __u8    x86_mask;
47
        int     cpuid_level;    /* Maximum supported CPUID level, -1=no CPUID */
48
        __u32   x86_capability[NCAPINTS];
49
        char    x86_vendor_id[16];
50
        char    x86_model_id[64];
51
        int     x86_cache_size;  /* in KB - valid for CPUS which support this
52
                                    call  */
53
        int     x86_clflush_size;
54
        int     x86_tlbsize;    /* number of 4K pages in DTLB/ITLB combined(in pages)*/
55
        __u8    x86_virt_bits, x86_phys_bits;
56
        __u32   x86_power;
57
        unsigned long loops_per_jiffy;
58
} ____cacheline_aligned;
59
 
60
#define X86_VENDOR_INTEL 0
61
#define X86_VENDOR_CYRIX 1
62
#define X86_VENDOR_AMD 2
63
#define X86_VENDOR_UMC 3
64
#define X86_VENDOR_NEXGEN 4
65
#define X86_VENDOR_CENTAUR 5
66
#define X86_VENDOR_RISE 6
67
#define X86_VENDOR_TRANSMETA 7
68
#define X86_VENDOR_UNKNOWN 0xff
69
 
70
extern struct cpuinfo_x86 boot_cpu_data;
71
extern struct tss_struct init_tss[NR_CPUS];
72
 
73
#ifdef CONFIG_SMP
74
extern struct cpuinfo_x86 cpu_data[];
75
#define current_cpu_data cpu_data[smp_processor_id()]
76
#else
77
#define cpu_data (&boot_cpu_data)
78
#define current_cpu_data boot_cpu_data
79
#endif
80
 
81
extern char ignore_irq13;
82
 
83
extern void identify_cpu(struct cpuinfo_x86 *);
84
extern void print_cpu_info(struct cpuinfo_x86 *);
85
extern void dodgy_tsc(void);
86
 
87
/*
88
 * EFLAGS bits
89
 */
90
#define X86_EFLAGS_CF   0x00000001 /* Carry Flag */
91
#define X86_EFLAGS_PF   0x00000004 /* Parity Flag */
92
#define X86_EFLAGS_AF   0x00000010 /* Auxillary carry Flag */
93
#define X86_EFLAGS_ZF   0x00000040 /* Zero Flag */
94
#define X86_EFLAGS_SF   0x00000080 /* Sign Flag */
95
#define X86_EFLAGS_TF   0x00000100 /* Trap Flag */
96
#define X86_EFLAGS_IF   0x00000200 /* Interrupt Flag */
97
#define X86_EFLAGS_DF   0x00000400 /* Direction Flag */
98
#define X86_EFLAGS_OF   0x00000800 /* Overflow Flag */
99
#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
100
#define X86_EFLAGS_NT   0x00004000 /* Nested Task */
101
#define X86_EFLAGS_RF   0x00010000 /* Resume Flag */
102
#define X86_EFLAGS_VM   0x00020000 /* Virtual Mode */
103
#define X86_EFLAGS_AC   0x00040000 /* Alignment Check */
104
#define X86_EFLAGS_VIF  0x00080000 /* Virtual Interrupt Flag */
105
#define X86_EFLAGS_VIP  0x00100000 /* Virtual Interrupt Pending */
106
#define X86_EFLAGS_ID   0x00200000 /* CPUID detection flag */
107
 
108
/*
109
 *      Generic CPUID function
110
 *      FIXME: This really belongs to msr.h
111
 */
112
extern inline void cpuid(int op, int *eax, int *ebx, int *ecx, int *edx)
113
{
114
        __asm__("cpuid"
115
                : "=a" (*eax),
116
                  "=b" (*ebx),
117
                  "=c" (*ecx),
118
                  "=d" (*edx)
119
                : "0" (op));
120
}
121
 
122
/*
123
 * CPUID functions returning a single datum
124
 */
125
extern inline unsigned int cpuid_eax(unsigned int op)
126
{
127
        unsigned int eax;
128
 
129
        __asm__("cpuid"
130
                : "=a" (eax)
131
                : "0" (op)
132
                : "bx", "cx", "dx");
133
        return eax;
134
}
135
extern inline unsigned int cpuid_ebx(unsigned int op)
136
{
137
        unsigned int eax, ebx;
138
 
139
        __asm__("cpuid"
140
                : "=a" (eax), "=b" (ebx)
141
                : "0" (op)
142
                : "cx", "dx" );
143
        return ebx;
144
}
145
extern inline unsigned int cpuid_ecx(unsigned int op)
146
{
147
        unsigned int eax, ecx;
148
 
149
        __asm__("cpuid"
150
                : "=a" (eax), "=c" (ecx)
151
                : "0" (op)
152
                : "bx", "dx" );
153
        return ecx;
154
}
155
extern inline unsigned int cpuid_edx(unsigned int op)
156
{
157
        unsigned int eax, edx;
158
 
159
        __asm__("cpuid"
160
                : "=a" (eax), "=d" (edx)
161
                : "0" (op)
162
                : "bx", "cx");
163
        return edx;
164
}
165
 
166
/*
167
 * Intel CPU features in CR4
168
 */
169
#define X86_CR4_VME             0x0001  /* enable vm86 extensions */
170
#define X86_CR4_PVI             0x0002  /* virtual interrupts flag enable */
171
#define X86_CR4_TSD             0x0004  /* disable time stamp at ipl 3 */
172
#define X86_CR4_DE              0x0008  /* enable debugging extensions */
173
#define X86_CR4_PSE             0x0010  /* enable page size extensions */
174
#define X86_CR4_PAE             0x0020  /* enable physical address extensions */
175
#define X86_CR4_MCE             0x0040  /* Machine check enable */
176
#define X86_CR4_PGE             0x0080  /* enable global pages */
177
#define X86_CR4_PCE             0x0100  /* enable performance counters at ipl 3 */
178
#define X86_CR4_OSFXSR          0x0200  /* enable fast FPU save and restore */
179
#define X86_CR4_OSXMMEXCPT      0x0400  /* enable unmasked SSE exceptions */
180
 
181
/*
182
 * Save the cr4 feature set we're using (ie
183
 * Pentium 4MB enable and PPro Global page
184
 * enable), so that any CPU's that boot up
185
 * after us can get the correct flags.
186
 */
187
extern unsigned long mmu_cr4_features;
188
 
189
static inline void set_in_cr4 (unsigned long mask)
190
{
191
        mmu_cr4_features |= mask;
192
        __asm__("movq %%cr4,%%rax\n\t"
193
                "orq %0,%%rax\n\t"
194
                "movq %%rax,%%cr4\n"
195
                : : "irg" (mask)
196
                :"ax");
197
}
198
 
199
static inline void clear_in_cr4 (unsigned long mask)
200
{
201
        mmu_cr4_features &= ~mask;
202
        __asm__("movq %%cr4,%%rax\n\t"
203
                "andq %0,%%rax\n\t"
204
                "movq %%rax,%%cr4\n"
205
                : : "irg" (~mask)
206
                :"ax");
207
}
208
 
209
/*
210
 *      Cyrix CPU configuration register indexes
211
 */
212
#define CX86_CCR0 0xc0
213
#define CX86_CCR1 0xc1
214
#define CX86_CCR2 0xc2
215
#define CX86_CCR3 0xc3
216
#define CX86_CCR4 0xe8
217
#define CX86_CCR5 0xe9
218
#define CX86_CCR6 0xea
219
#define CX86_CCR7 0xeb
220
#define CX86_DIR0 0xfe
221
#define CX86_DIR1 0xff
222
#define CX86_ARR_BASE 0xc4
223
#define CX86_RCR_BASE 0xdc
224
 
225
/*
226
 *      Cyrix CPU indexed register access macros
227
 */
228
 
229
#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
230
 
231
#define setCx86(reg, data) do { \
232
        outb((reg), 0x22); \
233
        outb((data), 0x23); \
234
} while (0)
235
 
236
/*
237
 * Bus types
238
 */
239
#define EISA_bus 0
240
#define MCA_bus 0
241
#define MCA_bus__is_a_macro
242
 
243
 
244
/*
245
 * User space process size: 512GB - 1GB (default).
246
 */
247
#define TASK_SIZE       (0x0000007fc0000000)
248
 
249
/* This decides where the kernel will search for a free chunk of vm
250
 * space during mmap's.
251
 */
252
 
253
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
254
#define TASK_UNMAPPED_32 (IA32_PAGE_OFFSET / 3) 
255
#define TASK_UNMAPPED_64 (TASK_SIZE/3) 
256
#define TASK_UNMAPPED_BASE      \
257
        ((current->thread.flags & THREAD_IA32) ? TASK_UNMAPPED_32 : TASK_UNMAPPED_64)
258
 
259
/*
260
 * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
261
 */
262
#define IO_BITMAP_SIZE  32
263
#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
264
#define INVALID_IO_BITMAP_OFFSET 0x8000
265
 
266
struct i387_fxsave_struct {
267
        u16     cwd;
268
        u16     swd;
269
        u16     twd;
270
        u16     fop;
271
        u64     rip;
272
        u64     rdp;
273
        u32     mxcsr;
274
        u32     mxcsr_mask;
275
        u32     st_space[32];   /* 8*16 bytes for each FP-reg = 128 bytes */
276
        u32     xmm_space[64];  /* 16*16 bytes for each XMM-reg = 128 bytes */
277
        u32     padding[24];
278
} __attribute__ ((aligned (16)));
279
 
280
union i387_union {
281
        struct i387_fxsave_struct       fxsave;
282
};
283
 
284
typedef struct {
285
        unsigned long seg;
286
} mm_segment_t;
287
 
288
struct tss_struct {
289
        u32 reserved1;
290
        u64 rsp0;
291
        u64 rsp1;
292
        u64 rsp2;
293
        u64 reserved2;
294
        u64 ist[7];
295
        u32 reserved3;
296
        u32 reserved4;
297
        u16 reserved5;
298
        u16 io_map_base;
299
        u32 io_bitmap[IO_BITMAP_SIZE];
300
} __attribute__((packed)) ____cacheline_aligned;
301
 
302
struct thread_struct {
303
        unsigned long   rsp0;
304
        unsigned long   rip;
305
        unsigned long   rsp;
306
        unsigned long   userrsp;        /* Copy from PDA */
307
        unsigned long   fs;
308
        unsigned long   gs;
309
        unsigned short  es, ds, fsindex, gsindex;
310
        enum {
311
                THREAD_IA32 = 0x0001,
312
        } flags;
313
/* Hardware debugging registers */
314
        unsigned long   debugreg[8];  /* %%db0-7 debug registers */
315
/* fault info */
316
        unsigned long   cr2, trap_no, error_code;
317
/* floating point info */
318
        union i387_union        i387;
319
        u32     *io_bitmap_ptr;
320
};
321
 
322
#define INIT_THREAD  {                                          \
323
}
324
 
325
#define INIT_MMAP \
326
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
327
 
328
#define STACKFAULT_STACK 1
329
#define DOUBLEFAULT_STACK 2 
330
#define NMI_STACK 3 
331
#define N_EXCEPTION_STACKS 3  /* hw limit: 7 */
332
#define EXCEPTION_STKSZ PAGE_SIZE
333
#define EXCEPTION_STK_ORDER 0
334
 
335
extern void load_gs_index(unsigned);
336
 
337
#define start_thread(regs,new_rip,new_rsp) do { \
338
        asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0));       \
339
        load_gs_index(0);                                                        \
340
        (regs)->rip = (new_rip);                                                 \
341
        (regs)->rsp = (new_rsp);                                                 \
342
        write_pda(oldrsp, (new_rsp));                                            \
343
        (regs)->cs = __USER_CS;                                                  \
344
        (regs)->ss = __USER_DS;                                                  \
345
        (regs)->eflags = 0x200;                                                  \
346
        set_fs(USER_DS);                                                         \
347
} while(0)
348
 
349
struct task_struct;
350
struct mm_struct;
351
 
352
/* Free all resources held by a thread. */
353
extern void release_thread(struct task_struct *);
354
/*
355
 * create a kernel thread without removing it from tasklists
356
 */
357
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
358
extern int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
359
 
360
/* Copy and release all segment info associated with a VM */
361
extern void copy_segments(struct task_struct *p, struct mm_struct * mm);
362
extern void release_segments(struct mm_struct * mm);
363
 
364
/*
365
 * Return saved PC of a blocked thread.
366
 * What is this good for? it will be always the scheduler or ret_from_fork.
367
 */
368
 
369
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
370
{
371
        return *(unsigned long *)(t->rsp - 8);
372
}
373
 
374
extern unsigned long get_wchan(struct task_struct *p);
375
#define KSTK_EIP(tsk) \
376
        (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
377
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
378
 
379
/* Note: most of the infrastructure to separate stack and task_struct
380
   are already there. When you run out of stack try this first. */
381
#define alloc_task_struct() \
382
        ((struct task_struct *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
383
#define free_task_struct(p) free_pages((unsigned long) (p), 1)
384
#define get_task_struct(tsk)      atomic_inc(&virt_to_page(tsk)->count)
385
 
386
#define init_task       (init_task_union.task)
387
#define init_stack      (init_task_union.stack)
388
 
389
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
390
extern inline void rep_nop(void)
391
{
392
        __asm__ __volatile__("rep;nop":::"memory");
393
}
394
 
395
/* Avoid speculative execution by the CPU */
396
extern inline void sync_core(void)
397
{
398
        int tmp;
399
        asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
400
}
401
 
402
#define ARCH_HAS_PREFETCH
403
#define ARCH_HAS_SPINLOCK_PREFETCH
404
 
405
#ifdef CONFIG_MK8
406
#define ARCH_HAS_PREFETCHW
407
#define prefetchw(x) __builtin_prefetch((x),1)
408
#define spin_lock_prefetch(x)  prefetchw(x)
409
#else
410
#define spin_lock_prefetch(x)  prefetch(x)
411
#endif
412
 
413
#define prefetch(x) __builtin_prefetch((x),0)
414
 
415
#define cpu_relax()   rep_nop()
416
 
417
 
418
static __inline__ void __monitor(const void *eax, unsigned long ecx,
419
               unsigned long edx)
420
{
421
       /* "monitor %eax,%ecx,%edx;" */
422
       asm volatile(
423
               ".byte 0x0f,0x01,0xc8;"
424
               : :"a" (eax), "c" (ecx), "d"(edx));
425
}
426
 
427
static __inline__ void __mwait(unsigned long eax, unsigned long ecx)
428
{
429
       /* "mwait %eax,%ecx;" */
430
       asm volatile(
431
               ".byte 0x0f,0x01,0xc9;"
432
               : :"a" (eax), "c" (ecx));
433
}
434
 
435
#define ARCH_HAS_SMP_BALANCE 1
436
 
437
#endif /* __ASM_X86_64_PROCESSOR_H */
438
 
439
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.