#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/types.h>
#include <asm/ustack.h>

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#define IA64_NUM_PMC_REGS	32
#define IA64_NUM_PMD_REGS	32

#define DEFAULT_MAP_BASE	0x2000000000000000
#define DEFAULT_TASK_SIZE	0xa000000000000000

/*
 * TASK_SIZE really is a mis-nomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

/*
 * Bus types
 */
#define EISA_bus 0
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
#define IA64_THREAD_XSTACK	(__IA64_UL(1) << 8)	/* stack executable by default? */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)


/*
 * This shift should be large enough to be able to represent
 * 1000000/itc_freq with good accuracy while being small enough to fit
 * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
 */
#define IA64_USEC_PER_CYC_SHIFT	41
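/*
 * Illustrative sketch, not part of the original header: the per-CPU
 * usec_per_cyc value is assumed to be precomputed as
 * (1000000UL << IA64_USEC_PER_CYC_SHIFT) / itc_freq (see cpuinfo_ia64 below),
 * so converting an ITC cycle delta to microseconds is a multiply followed by
 * a shift:
 *
 *	usec = (delta_cycles * local_cpu_data->usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT;
 *
 * (Kept inside a comment so the header itself is unchanged; local_cpu_data is
 * only available in the !__ASSEMBLY__ part below.)
 */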

#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/cache.h>

#include <asm/fpu.h>
#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	/* irq_stat must be 64-bit aligned */
	union {
		struct {
			__u32 irq_count;
			__u32 bh_count;
		} f;
		__u64 irq_and_bh_counts;
	} irq_stat;
	__u32 softirq_pending;
	__u32 phys_stacked_size_p8;	/* size of physical stacked registers + 8 */
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 *pgd_quick;
	__u64 *pmd_quick;
	__u64 *pte_quick;
	__u64 pgtable_cache_sz;
	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	__u8 need_tlb_flush;
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 usec_per_cyc;	/* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
	void *mmu_gathers;
# ifdef CONFIG_PERFMON
	unsigned long pfm_syst_info;
# endif
#ifdef CONFIG_SMP
	int processor;
	__u64 loops_per_jiffy;
	__u64 ipi_count;
	__u64 prof_counter;
	__u64 prof_multiplier;
	union {
		/*
		 * This is written to by *other* CPUs,
		 * so isolate it in its own cacheline.
		 */
		__u64 operation;
		char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
	} ipi;
#endif
#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
	int nodeid;
	struct cpuinfo_ia64 *cpu_data[NR_CPUS];
#endif
	/* Platform specific word.  MUST BE LAST IN STRUCT */
	__u64 platform_specific;
} __attribute__ ((aligned (PAGE_SIZE)));

/*
 * The "local" data pointer.  It points to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 */
#define local_cpu_data		((struct cpuinfo_ia64 *) PERCPU_ADDR)

/*
 * On NUMA systems, cpu_data for each cpu is allocated during cpu_init() & is allocated on
 * the node that contains the cpu.  This minimizes off-node memory references.  cpu_data
 * for each cpu contains an array of pointers to the cpu_data structures of each of the
 * other cpus.
 *
 * On non-NUMA systems, cpu_data is a static array allocated at compile time.  References
 * to the cpu_data of another cpu are done by direct references to the appropriate entry of
 * the array.
 */
#ifdef CONFIG_NUMA
# define cpu_data(cpu)		local_cpu_data->cpu_data[cpu]
# define numa_node_id()		(local_cpu_data->nodeid)
#else
extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
# define cpu_data(cpu)		(&_cpu_data[cpu])
#endif

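/*
 * Illustrative sketch, not part of the original header: with either
 * definition of cpu_data() above, per-CPU information is read the same way,
 * e.g.
 *
 *	unsigned long freq = cpu_data(cpu)->itc_freq;
 *
 * On NUMA kernels this goes through the node-local pointer array hanging off
 * local_cpu_data; on non-NUMA kernels it indexes the static _cpu_data[] array
 * directly.
 */
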
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int *) (addr));								\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int *) (addr));								\
})

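/*
 * Illustrative sketch, not part of the original header: these macros are the
 * per-arch hooks behind the generic PR_SET_UNALIGN/PR_GET_UNALIGN (and
 * PR_SET_FPEMU/PR_GET_FPEMU) prctl() calls.  Assuming the usual PR_UNALIGN_*
 * values of 1 (no-print) and 2 (SIGBUS):
 *
 *	SET_UNALIGN_CTL(current, 2);		// unaligned user access => SIGBUS
 *	GET_UNALIGN_CTL(current, addr);		// put_user()s the current setting to addr
 */
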
struct siginfo;

struct thread_struct {
	__u64 ksp;			/* kernel stack pointer */
	unsigned long flags;		/* various flags */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	__u64 rbs_bot;			/* the base address for the RBS */
	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
# define INIT_THREAD_IA32	0, 0, 0x17800000037fULL, 0, 0, 0, 0,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmc[IA64_NUM_PMC_REGS];
	__u64 pmd[IA64_NUM_PMD_REGS];
	unsigned long pfm_ovfl_block_reset;	/* non-zero if we need to block or reset regs on ovfl */
	void *pfm_context;			/* pointer to detailed PMU context */
	atomic_t pfm_notifiers_check;		/* when >0, will cleanup ctx_notify_task in tasklist */
	atomic_t pfm_owners_check;		/* when >0, will cleanup ctx_owner in tasklist */
	void *pfm_smpl_buf_list;		/* list of sampling buffers to vfree */
# define INIT_THREAD_PM		{0, }, {0, }, 0, NULL, {0}, {0}, NULL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
	int last_fph_cpu;
};

#define INIT_THREAD {						\
	0,				/* ksp */		\
	0,				/* flags */		\
	DEFAULT_MAP_BASE,		/* map_base */		\
	DEFAULT_TASK_SIZE,		/* task_size */		\
	DEFAULT_USER_STACK_SIZE,	/* rbs_bot */		\
	0,				/* siginfo */		\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	{0, },				/* dbr */		\
	{0, },				/* ibr */		\
	{{{{0}}}, },			/* fph */		\
	-1				/* last_fph_cpu*/	\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;		/* allocate 16 byte scratch area */		\
	if (!__builtin_expect (current->mm->dumpable, 1)) {					\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0;								\
		regs->pr = 0;									\
		/*										\
		 * XXX fix me: everything below can go away once we stop preserving scratch	\
		 * regs on a system call.							\
		 */										\
		regs->b6 = 0;									\
		regs->r1 = 0; regs->r2 = 0; regs->r3 = 0;					\
		regs->r13 = 0; regs->r14 = 0; regs->r15 = 0;					\
		regs->r9 = 0; regs->r11 = 0;							\
		regs->r16 = 0; regs->r17 = 0; regs->r18 = 0; regs->r19 = 0;			\
		regs->r20 = 0; regs->r21 = 0; regs->r22 = 0; regs->r23 = 0;			\
		regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0;			\
		regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0;			\
		regs->ar_ccv = 0;								\
		regs->ar_csd = 0;								\
		regs->ar_ssd = 0;								\
		regs->b0 = 0; regs->b7 = 0;							\
		regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0;					\
		regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0;					\
		regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0;					\
		regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0;					\
		regs->f10.u.bits[0] = 0; regs->f10.u.bits[1] = 0;				\
		regs->f11.u.bits[0] = 0; regs->f11.u.bits[1] = 0;				\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#ifdef CONFIG_PERFMON
extern void release_thread (struct task_struct *task);
#else
# define release_thread(dead_task)
#endif

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (ie the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be free'd until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern int arch_kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);

/* Copy and release all segment info associated with a VM */
#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

static inline unsigned long
ia64_get_kr (unsigned long regnum)
{
	unsigned long r = 0;

	switch (regnum) {
	case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
	case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
	case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
	case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
	case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
	case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
	case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
	case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
	}
	return r;
}

static inline void
ia64_set_kr (unsigned long regnum, unsigned long r)
{
	switch (regnum) {
	case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
	case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
	case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
	case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
	case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
	case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
	case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
	case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
	}
}
/* Return TRUE if task T owns the fph partition of the CPU we're running on. */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/* Mark task T as owning the fph partition of the CPU we're running on. */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable()	asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline void
ia64_fc (void *addr)
{
	asm volatile ("fc %0" :: "r"(addr) : "memory");
}

static inline void
ia64_sync_i (void)
{
	asm volatile (";; sync.i" ::: "memory");
}

static inline void
ia64_srlz_i (void)
{
	asm volatile (";; srlz.i ;;" ::: "memory");
}

static inline void
ia64_srlz_d (void)
{
	asm volatile (";; srlz.d" ::: "memory");
}

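/*
 * Illustrative sketch, not part of the original header: these primitives are
 * typically combined when newly written instructions must become visible to
 * the instruction fetch pipeline (the architected fc/sync.i/srlz.i sequence
 * used by flush_icache_range()-style code):
 *
 *	ia64_fc(addr);		// flush the cache line holding the new code
 *	ia64_sync_i();		// make the flush visible to instruction fetch
 *	ia64_srlz_i();		// serialize before executing the new code
 */
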
static inline __u64
ia64_get_rr (__u64 reg_bits)
{
	__u64 r;
	asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
	return r;
}

static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
	asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}

static inline __u64
ia64_get_dcr (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.dcr" : "=r"(r));
	return r;
}

static inline void
ia64_set_dcr (__u64 val)
{
	asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_lid (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.lid" : "=r"(r));
	return r;
}

static inline void
ia64_invala (void)
{
	asm volatile ("invala" ::: "memory");
}

/*
 * Save the processor status flags in FLAGS and then clear the interrupt collection and
 * interrupt enable bits.  Don't trigger any mandatory RSE references while this bit is
 * off!
 */
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	if (target_mask & 0x1)
		asm volatile ("itr.i itr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itr.d dtr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
}

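/*
 * Illustrative sketch, not part of the original header: pinning a data
 * translation register is normally bracketed by ia64_clear_ic()/ia64_set_psr()
 * so that no interruptions or TLB faults are taken while the insertion is in
 * flight.  The slot number (2) and page-size (16 => 64KB) below are made-up
 * example values and "pte" is assumed to be a fully formed PTE:
 *
 *	unsigned long psr;
 *
 *	psr = ia64_clear_ic();			// psr.i/psr.ic off, old psr saved
 *	ia64_itr(0x2, 2, vaddr, pte, 16);	// insert into dtr[2]
 *	ia64_set_psr(psr);			// restore psr.l
 *	ia64_srlz_i();				// serialize before using the mapping
 */
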
/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
	if (target_mask & 0x2)
		asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB). */
static inline void
ia64_set_iva (void *ivt_addr)
{
	asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}

static inline __u64
ia64_get_cpuid (__u64 regnum)
{
	__u64 r;

	asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
	return r;
}

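/*
 * Illustrative sketch, not part of the original header: cpuid[0] and
 * cpuid[1] together hold the 16-byte vendor string that identify_cpu()
 * copies into cpuinfo_ia64.vendor, roughly:
 *
 *	union { __u64 w[2]; char s[16]; } v;
 *
 *	v.w[0] = ia64_get_cpuid(0);
 *	v.w[1] = ia64_get_cpuid(1);
 *	// v.s now holds the vendor string, e.g. "GenuineIntel"
 */
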
static inline void
ia64_eoi (void)
{
	asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
}

#ifdef GAS_HAS_HINT_INSN
static inline void
ia64_hint_pause (void)
{
	asm volatile ("hint @pause" ::: "memory");
}

#define cpu_relax()	ia64_hint_pause()
#else
#define cpu_relax()	barrier()
#endif

static inline void
ia64_set_lrr1 (unsigned long val)
{
	asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
}

static inline void
ia64_set_pmv (__u64 val)
{
	asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_pmc (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline __u64
ia64_get_pmd (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}

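/*
 * Illustrative sketch, not part of the original header: the UNaT bit is
 * simply bits 8..3 of the spill address, so a register spilled to, say,
 * 0xe00000000000a0e8 corresponds to bit (0xe8 >> 3) & 0x3f == 29, and
 * recording a NaT for it (unat being whatever save area is in use) looks
 * like:
 *
 *	ia64_set_unat(&unat, (void *) 0xe00000000000a0e8, 1);
 */
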
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	/* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead!  */
	struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);

	unw_init_from_blocked_task(&info, p);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; })

#define THREAD_SIZE	IA64_STK_OFFSET
/* NOTE: The task struct and the stacks are allocated together.  */
#define alloc_task_struct() \
	((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p)	free_pages((unsigned long)(p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)

static inline void
ia64_set_cmcv (__u64 val)
{
	asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_cmcv (void)
{
	__u64 val;

	asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
	return val;
}

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
	return r;
}

static inline void
ia64_set_tpr (__u64 val)
{
	asm volatile ("mov cr.tpr=%0" :: "r"(val));
}

static inline __u64
ia64_get_tpr (void)
{
	__u64 r;
	asm volatile ("mov %0=cr.tpr" : "=r"(r));
	return r;
}

static inline void
ia64_set_irr0 (__u64 val)
{
	asm volatile("mov cr.irr0=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr0 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile("mov %0=cr.irr0" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr1 (__u64 val)
{
	asm volatile("mov cr.irr1=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr1 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile("mov %0=cr.irr1" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr2 (__u64 val)
{
	asm volatile("mov cr.irr2=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr2 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile("mov %0=cr.irr2" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr3 (__u64 val)
{
	asm volatile("mov cr.irr3=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr3 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr3" : "=r"(val));
	return val;
}

static inline __u64
ia64_get_gp(void)
{
	__u64 val;

	asm ("mov %0=gp" : "=r"(val));
	return val;
}

static inline void
ia64_set_ibr (__u64 regnum, __u64 value)
{
	asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
}

static inline __u64
ia64_get_ibr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))

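/*
 * Illustrative sketch, not part of the original header, showing the rotate
 * helpers in action:
 *
 *	ia64_rotr(0x00000000000000ffUL, 8) == 0xff00000000000000UL
 *	ia64_rotl(0xff00000000000000UL, 8) == 0x00000000000000ffUL
 */
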
static inline __u64
ia64_thash (__u64 addr)
{
	__u64 result;
	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
	return result;
}

static inline __u64
ia64_tpa (__u64 addr)
{
	__u64 result;
	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
	return result;
}

/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE 256

extern inline void
prefetch (const void *x)
{
	__asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
}

extern inline void
prefetchw (const void *x)
{
	__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
}

#define spin_lock_prefetch(x)	prefetchw(x)

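/*
 * Illustrative sketch, not part of the original header: a typical use of
 * prefetch() is to issue the lfetch for the next node of a list before
 * working on the current one (process() and struct foo are hypothetical):
 *
 *	for (p = head->next; p != head; p = p->next) {
 *		prefetch(p->next);
 *		process(list_entry(p, struct foo, list));
 *	}
 */
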
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */