#ifndef _ASM_IA64_SYSTEM_H
#define _ASM_IA64_SYSTEM_H

/*
 * System defines.  Note that this is included both from .c and .S
 * files, so it contains only defines, not any C code.  This is based
 * on information published in the Processor Abstraction Layer
 * and the System Abstraction Layer manual.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#include <linux/config.h>

#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>

#define KERNEL_START		(PAGE_OFFSET + 68*1024*1024)

#define GATE_ADDR		(0xa000000000000000 + PAGE_SIZE)
#define PERCPU_ADDR		(0xa000000000000000 + 2*PAGE_SIZE)

#ifndef __ASSEMBLY__

#include <linux/kernel.h>
#include <linux/types.h>

struct pci_vector_struct {
	__u16 segment;	/* PCI Segment number */
	__u16 bus;	/* PCI Bus number */
	__u32 pci_id;	/* ACPI split 16 bits device, 16 bits function (see section 6.1.1) */
	__u8 pin;	/* PCI PIN (0 = A, 1 = B, 2 = C, 3 = D) */
	__u32 irq;	/* IRQ assigned */
};

extern struct ia64_boot_param {
	__u64 command_line;		/* physical address of command line arguments */
	__u64 efi_systab;		/* physical address of EFI system table */
	__u64 efi_memmap;		/* physical address of EFI memory map */
	__u64 efi_memmap_size;		/* size of EFI memory map */
	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
	__u32 efi_memdesc_version;	/* memory descriptor version */
	struct {
		__u16 num_cols;	/* number of columns on console output device */
		__u16 num_rows;	/* number of rows on console output device */
		__u16 orig_x;	/* cursor's x position */
		__u16 orig_y;	/* cursor's y position */
	} console_info;
	__u64 fpswa;		/* physical address of the fpswa interface */
	__u64 initrd_start;	/* physical address of initial ramdisk */
	__u64 initrd_size;	/* size of initial ramdisk, in bytes */
} *ia64_boot_param;
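
/*
 * Usage sketch (illustrative only): the addresses in ia64_boot_param
 * are physical, so callers are expected to convert them before
 * dereferencing, e.g.:
 *
 *	const char *cmdline = __va(ia64_boot_param->command_line);
 */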
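
/*
 * Stop bit: ";;" ends the current IA-64 instruction group, and the
 * "memory" clobber keeps the compiler from moving memory accesses
 * across this point.
 */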
static inline void
ia64_insn_group_barrier (void)
{
	__asm__ __volatile__ (";;" ::: "memory");
}

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():	Guarantees that all preceding stores to memory-
 *		like regions are visible before any subsequent
 *		stores and that all following stores will be
 *		visible only after all previous stores.
 *   rmb():	Like wmb(), but for reads.
 *   mb():	wmb()/rmb() combo, i.e., all previous memory
 *		accesses are visible before all subsequent
 *		accesses and vice versa.  This is also known as
 *		a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */
#define mb()	__asm__ __volatile__ ("mf" ::: "memory")
#define rmb()	mb()
#define wmb()	mb()

#ifdef CONFIG_SMP
# define smp_mb()	mb()
# define smp_rmb()	rmb()
# define smp_wmb()	wmb()
#else
# define smp_mb()	barrier()
# define smp_rmb()	barrier()
# define smp_wmb()	barrier()
#endif

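/*
 * Usage sketch (illustrative only; "data" and "ready" are hypothetical
 * variables shared between two CPUs):
 *
 *	writer:				reader:
 *		data = 42;			while (!ready)
 *		smp_wmb();				;
 *		ready = 1;			smp_rmb();
 *						assert(data == 42);
 *
 * The smp_wmb() keeps the store to "ready" from becoming visible
 * before the store to "data"; the smp_rmb() keeps the read of "data"
 * from being satisfied before the read of "ready".
 */
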
/*
 * XXX check on these---I suspect what Linus really wants here is
 * acquire vs release semantics but we can't discuss this stuff with
 * Linus just yet.  Grrr...
 */
#define set_mb(var, value)	do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value)	do { (var) = (value); mb(); } while (0)

#define safe_halt()		ia64_pal_halt_light()	/* PAL_HALT_LIGHT */

/*
 * The group barrier in front of the rsm & ssm is necessary to ensure
 * that none of the previous instructions in the same group are
 * affected by the rsm/ssm.
 */
/* For spinlocks etc */

#ifdef CONFIG_IA64_DEBUG_IRQ

extern unsigned long last_cli_ip;

# define local_irq_save(x)								\
do {											\
	unsigned long ip, psr;								\
											\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory");	\
	if (psr & IA64_PSR_I) {								\
		__asm__ ("mov %0=ip" : "=r"(ip));					\
		last_cli_ip = ip;							\
	}										\
	(x) = psr;									\
} while (0)

# define local_irq_disable()								\
do {											\
	unsigned long ip, psr;								\
											\
	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;" : "=r" (psr) :: "memory");	\
	if (psr & IA64_PSR_I) {								\
		__asm__ ("mov %0=ip" : "=r"(ip));					\
		last_cli_ip = ip;							\
	}										\
} while (0)

# define local_irq_set(x)				\
do {							\
	unsigned long psr;				\
							\
	__asm__ __volatile__ ("mov %0=psr;;"		\
			      "ssm psr.i;;"		\
			      "srlz.d"			\
			      : "=r" (psr) :: "memory");	\
	(x) = psr;					\
} while (0)

# define local_irq_restore(x)						\
do {									\
	unsigned long ip, old_psr, psr = (x);				\
									\
	__asm__ __volatile__ ("mov %0=psr;"				\
			      "cmp.ne p6,p7=%1,r0;;"			\
			      "(p6) ssm psr.i;"				\
			      "(p7) rsm psr.i;;"			\
			      "(p6) srlz.d"				\
			      : "=&r" (old_psr) : "r"((psr) & IA64_PSR_I) \
			      : "p6", "p7", "memory");			\
	if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I)) {		\
		__asm__ ("mov %0=ip" : "=r"(ip));			\
		last_cli_ip = ip;					\
	}								\
} while (0)

#else /* !CONFIG_IA64_DEBUG_IRQ */
  /* clearing of psr.i is implicitly serialized (visible by next insn) */
# define local_irq_save(x)	__asm__ __volatile__ ("mov %0=psr;; rsm psr.i;;"	\
						      : "=r" (x) :: "memory")
# define local_irq_disable()	__asm__ __volatile__ (";; rsm psr.i;;" ::: "memory")
  /* (potentially) setting psr.i requires data serialization: */
# define local_irq_set(x)	__asm__ __volatile__ ("mov %0=psr;;"	\
						      "ssm psr.i;;"	\
						      "srlz.d"		\
						      : "=r" (x) :: "memory")
# define local_irq_restore(x)	__asm__ __volatile__ ("cmp.ne p6,p7=%0,r0;;"	\
						      "(p6) ssm psr.i;"		\
						      "(p7) rsm psr.i;;"	\
						      "srlz.d"			\
						      :: "r"((x) & IA64_PSR_I)	\
						      : "p6", "p7", "memory")
#endif /* !CONFIG_IA64_DEBUG_IRQ */

#define local_irq_enable()	__asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")

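/*
 * Usage sketch (illustrative only): a short critical section on the
 * local CPU.  "flags" must be an unsigned long lvalue; the macros are
 * statement-like, so no address-of operator is used:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section runs with psr.i cleared ...
 *	local_irq_restore(flags);
 */
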
#define __cli()			local_irq_disable ()
#define __save_flags(flags)	__asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define __save_and_cli(flags)	local_irq_save(flags)
#define __save_and_sti(flags)	local_irq_set(flags)
#define save_and_cli(flags)	__save_and_cli(flags)
#define save_and_sti(flags)	__save_and_sti(flags)
#define __sti()			local_irq_enable ()
#define __restore_flags(flags)	local_irq_restore(flags)

#ifdef CONFIG_SMP
  extern void __global_cli (void);
  extern void __global_sti (void);
  extern unsigned long __global_save_flags (void);
  extern void __global_restore_flags (unsigned long);
# define cli()			__global_cli()
# define sti()			__global_sti()
# define save_flags(flags)	((flags) = __global_save_flags())
# define restore_flags(flags)	__global_restore_flags(flags)
#else /* !CONFIG_SMP */
# define cli()			__cli()
# define sti()			__sti()
# define save_flags(flags)	__save_flags(flags)
# define restore_flags(flags)	__restore_flags(flags)
#endif /* !CONFIG_SMP */

#ifdef __KERNEL__

#define prepare_to_switch()	local_irq_disable()

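/*
 * psr.is is 1 while the CPU executes IA-32 instructions, so a task
 * whose saved psr has the "is" bit set is running IA-32 code.
 */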
#ifdef CONFIG_IA32_SUPPORT
# define IS_IA32_PROCESS(regs)	(ia64_psr(regs)->is != 0)
#else
# define IS_IA32_PROCESS(regs)	0
#endif

/*
 * Context switch from one thread to another.  If the two threads have
 * different address spaces, schedule() has already taken care of
 * switching to the new address space by calling switch_mm().
 *
 * Disabling access to the fph partition and the debug-register
 * context switch MUST be done before calling ia64_switch_to() since a
 * newly created thread returns directly to
 * ia64_ret_from_syscall_clear_r8.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

#ifdef CONFIG_PERFMON
# define PERFMON_IS_SYSWIDE()	(local_cpu_data->pfm_syst_info & 0x1)
#else
# define PERFMON_IS_SYSWIDE()	(0)
#endif

#define IA64_HAS_EXTRA_STATE(t)							\
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)	\
	 || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())

#define __switch_to(prev,next,last) do {					\
	if (IA64_HAS_EXTRA_STATE(prev))						\
		ia64_save_extra(prev);						\
	if (IA64_HAS_EXTRA_STATE(next))						\
		ia64_load_extra(next);						\
	ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);	\
	(last) = ia64_switch_to((next));					\
} while (0)

#ifdef CONFIG_SMP

/* Return true if this CPU can call the console drivers in printk() */
#define arch_consoles_callable() (cpu_online_map & (1UL << smp_processor_id()))

/*
 * In the SMP case, we save the fph state when context-switching away from a thread that
 * modified fph.  This way, when the thread gets scheduled on another CPU, the CPU can
 * pick up the state from task->thread.fph, avoiding the complication of having to fetch
 * the latest fph state from another CPU.  In other words: eager save, lazy restore.
 */
# define switch_to(prev,next,last) do {						\
	if (ia64_psr(ia64_task_regs(prev))->mfh) {				\
		ia64_psr(ia64_task_regs(prev))->mfh = 0;			\
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID;			\
		__ia64_save_fpu((prev)->thread.fph);				\
	}									\
	__switch_to(prev, next, last);						\
} while (0)
#else
# define switch_to(prev,next,last)	__switch_to(prev, next, last)
#endif

#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
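
/*
 * Example (illustrative; "hpsim" is just one possible platform name):
 *
 *	if (ia64_platform_is("hpsim"))
 *		...
 */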

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IA64_SYSTEM_H */