/////////////////////////////////////////////////////////////////////////
// $Id: init.cc 11674 2013-04-09 15:43:15Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2012  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "param_names.h"

#include <stdlib.h>

BX_CPU_C::BX_CPU_C(unsigned id): bx_cpuid(id)
#if BX_CPU_LEVEL >= 4
   , cpuid(NULL)
#endif
#if BX_SUPPORT_APIC
   ,lapic (this, id)
#endif
{
  // in case of SMF, you cannot reference any member data
  // in the constructor because the only access to it is via
  // global variables which aren't initialized quite yet.
  char name[16], logname[16];
  sprintf(name, "CPU%x", bx_cpuid);
  sprintf(logname, "cpu%x", bx_cpuid);
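  // hand the formatted names to the logging subsystem so this CPU's
  // messages carry their own per-CPU prefix (e.g. "cpu0")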
|
46 |
|
|
put(logname, name);
|
47 |
|
|
|
48 |
|
|
isa_extensions_bitmask = BX_SUPPORT_FPU ? BX_ISA_X87 : 0;
|
49 |
|
|
cpu_extensions_bitmask = 0;
|
50 |
|
|
#if BX_SUPPORT_VMX
|
51 |
|
|
vmx_extensions_bitmask = 0;
|
52 |
|
|
#endif
|
53 |
|
|
#if BX_SUPPORT_SVM
|
54 |
|
|
svm_extensions_bitmask = 0;
|
55 |
|
|
#endif
|
56 |
|
|
|
57 |
|
|
srand(time(NULL)); // initialize random generator for RDRAND/RDSEED
|
58 |
|
|
}
|
59 |
|
|
|
60 |
|
|
#if BX_CPU_LEVEL >= 4
|
61 |
|
|
|
62 |
|
|
#include "generic_cpuid.h"
|
63 |
|
|
|
64 |
|
|
#define bx_define_cpudb(model) \
|
65 |
|
|
extern bx_cpuid_t *create_ ##model##_cpuid(BX_CPU_C *cpu);
|
66 |
|
|
|
67 |
|
|
#include "cpudb.h"
|
68 |
|
|
|
69 |
|
|
#undef bx_define_cpudb
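
// cpudb.h invokes bx_define_cpudb() once per CPU model entry, so the block
// above expands to one extern create_<model>_cpuid() declaration per model;
// cpuid_factory() below redefines the same macro so that a second include of
// cpudb.h generates the matching switch cases.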

static bx_cpuid_t *cpuid_factory(BX_CPU_C *cpu)
{
  unsigned cpu_model = SIM->get_param_enum(BXPN_CPU_MODEL)->get();

#define bx_define_cpudb(model) \
  case bx_cpudb_##model: \
    return create_ ##model##_cpuid(cpu);

  switch(cpu_model) {
#include "cpudb.h"
  default:
    return 0;
  }
#undef bx_define_cpudb
}

#endif

// BX_CPU_C initialization
void BX_CPU_C::initialize(void)
{
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR cpuid = cpuid_factory(this);

  if (! BX_CPU_THIS_PTR cpuid)
    BX_PANIC(("Failed to create CPUID module !"));

  BX_CPU_THIS_PTR isa_extensions_bitmask = cpuid->get_isa_extensions_bitmask();
  BX_CPU_THIS_PTR cpu_extensions_bitmask = cpuid->get_cpu_extensions_bitmask();
#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR vmx_extensions_bitmask = cpuid->get_vmx_extensions_bitmask();
#endif
#if BX_SUPPORT_SVM
  BX_CPU_THIS_PTR svm_extensions_bitmask = cpuid->get_svm_extensions_bitmask();
#endif
#endif

  init_FetchDecodeTables(); // must be called after init_isa_features_bitmask()

#if BX_CONFIGURE_MSRS
  for (unsigned n=0; n < BX_MSR_MAX_INDEX; n++) {
    BX_CPU_THIS_PTR msrs[n] = 0;
  }
  const char *msrs_filename = SIM->get_param_string(BXPN_CONFIGURABLE_MSRS_PATH)->getptr();
  load_MSRs(msrs_filename);
#endif

  // ignore bad MSRS if user asked for it
#if BX_CPU_LEVEL >= 5
  BX_CPU_THIS_PTR ignore_bad_msrs = SIM->get_param_bool(BXPN_IGNORE_BAD_MSRS)->get();
#endif

  init_SMRAM();

#if BX_SUPPORT_VMX
  init_VMCS();
#endif
}

// save/restore functionality
void BX_CPU_C::register_state(void)
{
  unsigned n;
  char name[10];

  sprintf(name, "cpu%d", BX_CPU_ID);
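
  // everything registered below hangs off a per-CPU list under the bochs
  // root, which is what the save/restore code walks when writing out or
  // reloading a snapshot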

  bx_list_c *cpu = new bx_list_c(SIM->get_bochs_root(), name, name);

  BXRS_HEX_PARAM_SIMPLE(cpu, isa_extensions_bitmask);
  BXRS_HEX_PARAM_SIMPLE(cpu, cpu_extensions_bitmask);
#if BX_SUPPORT_VMX
  BXRS_HEX_PARAM_SIMPLE(cpu, vmx_extensions_bitmask);
#endif
#if BX_SUPPORT_SVM
  BXRS_HEX_PARAM_SIMPLE(cpu, svm_extensions_bitmask);
#endif
  BXRS_DEC_PARAM_SIMPLE(cpu, cpu_mode);
  BXRS_HEX_PARAM_SIMPLE(cpu, activity_state);
  BXRS_HEX_PARAM_SIMPLE(cpu, inhibit_mask);
  BXRS_HEX_PARAM_SIMPLE(cpu, inhibit_icount);
  BXRS_HEX_PARAM_SIMPLE(cpu, debug_trap);
  BXRS_DEC_PARAM_SIMPLE(cpu, icount);
  BXRS_DEC_PARAM_SIMPLE(cpu, icount_last_sync);
#if BX_SUPPORT_X86_64
  BXRS_HEX_PARAM_SIMPLE(cpu, RAX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RBX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RCX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RDX);
  BXRS_HEX_PARAM_SIMPLE(cpu, RSP);
  BXRS_HEX_PARAM_SIMPLE(cpu, RBP);
  BXRS_HEX_PARAM_SIMPLE(cpu, RSI);
  BXRS_HEX_PARAM_SIMPLE(cpu, RDI);
  BXRS_HEX_PARAM_SIMPLE(cpu, R8);
  BXRS_HEX_PARAM_SIMPLE(cpu, R9);
  BXRS_HEX_PARAM_SIMPLE(cpu, R10);
  BXRS_HEX_PARAM_SIMPLE(cpu, R11);
  BXRS_HEX_PARAM_SIMPLE(cpu, R12);
  BXRS_HEX_PARAM_SIMPLE(cpu, R13);
  BXRS_HEX_PARAM_SIMPLE(cpu, R14);
  BXRS_HEX_PARAM_SIMPLE(cpu, R15);
  BXRS_HEX_PARAM_SIMPLE(cpu, RIP);
#else
  BXRS_HEX_PARAM_SIMPLE(cpu, EAX);
  BXRS_HEX_PARAM_SIMPLE(cpu, EBX);
  BXRS_HEX_PARAM_SIMPLE(cpu, ECX);
  BXRS_HEX_PARAM_SIMPLE(cpu, EDX);
  BXRS_HEX_PARAM_SIMPLE(cpu, ESP);
  BXRS_HEX_PARAM_SIMPLE(cpu, EBP);
  BXRS_HEX_PARAM_SIMPLE(cpu, ESI);
  BXRS_HEX_PARAM_SIMPLE(cpu, EDI);
  BXRS_HEX_PARAM_SIMPLE(cpu, EIP);
#endif
  BXRS_PARAM_SPECIAL32(cpu, EFLAGS,
         param_save_handler, param_restore_handler);
#if BX_CPU_LEVEL >= 3
  BXRS_HEX_PARAM_FIELD(cpu, DR0, dr[0]);
  BXRS_HEX_PARAM_FIELD(cpu, DR1, dr[1]);
  BXRS_HEX_PARAM_FIELD(cpu, DR2, dr[2]);
  BXRS_HEX_PARAM_FIELD(cpu, DR3, dr[3]);
  BXRS_HEX_PARAM_FIELD(cpu, DR6, dr6.val32);
  BXRS_HEX_PARAM_FIELD(cpu, DR7, dr7.val32);
#endif
  BXRS_HEX_PARAM_FIELD(cpu, CR0, cr0.val32);
  BXRS_HEX_PARAM_FIELD(cpu, CR2, cr2);
  BXRS_HEX_PARAM_FIELD(cpu, CR3, cr3);
#if BX_CPU_LEVEL >= 5
  BXRS_HEX_PARAM_FIELD(cpu, CR4, cr4.val32);
#endif
#if BX_CPU_LEVEL >= 6
  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_XSAVE)) {
    BXRS_HEX_PARAM_FIELD(cpu, XCR0, xcr0.val32);
  }
#endif
#if BX_CPU_LEVEL >= 5
  BXRS_HEX_PARAM_FIELD(cpu, tsc_last_reset, tsc_last_reset);
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  BXRS_HEX_PARAM_FIELD(cpu, tsc_offset, tsc_offset);
#endif
#endif
  for(n=0; n<6; n++) {
    bx_segment_reg_t *segment = &BX_CPU_THIS_PTR sregs[n];
    bx_list_c *sreg = new bx_list_c(cpu, strseg(segment));
    BXRS_PARAM_SPECIAL16(sreg, selector,
           param_save_handler, param_restore_handler);
    BXRS_HEX_PARAM_FIELD(sreg, valid, segment->cache.valid);
    BXRS_PARAM_BOOL(sreg, p, segment->cache.p);
    BXRS_HEX_PARAM_FIELD(sreg, dpl, segment->cache.dpl);
    BXRS_PARAM_BOOL(sreg, segment, segment->cache.segment);
    BXRS_HEX_PARAM_FIELD(sreg, type, segment->cache.type);
    BXRS_HEX_PARAM_FIELD(sreg, base, segment->cache.u.segment.base);
    BXRS_HEX_PARAM_FIELD(sreg, limit_scaled, segment->cache.u.segment.limit_scaled);
    BXRS_PARAM_BOOL(sreg, granularity, segment->cache.u.segment.g);
    BXRS_PARAM_BOOL(sreg, d_b, segment->cache.u.segment.d_b);
#if BX_SUPPORT_X86_64
    BXRS_PARAM_BOOL(sreg, l, segment->cache.u.segment.l);
#endif
    BXRS_PARAM_BOOL(sreg, avl, segment->cache.u.segment.avl);
  }

  bx_list_c *GDTR = new bx_list_c(cpu, "GDTR");
  BXRS_HEX_PARAM_FIELD(GDTR, base, gdtr.base);
  BXRS_HEX_PARAM_FIELD(GDTR, limit, gdtr.limit);

  bx_list_c *IDTR = new bx_list_c(cpu, "IDTR");
  BXRS_HEX_PARAM_FIELD(IDTR, base, idtr.base);
  BXRS_HEX_PARAM_FIELD(IDTR, limit, idtr.limit);

  bx_list_c *LDTR = new bx_list_c(cpu, "LDTR");
  BXRS_PARAM_SPECIAL16(LDTR, selector, param_save_handler, param_restore_handler);
  BXRS_HEX_PARAM_FIELD(LDTR, valid, ldtr.cache.valid);
  BXRS_PARAM_BOOL(LDTR, p, ldtr.cache.p);
  BXRS_HEX_PARAM_FIELD(LDTR, dpl, ldtr.cache.dpl);
  BXRS_PARAM_BOOL(LDTR, segment, ldtr.cache.segment);
  BXRS_HEX_PARAM_FIELD(LDTR, type, ldtr.cache.type);
  BXRS_HEX_PARAM_FIELD(LDTR, base, ldtr.cache.u.segment.base);
  BXRS_HEX_PARAM_FIELD(LDTR, limit_scaled, ldtr.cache.u.segment.limit_scaled);
  BXRS_PARAM_BOOL(LDTR, granularity, ldtr.cache.u.segment.g);
  BXRS_PARAM_BOOL(LDTR, d_b, ldtr.cache.u.segment.d_b);
  BXRS_PARAM_BOOL(LDTR, avl, ldtr.cache.u.segment.avl);

  bx_list_c *TR = new bx_list_c(cpu, "TR");
  BXRS_PARAM_SPECIAL16(TR, selector, param_save_handler, param_restore_handler);
  BXRS_HEX_PARAM_FIELD(TR, valid, tr.cache.valid);
  BXRS_PARAM_BOOL(TR, p, tr.cache.p);
  BXRS_HEX_PARAM_FIELD(TR, dpl, tr.cache.dpl);
  BXRS_PARAM_BOOL(TR, segment, tr.cache.segment);
  BXRS_HEX_PARAM_FIELD(TR, type, tr.cache.type);
  BXRS_HEX_PARAM_FIELD(TR, base, tr.cache.u.segment.base);
  BXRS_HEX_PARAM_FIELD(TR, limit_scaled, tr.cache.u.segment.limit_scaled);
  BXRS_PARAM_BOOL(TR, granularity, tr.cache.u.segment.g);
  BXRS_PARAM_BOOL(TR, d_b, tr.cache.u.segment.d_b);
  BXRS_PARAM_BOOL(TR, avl, tr.cache.u.segment.avl);

  BXRS_HEX_PARAM_SIMPLE(cpu, smbase);

#if BX_CPU_LEVEL >= 6
  bx_list_c *PDPTRS = new bx_list_c(cpu, "PDPTR_CACHE");
  BXRS_HEX_PARAM_FIELD(PDPTRS, entry0, PDPTR_CACHE.entry[0]);
  BXRS_HEX_PARAM_FIELD(PDPTRS, entry1, PDPTR_CACHE.entry[1]);
  BXRS_HEX_PARAM_FIELD(PDPTRS, entry2, PDPTR_CACHE.entry[2]);
  BXRS_HEX_PARAM_FIELD(PDPTRS, entry3, PDPTR_CACHE.entry[3]);
#endif

#if BX_CPU_LEVEL >= 5
  bx_list_c *MSR = new bx_list_c(cpu, "MSR");

#if BX_SUPPORT_APIC
  BXRS_HEX_PARAM_FIELD(MSR, apicbase, msr.apicbase);
#endif
  BXRS_HEX_PARAM_FIELD(MSR, EFER, efer.val32);
  BXRS_HEX_PARAM_FIELD(MSR, star, msr.star);
#if BX_SUPPORT_X86_64
  if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_LONG_MODE)) {
    BXRS_HEX_PARAM_FIELD(MSR, lstar, msr.lstar);
    BXRS_HEX_PARAM_FIELD(MSR, cstar, msr.cstar);
    BXRS_HEX_PARAM_FIELD(MSR, fmask, msr.fmask);
    BXRS_HEX_PARAM_FIELD(MSR, kernelgsbase, msr.kernelgsbase);
    BXRS_HEX_PARAM_FIELD(MSR, tsc_aux, msr.tsc_aux);
  }
#endif
#if BX_CPU_LEVEL >= 6
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_cs_msr, msr.sysenter_cs_msr);
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_esp_msr, msr.sysenter_esp_msr);
  BXRS_HEX_PARAM_FIELD(MSR, sysenter_eip_msr, msr.sysenter_eip_msr);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase0, msr.mtrrphys[0]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask0, msr.mtrrphys[1]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase1, msr.mtrrphys[2]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask1, msr.mtrrphys[3]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase2, msr.mtrrphys[4]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask2, msr.mtrrphys[5]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase3, msr.mtrrphys[6]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask3, msr.mtrrphys[7]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase4, msr.mtrrphys[8]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask4, msr.mtrrphys[9]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase5, msr.mtrrphys[10]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask5, msr.mtrrphys[11]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase6, msr.mtrrphys[12]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask6, msr.mtrrphys[13]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysbase7, msr.mtrrphys[14]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrphysmask7, msr.mtrrphys[15]);

  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix64k_00000, msr.mtrrfix64k_00000);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix16k_80000, msr.mtrrfix16k[0]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix16k_a0000, msr.mtrrfix16k[1]);

  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_c0000, msr.mtrrfix4k[0]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_c8000, msr.mtrrfix4k[1]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_d0000, msr.mtrrfix4k[2]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_d8000, msr.mtrrfix4k[3]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_e0000, msr.mtrrfix4k[4]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_e8000, msr.mtrrfix4k[5]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_f0000, msr.mtrrfix4k[6]);
  BXRS_HEX_PARAM_FIELD(MSR, mtrrfix4k_f8000, msr.mtrrfix4k[7]);

  BXRS_HEX_PARAM_FIELD(MSR, pat, msr.pat);
  BXRS_HEX_PARAM_FIELD(MSR, mtrr_deftype, msr.mtrr_deftype);
#endif
#if BX_CONFIGURE_MSRS
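  // only MSR slots that were actually populated from the user's MSR
  // configuration file get a save/restore entry; empty slots are skipped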
  bx_list_c *MSRS = new bx_list_c(cpu, "USER_MSR");
  for(n=0; n < BX_MSR_MAX_INDEX; n++) {
    if (! msrs[n]) continue;
    sprintf(name, "msr_0x%03x", n);
    bx_list_c *m = new bx_list_c(MSRS, name);
    BXRS_HEX_PARAM_FIELD(m, index, msrs[n]->index);
    BXRS_DEC_PARAM_FIELD(m, type, msrs[n]->type);
    BXRS_HEX_PARAM_FIELD(m, val64, msrs[n]->val64);
    BXRS_HEX_PARAM_FIELD(m, reset, msrs[n]->reset_value);
    BXRS_HEX_PARAM_FIELD(m, reserved, msrs[n]->reserved);
    BXRS_HEX_PARAM_FIELD(m, ignored, msrs[n]->ignored);
  }
#endif
#endif

#if BX_SUPPORT_FPU
  bx_list_c *fpu = new bx_list_c(cpu, "FPU");
  BXRS_HEX_PARAM_FIELD(fpu, cwd, the_i387.cwd);
  BXRS_HEX_PARAM_FIELD(fpu, swd, the_i387.swd);
  BXRS_HEX_PARAM_FIELD(fpu, twd, the_i387.twd);
  BXRS_HEX_PARAM_FIELD(fpu, foo, the_i387.foo);
  BXRS_HEX_PARAM_FIELD(fpu, fcs, the_i387.fcs);
  BXRS_HEX_PARAM_FIELD(fpu, fip, the_i387.fip);
  BXRS_HEX_PARAM_FIELD(fpu, fds, the_i387.fds);
  BXRS_HEX_PARAM_FIELD(fpu, fdp, the_i387.fdp);
  for (n=0; n<8; n++) {
    sprintf(name, "st%d", n);
    bx_list_c *STx = new bx_list_c(fpu, name);
    BXRS_HEX_PARAM_FIELD(STx, exp, the_i387.st_space[n].exp);
    BXRS_HEX_PARAM_FIELD(STx, fraction, the_i387.st_space[n].fraction);
  }
  BXRS_DEC_PARAM_FIELD(fpu, tos, the_i387.tos);
#endif

#if BX_CPU_LEVEL >= 6
  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SSE)) {
    bx_list_c *sse = new bx_list_c(cpu, "SSE");
    BXRS_HEX_PARAM_FIELD(sse, mxcsr, mxcsr.mxcsr);
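    // each vector register is stored as BX_VLMAX*2 64-bit words; AVX builds
    // go through the avx64u() accessor so the full-width register is saved,
    // otherwise the plain 128-bit xmm64u() view is sufficient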
    for (n=0; n<BX_XMM_REGISTERS; n++) {
      for(unsigned j=0;j < BX_VLMAX*2;j++) {
        sprintf(name, "xmm%02d_%d", n, j);
#if BX_SUPPORT_AVX
        new bx_shadow_num_c(sse, name, &vmm[n].avx64u(j), BASE_HEX);
#else
        new bx_shadow_num_c(sse, name, &vmm[n].xmm64u(j), BASE_HEX);
#endif
      }
    }
  }
#endif

#if BX_SUPPORT_MONITOR_MWAIT
  bx_list_c *monitor_list = new bx_list_c(cpu, "MONITOR");
  BXRS_HEX_PARAM_FIELD(monitor_list, monitor_addr, monitor.monitor_addr);
  BXRS_PARAM_BOOL(monitor_list, armed, monitor.armed);
#endif

#if BX_SUPPORT_APIC
  lapic.register_state(cpu);
#endif

#if BX_SUPPORT_VMX
  register_vmx_state(cpu);
#endif

#if BX_SUPPORT_SVM
  register_svm_state(cpu);
#endif

  BXRS_HEX_PARAM_SIMPLE32(cpu, pending_event);
  BXRS_HEX_PARAM_SIMPLE32(cpu, event_mask);
  BXRS_HEX_PARAM_SIMPLE32(cpu, async_event);

#if BX_X86_DEBUGGER
  BXRS_PARAM_BOOL(cpu, in_repeat, in_repeat);
#endif

  BXRS_PARAM_BOOL(cpu, in_smm, in_smm);

#if BX_DEBUGGER
  bx_list_c *tlb = new bx_list_c(cpu, "TLB");
#if BX_CPU_LEVEL >= 5
  BXRS_PARAM_BOOL(tlb, split_large, TLB.split_large);
#endif
  for (n=0; n<BX_TLB_SIZE; n++) {
    sprintf(name, "entry%d", n);
    bx_list_c *tlb_entry = new bx_list_c(tlb, name);
    BXRS_HEX_PARAM_FIELD(tlb_entry, lpf, TLB.entry[n].lpf);
    BXRS_HEX_PARAM_FIELD(tlb_entry, lpf_mask, TLB.entry[n].lpf_mask);
    BXRS_HEX_PARAM_FIELD(tlb_entry, ppf, TLB.entry[n].ppf);
    BXRS_HEX_PARAM_FIELD(tlb_entry, accessBits, TLB.entry[n].accessBits);
  }
#endif
}
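
// The param_save/param_restore pairs below use the SMF preprocessor pattern:
// when BX_USE_CPU_SMF is 0 the static *_handler receives a 'devptr', casts it
// back to the BX_CPU_C instance and forwards to the member function; when SMF
// is 1 the #else/#endif stitching compiles the handler body directly as the
// static function and 'devptr' is simply unused.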

Bit64s BX_CPU_C::param_save_handler(void *devptr, bx_param_c *param)
{
#if !BX_USE_CPU_SMF
  BX_CPU_C *class_ptr = (BX_CPU_C *) devptr;
  return class_ptr->param_save(param);
}

Bit64s BX_CPU_C::param_save(bx_param_c *param)
{
#else
  UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
  const char *pname, *segname;
  bx_segment_reg_t *segment = NULL;
  Bit64s val = 0;

  pname = param->get_name();
  if (!strcmp(pname, "EFLAGS")) {
    val = read_eflags();
  } else if (!strcmp(pname, "selector")) {
    segname = param->get_parent()->get_name();
    if (!strcmp(segname, "CS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
    } else if (!strcmp(segname, "DS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
    } else if (!strcmp(segname, "SS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
    } else if (!strcmp(segname, "ES")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
    } else if (!strcmp(segname, "FS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
    } else if (!strcmp(segname, "GS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
    } else if (!strcmp(segname, "LDTR")) {
      segment = &BX_CPU_THIS_PTR ldtr;
    } else if (!strcmp(segname, "TR")) {
      segment = &BX_CPU_THIS_PTR tr;
    }
    if (segment != NULL) {
      val = segment->selector.value;
    }
  }
  else {
    BX_PANIC(("Unknown param %s in param_save handler !", pname));
  }
  return val;
}

void BX_CPU_C::param_restore_handler(void *devptr, bx_param_c *param, Bit64s val)
{
#if !BX_USE_CPU_SMF
  BX_CPU_C *class_ptr = (BX_CPU_C *) devptr;
  class_ptr->param_restore(param, val);
}

void BX_CPU_C::param_restore(bx_param_c *param, Bit64s val)
{
#else
  UNUSED(devptr);
#endif // !BX_USE_CPU_SMF
  const char *pname, *segname;
  bx_segment_reg_t *segment = NULL;

  pname = param->get_name();
  if (!strcmp(pname, "EFLAGS")) {
    BX_CPU_THIS_PTR setEFlags((Bit32u)val);
  } else if (!strcmp(pname, "selector")) {
    segname = param->get_parent()->get_name();
    if (!strcmp(segname, "CS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS];
    } else if (!strcmp(segname, "DS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
    } else if (!strcmp(segname, "SS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS];
    } else if (!strcmp(segname, "ES")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
    } else if (!strcmp(segname, "FS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS];
    } else if (!strcmp(segname, "GS")) {
      segment = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS];
    } else if (!strcmp(segname, "LDTR")) {
      segment = &BX_CPU_THIS_PTR ldtr;
    } else if (!strcmp(segname, "TR")) {
      segment = &BX_CPU_THIS_PTR tr;
    }
    if (segment != NULL) {
      bx_selector_t *selector = &(segment->selector);
      parse_selector((Bit16u)val, selector);
    }
  }
  else {
    BX_PANIC(("Unknown param %s in param_restore handler !", pname));
  }
}

void BX_CPU_C::after_restore_state(void)
{
  handleCpuContextChange();

  BX_CPU_THIS_PTR prev_rip = RIP;
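
  // CPL follows directly from the restored mode: real mode always runs at
  // CPL 0 and virtual-8086 mode at CPL 3; in the other modes CPL is left
  // untouched here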
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_IA32_REAL) CPL = 0;
  else {
    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_IA32_V8086) CPL = 3;
  }

#if BX_SUPPORT_VMX
  set_VMCSPTR(BX_CPU_THIS_PTR vmcsptr);
#endif

  assert_checks();
  debug(RIP);
}
// end of save/restore functionality

BX_CPU_C::~BX_CPU_C()
{
#if BX_CPU_LEVEL >= 4
  delete cpuid;
#endif

  BX_INSTR_EXIT(BX_CPU_ID);
  BX_DEBUG(("Exit."));
}

void BX_CPU_C::reset(unsigned source)
{
  unsigned n;

  if (source == BX_RESET_HARDWARE)
    BX_INFO(("cpu hardware reset"));
  else if (source == BX_RESET_SOFTWARE)
    BX_INFO(("cpu software reset"));
  else
    BX_INFO(("cpu reset"));

  for (n=0;n<BX_GENERAL_REGISTERS;n++)
    BX_WRITE_32BIT_REGZ(n, 0);

//BX_WRITE_32BIT_REGZ(BX_32BIT_REG_EDX, get_cpu_version_information());

  // initialize NIL register
  BX_WRITE_32BIT_REGZ(BX_NIL_REGISTER, 0);

  BX_CPU_THIS_PTR eflags = 0x2; // Bit1 is always set
  // clear lazy flags state to satisfy Valgrind uninitialized variables checker
  memset(&BX_CPU_THIS_PTR oszapc, 0, sizeof(BX_CPU_THIS_PTR oszapc));
  setEFlagsOSZAPC(0); // update lazy flags state

  if (source == BX_RESET_HARDWARE)
    BX_CPU_THIS_PTR icount = 0;
  BX_CPU_THIS_PTR icount_last_sync = BX_CPU_THIS_PTR icount;

  BX_CPU_THIS_PTR inhibit_mask = 0;
  BX_CPU_THIS_PTR inhibit_icount = 0;

  BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
  BX_CPU_THIS_PTR debug_trap = 0;

  /* instruction pointer */
#if BX_CPU_LEVEL < 2
  BX_CPU_THIS_PTR prev_rip = EIP = 0x00000000;
#else /* from 286 up */
  BX_CPU_THIS_PTR prev_rip = RIP = 0x0000FFF0;
#endif

  /* CS (Code Segment) and descriptor cache */
  /* Note: on a real cpu, CS initially points to upper memory.  After
   * the 1st jump, the descriptor base is zero'd out.  Since I'm just
   * going to jump to my BIOS, I don't need to do this.
   * For future reference:
   *   processor  cs.selector   cs.base    cs.limit    EIP
   *        8086    FFFF          FFFF0        FFFF   0000
   *         286    F000         FF0000        FFFF   FFF0
   *        386+    F000       FFFF0000        FFFF   FFF0
   */
  parse_selector(0xf000,
          &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0xFFFF0000;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFF;
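  // together with RIP = 0x0000FFF0 above this puts the first instruction
  // fetch at physical address 0xFFFFFFF0, matching the 386+ row of the
  // reference table in the comment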
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 0; /* byte granular */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = 0; /* 16bit default size */
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = 0; /* 16bit default size */
#endif
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
#endif

  flushICaches();

  /* DS (Data Segment) and descriptor cache */
  parse_selector(0x0000,
          &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.p = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.base = 0x00000000;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.limit_scaled = 0xFFFF;
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.avl = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.g = 0; /* byte granular */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.d_b = 0; /* 16bit default size */
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.u.segment.l = 0; /* 16bit default size */
#endif
#endif

  // use DS segment as template for the others
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS] = BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS];
#endif

  /* GDTR (Global Descriptor Table Register) */
  BX_CPU_THIS_PTR gdtr.base = 0x00000000;
  BX_CPU_THIS_PTR gdtr.limit = 0xFFFF;

  /* IDTR (Interrupt Descriptor Table Register) */
  BX_CPU_THIS_PTR idtr.base = 0x00000000;
  BX_CPU_THIS_PTR idtr.limit = 0xFFFF; /* always byte granular */

  /* LDTR (Local Descriptor Table Register) */
  BX_CPU_THIS_PTR ldtr.selector.value = 0x0000;
  BX_CPU_THIS_PTR ldtr.selector.index = 0x0000;
  BX_CPU_THIS_PTR ldtr.selector.ti = 0;
  BX_CPU_THIS_PTR ldtr.selector.rpl = 0;

  BX_CPU_THIS_PTR ldtr.cache.valid = 1; /* valid */
  BX_CPU_THIS_PTR ldtr.cache.p = 1; /* present */
  BX_CPU_THIS_PTR ldtr.cache.dpl = 0; /* field not used */
  BX_CPU_THIS_PTR ldtr.cache.segment = 0; /* system segment */
  BX_CPU_THIS_PTR ldtr.cache.type = BX_SYS_SEGMENT_LDT;
  BX_CPU_THIS_PTR ldtr.cache.u.segment.base = 0x00000000;
  BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled = 0xFFFF;
  BX_CPU_THIS_PTR ldtr.cache.u.segment.avl = 0;
  BX_CPU_THIS_PTR ldtr.cache.u.segment.g = 0; /* byte granular */

  /* TR (Task Register) */
  BX_CPU_THIS_PTR tr.selector.value = 0x0000;
  BX_CPU_THIS_PTR tr.selector.index = 0x0000; /* undefined */
  BX_CPU_THIS_PTR tr.selector.ti = 0;
  BX_CPU_THIS_PTR tr.selector.rpl = 0;

  BX_CPU_THIS_PTR tr.cache.valid = 1; /* valid */
  BX_CPU_THIS_PTR tr.cache.p = 1; /* present */
  BX_CPU_THIS_PTR tr.cache.dpl = 0; /* field not used */
  BX_CPU_THIS_PTR tr.cache.segment = 0; /* system segment */
  BX_CPU_THIS_PTR tr.cache.type = BX_SYS_SEGMENT_BUSY_386_TSS;
  BX_CPU_THIS_PTR tr.cache.u.segment.base = 0x00000000;
  BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled = 0xFFFF;
  BX_CPU_THIS_PTR tr.cache.u.segment.avl = 0;
  BX_CPU_THIS_PTR tr.cache.u.segment.g = 0; /* byte granular */

  BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_REAL;

  // DR0 - DR7 (Debug Registers)
#if BX_CPU_LEVEL >= 3
  for (n=0; n<4; n++)
    BX_CPU_THIS_PTR dr[n] = 0;
#endif

#if BX_CPU_LEVEL >= 5
  BX_CPU_THIS_PTR dr6.val32 = 0xFFFF0FF0;
#else
  BX_CPU_THIS_PTR dr6.val32 = 0xFFFF1FF0;
#endif
  BX_CPU_THIS_PTR dr7.val32 = 0x00000400;

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif
  BX_CPU_THIS_PTR in_smm = 0;

  BX_CPU_THIS_PTR pending_event = 0;
  BX_CPU_THIS_PTR event_mask = 0;

  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR smbase = 0x30000; // do not change SMBASE on INIT
  }

  BX_CPU_THIS_PTR cr0.set32(0x60000010);
  // handle reserved bits
#if BX_CPU_LEVEL == 3
  // reserved bits all set to 1 on 386
  BX_CPU_THIS_PTR cr0.val32 |= 0x7ffffff0;
#endif

#if BX_CPU_LEVEL >= 3
  BX_CPU_THIS_PTR cr2 = 0;
  BX_CPU_THIS_PTR cr3 = 0;
#endif

#if BX_CPU_LEVEL >= 5
  BX_CPU_THIS_PTR cr4.set32(0);
  BX_CPU_THIS_PTR cr4_suppmask = get_cr4_allow_mask();
#endif

#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR xcr0.set32(0x1);
  BX_CPU_THIS_PTR xcr0_suppmask = 0x3;
#if BX_SUPPORT_AVX
  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_AVX))
    BX_CPU_THIS_PTR xcr0_suppmask |= BX_XCR0_AVX_MASK;
#endif
#endif

  /* initialise MSR registers to defaults */
#if BX_CPU_LEVEL >= 5
#if BX_SUPPORT_APIC
  /* APIC Address, APIC enabled and BSP is default, we'll fill in the rest later */
  BX_CPU_THIS_PTR msr.apicbase = BX_LAPIC_BASE_ADDR;
  BX_CPU_THIS_PTR lapic.reset(source);
  BX_CPU_THIS_PTR msr.apicbase |= 0x900;
  BX_CPU_THIS_PTR lapic.set_base(BX_CPU_THIS_PTR msr.apicbase);
#if BX_CPU_LEVEL >= 6
  if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_XAPIC_EXT))
    BX_CPU_THIS_PTR lapic.enable_xapic_extensions();
#endif
#endif
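
  // efer_suppmask collects the EFER bits this CPU model is allowed to have
  // set, based on the CPUID features selected above (NX, SYSCALL/SYSRET,
  // long mode, FFXSR, SVM); all other bits remain reserved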
  BX_CPU_THIS_PTR efer.set32(0);
  BX_CPU_THIS_PTR efer_suppmask = 0;
  if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_NX))
    BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_NXE_MASK;
  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SYSCALL_SYSRET_LEGACY))
    BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_SCE_MASK;
#if BX_SUPPORT_X86_64
  if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_LONG_MODE)) {
    BX_CPU_THIS_PTR efer_suppmask |= (BX_EFER_SCE_MASK | BX_EFER_LME_MASK | BX_EFER_LMA_MASK);
    if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_FFXSR))
      BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_FFXSR_MASK;
    if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SVM))
      BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_SVME_MASK;
  }
#endif

  BX_CPU_THIS_PTR msr.star = 0;
#if BX_SUPPORT_X86_64
  if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_LONG_MODE)) {
    BX_CPU_THIS_PTR msr.lstar = 0;
    BX_CPU_THIS_PTR msr.cstar = 0;
    BX_CPU_THIS_PTR msr.fmask = 0x00020200;
    BX_CPU_THIS_PTR msr.kernelgsbase = 0;
    BX_CPU_THIS_PTR msr.tsc_aux = 0;
  }
#endif

#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  BX_CPU_THIS_PTR tsc_offset = 0;
#endif
  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR set_TSC(0); // do not change TSC on INIT
  }
#endif // BX_CPU_LEVEL >= 5

#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR msr.sysenter_cs_msr = 0;
  BX_CPU_THIS_PTR msr.sysenter_esp_msr = 0;
  BX_CPU_THIS_PTR msr.sysenter_eip_msr = 0;
#endif

  // Do not change MTRR on INIT
#if BX_CPU_LEVEL >= 6
  if (source == BX_RESET_HARDWARE) {
    for (n=0; n<16; n++)
      BX_CPU_THIS_PTR msr.mtrrphys[n] = 0;

    BX_CPU_THIS_PTR msr.mtrrfix64k_00000 = 0; // all fix range MTRRs undefined according to manual
    BX_CPU_THIS_PTR msr.mtrrfix16k[0] = 0;
    BX_CPU_THIS_PTR msr.mtrrfix16k[1] = 0;

    for (n=0; n<8; n++)
      BX_CPU_THIS_PTR msr.mtrrfix4k[n] = 0;

    BX_CPU_THIS_PTR msr.pat = BX_CONST64(0x0007040600070406);
    BX_CPU_THIS_PTR msr.mtrr_deftype = 0;
  }
#endif

  // All configurable MSRs do not change on INIT
#if BX_CONFIGURE_MSRS
  if (source == BX_RESET_HARDWARE) {
    for (n=0; n < BX_MSR_MAX_INDEX; n++) {
      if (BX_CPU_THIS_PTR msrs[n])
        BX_CPU_THIS_PTR msrs[n]->reset();
    }
  }
#endif

  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR last_exception_type = 0;

  // invalidate the code prefetch queue
  BX_CPU_THIS_PTR eipPageBias = 0;
  BX_CPU_THIS_PTR eipPageWindowSize = 0;
  BX_CPU_THIS_PTR eipFetchPtr = NULL;

  // invalidate current stack page
  BX_CPU_THIS_PTR espPageBias = 0;
  BX_CPU_THIS_PTR espPageWindowSize = 0;
  BX_CPU_THIS_PTR espHostPtr = NULL;

#if BX_DEBUGGER
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR trace = 0;
  BX_CPU_THIS_PTR trace_reg = 0;
  BX_CPU_THIS_PTR trace_mem = 0;
  BX_CPU_THIS_PTR mode_break = 0;
#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR vmexit_break = 0;
#endif
#endif

  // Reset the Floating Point Unit
#if BX_SUPPORT_FPU
  if (source == BX_RESET_HARDWARE) {
    BX_CPU_THIS_PTR the_i387.reset();
  }
#endif

#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR sse_ok = 0;
#if BX_SUPPORT_AVX
  BX_CPU_THIS_PTR avx_ok = 0;
#endif

  // Reset XMM state - unchanged on #INIT
  if (source == BX_RESET_HARDWARE) {
    static BxPackedXmmRegister xmmnil; /* compiler will clear the variable */
    for(n=0; n<BX_XMM_REGISTERS; n++)
      BX_WRITE_XMM_REG_CLEAR_HIGH(n, xmmnil);

    BX_CPU_THIS_PTR mxcsr.mxcsr = MXCSR_RESET;
    BX_CPU_THIS_PTR mxcsr_mask = 0x0000ffbf;
    if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SSE2))
      BX_CPU_THIS_PTR mxcsr_mask |= MXCSR_DAZ;
    if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_MISALIGNED_SSE))
      BX_CPU_THIS_PTR mxcsr_mask |= MXCSR_MISALIGNED_EXCEPTION_MASK;
  }
#endif

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR in_vmx = BX_CPU_THIS_PTR in_vmx_guest = 0;
  BX_CPU_THIS_PTR in_smm_vmx = BX_CPU_THIS_PTR in_smm_vmx_guest = 0;
  BX_CPU_THIS_PTR vmcsptr = BX_CPU_THIS_PTR vmxonptr = BX_INVALID_VMCSPTR;
  BX_CPU_THIS_PTR vmcshostptr = 0;
  /* enable VMX, should be done in BIOS instead */
  BX_CPU_THIS_PTR msr.ia32_feature_ctrl =
    /*BX_IA32_FEATURE_CONTROL_LOCK_BIT | */BX_IA32_FEATURE_CONTROL_VMX_ENABLE_BIT;
#endif

#if BX_SUPPORT_SVM
  BX_CPU_THIS_PTR in_svm_guest = 0;
  BX_CPU_THIS_PTR svm_gif = 1;
  BX_CPU_THIS_PTR vmcbptr = 0;
  BX_CPU_THIS_PTR vmcbhostptr = 0;
#endif

#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  BX_CPU_THIS_PTR in_event = 0;
#endif

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

#if BX_SUPPORT_SMP
  // notice if I'm the bootstrap processor.  If not, do the equivalent of
  // a HALT instruction.
  int apic_id = lapic.get_id();
  if (BX_BOOTSTRAP_PROCESSOR == apic_id) {
    // boot normally
    BX_CPU_THIS_PTR msr.apicbase |= 0x100; /* set bit 8 BSP */
    BX_INFO(("CPU[%d] is the bootstrap processor", apic_id));
  } else {
    // it's an application processor, halt until IPI is heard.
    BX_CPU_THIS_PTR msr.apicbase &= ~0x100; /* clear bit 8 BSP */
    BX_INFO(("CPU[%d] is an application processor. Halting until SIPI.", apic_id));
    enter_sleep_state(BX_ACTIVITY_STATE_WAIT_FOR_SIPI);
  }
#endif

  handleCpuContextChange();

#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR cpuid->dump_cpuid();
#endif

  BX_INSTR_RESET(BX_CPU_ID, source);
}

void BX_CPU_C::sanity_checks(void)
{
  Bit32u eax = EAX, ecx = ECX, edx = EDX, ebx = EBX, esp = ESP, ebp = EBP, esi = ESI, edi = EDI;
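
  // load easily recognizable, mutually distinct byte patterns so the
  // aliasing checks below can tell whether the 8-bit and 16-bit register
  // accessors really map onto the expected bytes of the 32-bit registers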
  EAX = 0xFFEEDDCC;
  ECX = 0xBBAA9988;
  EDX = 0x77665544;
  EBX = 0x332211FF;
  ESP = 0xEEDDCCBB;
  EBP = 0xAA998877;
  ESI = 0x66554433;
  EDI = 0x2211FFEE;

  Bit8u al, cl, dl, bl, ah, ch, dh, bh;

  al = AL;
  cl = CL;
  dl = DL;
  bl = BL;
  ah = AH;
  ch = CH;
  dh = DH;
  bh = BH;

  if ( al != (EAX & 0xFF) ||
       cl != (ECX & 0xFF) ||
       dl != (EDX & 0xFF) ||
       bl != (EBX & 0xFF) ||
       ah != ((EAX >> 8) & 0xFF) ||
       ch != ((ECX >> 8) & 0xFF) ||
       dh != ((EDX >> 8) & 0xFF) ||
       bh != ((EBX >> 8) & 0xFF) )
  {
    BX_PANIC(("problems using BX_READ_8BIT_REGx()!"));
  }

  Bit16u ax, cx, dx, bx, sp, bp, si, di;

  ax = AX;
  cx = CX;
  dx = DX;
  bx = BX;
  sp = SP;
  bp = BP;
  si = SI;
  di = DI;

  if ( ax != (EAX & 0xFFFF) ||
       cx != (ECX & 0xFFFF) ||
       dx != (EDX & 0xFFFF) ||
       bx != (EBX & 0xFFFF) ||
       sp != (ESP & 0xFFFF) ||
       bp != (EBP & 0xFFFF) ||
       si != (ESI & 0xFFFF) ||
       di != (EDI & 0xFFFF) )
  {
    BX_PANIC(("problems using BX_READ_16BIT_REG()!"));
  }

  EAX = eax; /* restore registers */
  ECX = ecx;
  EDX = edx;
  EBX = ebx;
  ESP = esp;
  EBP = ebp;
  ESI = esi;
  EDI = edi;

  if (sizeof(Bit8u) != 1 || sizeof(Bit8s) != 1)
    BX_PANIC(("data type Bit8u or Bit8s is not of length 1 byte!"));
  if (sizeof(Bit16u) != 2 || sizeof(Bit16s) != 2)
    BX_PANIC(("data type Bit16u or Bit16s is not of length 2 bytes!"));
  if (sizeof(Bit32u) != 4 || sizeof(Bit32s) != 4)
    BX_PANIC(("data type Bit32u or Bit32s is not of length 4 bytes!"));
  if (sizeof(Bit64u) != 8 || sizeof(Bit64s) != 8)
    BX_PANIC(("data type Bit64u or Bit64s is not of length 8 bytes!"));

  BX_DEBUG(("#(%u)all sanity checks passed!", BX_CPU_ID));
}

void BX_CPU_C::assert_checks(void)
{
  // check CPU mode consistency
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR efer.get_LMA()) {
    if (! BX_CPU_THIS_PTR cr0.get_PE()) {
      BX_PANIC(("assert_checks: EFER.LMA is set when CR0.PE=0 !"));
    }
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_LONG_64 !"));
    }
    else {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_COMPAT)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_LONG_COMPAT !"));
    }
  }
  else
#endif
  {
    if (BX_CPU_THIS_PTR cr0.get_PE()) {
      if (BX_CPU_THIS_PTR get_VM()) {
        if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_V8086)
          BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_V8086 !"));
      }
      else {
        if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_PROTECTED)
          BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_PROTECTED !"));
      }
    }
    else {
      if (BX_CPU_THIS_PTR cpu_mode != BX_MODE_IA32_REAL)
        BX_PANIC(("assert_checks: inconsistent cpu_mode BX_MODE_IA32_REAL !"));
    }
  }

  // check CR0 consistency
  if (! check_CR0(BX_CPU_THIS_PTR cr0.val32))
    BX_PANIC(("assert_checks: CR0 consistency checks failed !"));

#if BX_CPU_LEVEL >= 5
  // check CR4 consistency
  if (! check_CR4(BX_CPU_THIS_PTR cr4.val32))
    BX_PANIC(("assert_checks: CR4 consistency checks failed !"));
#endif

#if BX_SUPPORT_X86_64
  // VM should be OFF in long mode
  if (long_mode()) {
    if (BX_CPU_THIS_PTR get_VM()) BX_PANIC(("assert_checks: VM is set in long mode !"));
  }

  // CS.L and CS.D_B are mutually exclusive
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l &&
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b)
  {
    BX_PANIC(("assert_checks: CS.l and CS.d_b set together !"));
  }
#endif

  // check LDTR type
  if (BX_CPU_THIS_PTR ldtr.cache.valid)
  {
    if (BX_CPU_THIS_PTR ldtr.cache.type != BX_SYS_SEGMENT_LDT)
    {
      BX_PANIC(("assert_checks: LDTR is not LDT type !"));
    }
  }

  // check Task Register type
  if(BX_CPU_THIS_PTR tr.cache.valid)
  {
    switch(BX_CPU_THIS_PTR tr.cache.type)
    {
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
#if BX_CPU_LEVEL >= 3
        if (BX_CPU_THIS_PTR tr.cache.u.segment.g != 0)
          BX_PANIC(("assert_checks: tss286.g != 0 !"));
        if (BX_CPU_THIS_PTR tr.cache.u.segment.avl != 0)
          BX_PANIC(("assert_checks: tss286.avl != 0 !"));
#endif
        break;
      case BX_SYS_SEGMENT_BUSY_386_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
        break;
      default:
        BX_PANIC(("assert_checks: TR is not TSS type !"));
    }
  }

#if BX_SUPPORT_X86_64 == 0 && BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR efer_suppmask & (BX_EFER_SCE_MASK |
        BX_EFER_LME_MASK | BX_EFER_LMA_MASK | BX_EFER_FFXSR_MASK))
  {
    BX_PANIC(("assert_checks: EFER supports x86-64 specific bits !"));
  }
#endif
}