1 |
2 |
/////////////////////////////////////////////////////////////////////////
|
2 |
|
|
// $Id: exception.cc 11580 2013-01-19 20:45:03Z sshwarts $
|
3 |
|
|
/////////////////////////////////////////////////////////////////////////
|
4 |
|
|
//
|
5 |
|
|
// Copyright (C) 2001-2013 The Bochs Project
|
6 |
|
|
//
|
7 |
|
|
// This library is free software; you can redistribute it and/or
|
8 |
|
|
// modify it under the terms of the GNU Lesser General Public
|
9 |
|
|
// License as published by the Free Software Foundation; either
|
10 |
|
|
// version 2 of the License, or (at your option) any later version.
|
11 |
|
|
//
|
12 |
|
|
// This library is distributed in the hope that it will be useful,
|
13 |
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14 |
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
15 |
|
|
// Lesser General Public License for more details.
|
16 |
|
|
//
|
17 |
|
|
// You should have received a copy of the GNU Lesser General Public
|
18 |
|
|
// License along with this library; if not, write to the Free Software
|
19 |
|
|
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
20 |
|
|
//
|
21 |
|
|
/////////////////////////////////////////////////////////////////////////
|
22 |
|
|
|
23 |
|
|
#define NEED_CPU_REG_SHORTCUTS 1
|
24 |
|
|
#include "bochs.h"
|
25 |
|
|
#include "cpu.h"
|
26 |
|
|
#define LOG_THIS BX_CPU_THIS_PTR
|
27 |
|
|
|
28 |
|
|
#include "param_names.h"
|
29 |
|
|
#include "iodev/iodev.h"
|
30 |
|
|
|
31 |
|
|
#if BX_SUPPORT_X86_64==0
|
32 |
|
|
// Make life easier merging cpu64 & cpu code.
|
33 |
|
|
#define RIP EIP
|
34 |
|
|
#define RSP ESP
|
35 |
|
|
#endif
|
36 |
|
|
|
37 |
|
|
#if BX_SUPPORT_X86_64
|
38 |
|
|
// Deliver an interrupt or exception while the CPU is in long (64-bit) mode.
//
//   vector     - interrupt vector number; long-mode IDT entries are 16 bytes
//   soft_int   - non-zero when raised by a software INT, which additionally
//                requires gate DPL >= CPL
//   push_error - when non-zero, error_code is pushed onto the handler stack
//   error_code - error code for exceptions that define one
//
// Any consistency failure raises #GP/#NP via exception() with the
// architectural selector error code vector*8 + 2 (bit 1 = IDT flag).
// NOTE(review): exception() is assumed not to return - confirm against cpu.h.
void BX_CPU_C::long_mode_int(Bit8u vector, unsigned soft_int, bx_bool push_error, Bit16u error_code)
{
  bx_descriptor_t gate_descriptor, cs_descriptor;
  bx_selector_t cs_selector;

  // interrupt vector must be within IDT table limits,
  // else #GP(vector*8 + 2 + EXT)
  if ((vector*16 + 15) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(long mode): vector must be within IDT table limits, IDT.limit = 0x%x", BX_CPU_THIS_PTR idtr.limit));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // fetch the 16-byte long-mode IDT gate as two qwords
  Bit64u desctmp1 = system_read_qword(BX_CPU_THIS_PTR idtr.base + vector*16);
  Bit64u desctmp2 = system_read_qword(BX_CPU_THIS_PTR idtr.base + vector*16 + 8);

  // the TYPE field in the gate's upper qword (extended attributes) must be 0
  if (desctmp2 & BX_CONST64(0x00001F0000000000)) {
    BX_ERROR(("interrupt(long mode): IDT entry extended attributes DWORD4 TYPE != 0"));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  Bit32u dword1 = GET32L(desctmp1);
  Bit32u dword2 = GET32H(desctmp1);
  Bit32u dword3 = GET32L(desctmp2); // bits 63:32 of the handler offset

  parse_descriptor(dword1, dword2, &gate_descriptor);

  // gate must be a valid system segment descriptor
  if ((gate_descriptor.valid==0) || gate_descriptor.segment)
  {
    BX_ERROR(("interrupt(long mode): gate descriptor is not valid sys seg"));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // descriptor AR byte must indicate interrupt gate or trap gate
  // (only 386-style gates are legal in long mode), else #GP(vector*8 + 2 + EXT)
  if (gate_descriptor.type != BX_386_INTERRUPT_GATE &&
      gate_descriptor.type != BX_386_TRAP_GATE)
  {
    BX_ERROR(("interrupt(long mode): unsupported gate type %u",
        (unsigned) gate_descriptor.type));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // if software interrupt, then gate descriptor DPL must be >= CPL,
  // else #GP(vector * 8 + 2 + EXT)
  if (soft_int && gate_descriptor.dpl < CPL)
  {
    BX_ERROR(("interrupt(long mode): soft_int && gate.dpl < CPL"));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // Gate must be present, else #NP(vector * 8 + 2 + EXT)
  if (! IS_PRESENT(gate_descriptor)) {
    BX_ERROR(("interrupt(long mode): gate.p == 0"));
    exception(BX_NP_EXCEPTION, vector*8 + 2);
  }

  Bit16u gate_dest_selector = gate_descriptor.u.gate.dest_selector;
  // 64-bit entry point: high dword supplied by the second gate qword
  Bit64u gate_dest_offset = ((Bit64u)dword3 << 32) |
                       gate_descriptor.u.gate.dest_offset;

  // IST index occupies the low 3 bits of the gate's param_count field
  unsigned ist = gate_descriptor.u.gate.param_count & 0x7;

  // examine CS selector and descriptor given in gate descriptor
  // selector must be non-null else #GP(EXT)
  if ((gate_dest_selector & 0xfffc) == 0) {
    BX_ERROR(("int_trap_gate(long mode): selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(gate_dest_selector, &cs_selector);

  // selector must be within its descriptor table limits
  // else #GP(selector+EXT)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // descriptor AR byte must indicate code seg
  // and code segment descriptor DPL<=CPL, else #GP(selector+EXT)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("interrupt(long mode): not accessible or not code segment"));
    exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
  }

  // check that it's a 64 bit segment (L=1, D must be 0)
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("interrupt(long mode): must be 64 bit segment"));
    exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
  }

  // segment must be present, else #NP(selector + EXT)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("interrupt(long mode): segment not present"));
    exception(BX_NP_EXCEPTION, cs_selector.value & 0xfffc);
  }

  Bit64u RSP_for_cpl_x;

  // snapshot the interrupted context; it is pushed onto the handler's stack
  Bit64u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  Bit64u old_RIP = RIP;
  Bit64u old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
  Bit64u old_RSP = RSP;

  // if code segment is non-conforming and DPL < CPL then
  // INTERRUPT TO INNER PRIVILEGE:
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && cs_descriptor.dpl < CPL)
  {
    BX_DEBUG(("interrupt(long mode): INTERRUPT TO INNER PRIVILEGE"));

    // check selector and descriptor for new stack in current TSS
    if (ist > 0) {
      BX_DEBUG(("interrupt(long mode): trap to IST, vector = %d", ist));
      // IST slots follow the three RSPn slots in the 64-bit TSS, hence ist+3
      RSP_for_cpl_x = get_RSP_from_TSS(ist+3);
    }
    else {
      RSP_for_cpl_x = get_RSP_from_TSS(cs_descriptor.dpl);
    }

    // align stack to a 16-byte boundary
    RSP_for_cpl_x &= BX_CONST64(0xfffffffffffffff0);

    // push old stack long pointer onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, read_eflags());
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 40, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 40;

    if (push_error) {
      RSP_for_cpl_x -= 8;
      write_new_stack_qword_64(RSP_for_cpl_x, cs_descriptor.dpl, error_code);
    }

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, gate_dest_offset, cs_descriptor.dpl);

    // set up null SS descriptor
    load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], cs_descriptor.dpl);
  }
  else if(IS_CODE_SEGMENT_CONFORMING(cs_descriptor.type) || cs_descriptor.dpl==CPL)
  {
    // if code segment is conforming OR code segment DPL = CPL then
    // INTERRUPT TO SAME PRIVILEGE LEVEL:

    BX_DEBUG(("interrupt(long mode): INTERRUPT TO SAME PRIVILEGE"));

    // check selector and descriptor for new stack in current TSS
    if (ist > 0) {
      BX_DEBUG(("interrupt(long mode): trap to IST, vector = %d", ist));
      RSP_for_cpl_x = get_RSP_from_TSS(ist+3);
    }
    else {
      // no privilege change and no IST: stay on the current stack
      RSP_for_cpl_x = RSP;
    }

    // align stack
    RSP_for_cpl_x &= BX_CONST64(0xfffffffffffffff0);

    // push flags onto stack
    // push current CS selector onto stack
    // push return offset onto stack
    write_new_stack_qword_64(RSP_for_cpl_x - 8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, read_eflags());
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 40, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 40;

    if (push_error) {
      RSP_for_cpl_x -= 8;
      write_new_stack_qword_64(RSP_for_cpl_x, cs_descriptor.dpl, error_code);
    }

    // set the RPL field of CS to CPL
    branch_far64(&cs_selector, &cs_descriptor, gate_dest_offset, CPL);
  }
  else {
    // conforming with DPL > CPL, or non-conforming with DPL > CPL: illegal
    BX_ERROR(("interrupt(long mode): bad descriptor type %u (CS.DPL=%u CPL=%u)",
        (unsigned) cs_descriptor.type, (unsigned) cs_descriptor.dpl, (unsigned) CPL));
    exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
  }

  RSP = RSP_for_cpl_x;

  // if interrupt gate then set IF to 0
  if (!(gate_descriptor.type & 1)) // even is int-gate
    BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
  //BX_CPU_THIS_PTR clear_VM(); // VM is clear in long mode
  BX_CPU_THIS_PTR clear_RF();
  BX_CPU_THIS_PTR clear_NT();
}
|
236 |
|
|
#endif
|
237 |
|
|
|
238 |
|
|
// Deliver an interrupt or exception while the CPU is in protected mode
// (including v8086 mode). IDT entries are 8 bytes here and may be task
// gates, 286 interrupt/trap gates (16-bit frames) or 386 interrupt/trap
// gates (32-bit frames).
//
//   vector     - interrupt vector number
//   soft_int   - non-zero for software INT; requires gate DPL >= CPL
//   push_error - when non-zero, error_code is pushed for the handler
//   error_code - error code for exceptions that define one
//
// Consistency failures raise #GP/#NP/#TS/#SS via exception() with the
// architecturally defined error codes noted inline below.
void BX_CPU_C::protected_mode_int(Bit8u vector, unsigned soft_int, bx_bool push_error, Bit16u error_code)
{
  bx_descriptor_t gate_descriptor, cs_descriptor;
  bx_selector_t cs_selector;

  // used only for the task-gate path
  Bit16u raw_tss_selector;
  bx_selector_t tss_selector;
  bx_descriptor_t tss_descriptor;

  Bit16u gate_dest_selector;
  Bit32u gate_dest_offset;

  // interrupt vector must be within IDT table limits,
  // else #GP(vector*8 + 2 + EXT)
  if ((vector*8 + 7) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(): vector must be within IDT table limits, IDT.limit = 0x%x", BX_CPU_THIS_PTR idtr.limit));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // fetch the 8-byte IDT gate descriptor
  Bit64u desctmp = system_read_qword(BX_CPU_THIS_PTR idtr.base + vector*8);

  Bit32u dword1 = GET32L(desctmp);
  Bit32u dword2 = GET32H(desctmp);

  parse_descriptor(dword1, dword2, &gate_descriptor);

  // gate must be a valid system segment descriptor
  if ((gate_descriptor.valid==0) || gate_descriptor.segment) {
    BX_ERROR(("interrupt(): gate descriptor is not valid sys seg (vector=0x%02x)", vector));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // descriptor AR byte must indicate interrupt gate, trap gate,
  // or task gate, else #GP(vector*8 + 2 + EXT)
  switch (gate_descriptor.type) {
  case BX_TASK_GATE:
  case BX_286_INTERRUPT_GATE:
  case BX_286_TRAP_GATE:
  case BX_386_INTERRUPT_GATE:
  case BX_386_TRAP_GATE:
    break;
  default:
    BX_ERROR(("interrupt(): gate.type(%u) != {5,6,7,14,15}",
      (unsigned) gate_descriptor.type));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // if software interrupt, then gate descriptor DPL must be >= CPL,
  // else #GP(vector * 8 + 2 + EXT)
  if (soft_int && gate_descriptor.dpl < CPL) {
    BX_ERROR(("interrupt(): soft_int && (gate.dpl < CPL)"));
    exception(BX_GP_EXCEPTION, vector*8 + 2);
  }

  // Gate must be present, else #NP(vector * 8 + 2 + EXT)
  if (! IS_PRESENT(gate_descriptor)) {
    BX_ERROR(("interrupt(): gate not present"));
    exception(BX_NP_EXCEPTION, vector*8 + 2);
  }

  switch (gate_descriptor.type) {
  case BX_TASK_GATE:
    // examine selector to TSS, given in task gate descriptor
    raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
    parse_selector(raw_tss_selector, &tss_selector);

    // must specify global in the local/global bit,
    // else #GP(TSS selector)
    if (tss_selector.ti) {
      BX_ERROR(("interrupt(): tss_selector.ti=1 from gate descriptor - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // index must be within GDT limits, else #TS(TSS selector)
    fetch_raw_descriptor(&tss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

    parse_descriptor(dword1, dword2, &tss_descriptor);

    // AR byte must specify available TSS,
    // else #GP(TSS selector)
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("interrupt(): TSS selector points to invalid or bad TSS - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    if (tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_286_TSS &&
        tss_descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS)
    {
      BX_ERROR(("interrupt(): TSS selector points to bad TSS - #GP(tss_selector)"));
      exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // TSS must be present, else #NP(TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("interrupt(): TSS descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc);
    }

    // switch tasks with nesting to TSS; task_switch handles the rest
    task_switch(0, &tss_selector, &tss_descriptor,
        BX_TASK_FROM_INT, dword1, dword2, push_error, error_code);
    return;

  case BX_286_INTERRUPT_GATE:
  case BX_286_TRAP_GATE:
  case BX_386_INTERRUPT_GATE:
  case BX_386_TRAP_GATE:
    gate_dest_selector = gate_descriptor.u.gate.dest_selector;
    gate_dest_offset = gate_descriptor.u.gate.dest_offset;

    // examine CS selector and descriptor given in gate descriptor
    // selector must be non-null else #GP(EXT)
    if ((gate_dest_selector & 0xfffc) == 0) {
      BX_ERROR(("int_trap_gate(): selector null"));
      exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(gate_dest_selector, &cs_selector);

    // selector must be within its descriptor table limits
    // else #GP(selector+EXT)
    fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
    parse_descriptor(dword1, dword2, &cs_descriptor);

    // descriptor AR byte must indicate code seg
    // and code segment descriptor DPL<=CPL, else #GP(selector+EXT)
    if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
        IS_DATA_SEGMENT(cs_descriptor.type) ||
        cs_descriptor.dpl > CPL)
    {
      BX_ERROR(("interrupt(): not accessible or not code segment cs=0x%04x", cs_selector.value));
      exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
    }

    // segment must be present, else #NP(selector + EXT)
    if (! IS_PRESENT(cs_descriptor)) {
      BX_ERROR(("interrupt(): segment not present"));
      exception(BX_NP_EXCEPTION, cs_selector.value & 0xfffc);
    }

    // if code segment is non-conforming and DPL < CPL then
    // INTERRUPT TO INNER PRIVILEGE
    if(IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && cs_descriptor.dpl < CPL)
    {
      Bit16u old_SS, old_CS, SS_for_cpl_x;
      Bit32u ESP_for_cpl_x, old_EIP, old_ESP;
      bx_descriptor_t ss_descriptor;
      bx_selector_t ss_selector;
      // interrupts from v8086 mode additionally push the data segment
      // registers and force a switch to ring 0
      int is_v8086_mode = v8086_mode();

      BX_DEBUG(("interrupt(): INTERRUPT TO INNER PRIVILEGE"));

      // check selector and descriptor for new stack in current TSS
      get_SS_ESP_from_TSS(cs_descriptor.dpl,
                              &SS_for_cpl_x, &ESP_for_cpl_x);

      if (is_v8086_mode && cs_descriptor.dpl != 0) {
        // if code segment DPL != 0 then #GP(new code segment selector)
        BX_ERROR(("interrupt(): code segment DPL(%d) != 0 in v8086 mode", cs_descriptor.dpl));
        exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
      }

      // Selector must be non-null else #TS(EXT)
      if ((SS_for_cpl_x & 0xfffc) == 0) {
        BX_ERROR(("interrupt(): SS selector null"));
        exception(BX_TS_EXCEPTION, 0); /* TS(ext) */
      }

      // selector index must be within its descriptor table limits
      // else #TS(SS selector + EXT)
      parse_selector(SS_for_cpl_x, &ss_selector);
      // fetch 2 dwords of descriptor; call handles out of limits checks
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      // selector rpl must = dpl of code segment,
      // else #TS(SS selector + ext)
      if (ss_selector.rpl != cs_descriptor.dpl) {
        BX_ERROR(("interrupt(): SS.rpl != CS.dpl"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // stack seg DPL must = DPL of code segment,
      // else #TS(SS selector + ext)
      if (ss_descriptor.dpl != cs_descriptor.dpl) {
        BX_ERROR(("interrupt(): SS.dpl != CS.dpl"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // descriptor must indicate writable data segment,
      // else #TS(SS selector + EXT)
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
          IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("interrupt(): SS is not writable data segment"));
        exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // seg must be present, else #SS(SS selector + ext)
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("interrupt(): SS not present"));
        exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc);
      }

      // IP must be within CS segment boundaries, else #GP(0)
      if (gate_dest_offset > cs_descriptor.u.segment.limit_scaled) {
        BX_ERROR(("interrupt(): gate EIP > CS.limit"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // snapshot interrupted context before any state is modified
      old_ESP = ESP;
      old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
      old_EIP = EIP;
      old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

      // Prepare new stack segment
      bx_segment_reg_t new_stack;
      new_stack.selector = ss_selector;
      new_stack.cache = ss_descriptor;
      new_stack.selector.rpl = cs_descriptor.dpl;
      // add cpl to the selector value
      new_stack.selector.value = (0xfffc & new_stack.selector.value) |
        new_stack.selector.rpl;

      // B bit of the new SS selects 32-bit (ESP) vs 16-bit (SP) pushes
      if (ss_descriptor.u.segment.d_b) {
        Bit32u temp_ESP = ESP_for_cpl_x;

        if (is_v8086_mode)
        {
          // v8086: additionally push GS, FS, DS, ES of the interrupted task
          if (gate_descriptor.type>=14) { // 386 int/trap gate
            write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-12, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_dword_32(&new_stack, temp_ESP-16, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_ESP -= 16;
          }
          else {
            write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-6, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_word_32(&new_stack, temp_ESP-8, cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_ESP -= 8;
          }
        }

        if (gate_descriptor.type>=14) { // 386 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, old_SS);
          write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, old_ESP);
          write_new_stack_dword_32(&new_stack, temp_ESP-12, cs_descriptor.dpl, read_eflags());
          write_new_stack_dword_32(&new_stack, temp_ESP-16, cs_descriptor.dpl, old_CS);
          write_new_stack_dword_32(&new_stack, temp_ESP-20, cs_descriptor.dpl, old_EIP);
          temp_ESP -= 20;

          if (push_error) {
            temp_ESP -= 4;
            write_new_stack_dword_32(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
          }
        }
        else { // 286 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, old_SS);
          write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) old_ESP);
          write_new_stack_word_32(&new_stack, temp_ESP-6, cs_descriptor.dpl, (Bit16u) read_eflags());
          write_new_stack_word_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, old_CS);
          write_new_stack_word_32(&new_stack, temp_ESP-10, cs_descriptor.dpl, (Bit16u) old_EIP);
          temp_ESP -= 10;

          if (push_error) {
            temp_ESP -= 2;
            write_new_stack_word_32(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
          }
        }

        ESP = temp_ESP;
      }
      else {
        // 16-bit stack segment: same frame layout, SP-wrapped addressing
        Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

        if (is_v8086_mode)
        {
          if (gate_descriptor.type>=14) { // 386 int/trap gate
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-12), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-16), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_SP -= 16;
          }
          else {
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-6), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
            write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl,
                BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
            temp_SP -= 8;
          }
        }

        if (gate_descriptor.type>=14) { // 386 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, old_SS);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, old_ESP);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-12), cs_descriptor.dpl, read_eflags());
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-16), cs_descriptor.dpl, old_CS);
          write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-20), cs_descriptor.dpl, old_EIP);
          temp_SP -= 20;

          if (push_error) {
            temp_SP -= 4;
            write_new_stack_dword_32(&new_stack, temp_SP, cs_descriptor.dpl, error_code);
          }
        }
        else { // 286 int/trap gate
          // push long pointer to old stack onto new stack
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, old_SS);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) old_ESP);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-6), cs_descriptor.dpl, (Bit16u) read_eflags());
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, old_CS);
          write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-10), cs_descriptor.dpl, (Bit16u) old_EIP);
          temp_SP -= 10;

          if (push_error) {
            temp_SP -= 2;
            write_new_stack_word_32(&new_stack, temp_SP, cs_descriptor.dpl, error_code);
          }
        }

        SP = temp_SP;
      }

      // load new CS:eIP values from gate
      // set CPL to new code segment DPL
      // set RPL of CS to CPL
      load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);

      // load new SS:eSP values from TSS
      load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

      if (is_v8086_mode)
      {
        // leaving v8086: the data segment registers are invalidated
        // (their old values were pushed above for the handler / IRET)
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].cache.valid = 0;
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = 0;
      }
    }
    else
    {
      BX_DEBUG(("interrupt(): INTERRUPT TO SAME PRIVILEGE"));

      // v8086 interrupts must always go to an inner (ring 0) handler
      if (v8086_mode() && (IS_CODE_SEGMENT_CONFORMING(cs_descriptor.type) || cs_descriptor.dpl != 0)) {
        // if code segment DPL != 0 then #GP(new code segment selector)
        BX_ERROR(("interrupt(): code segment conforming or DPL(%d) != 0 in v8086 mode", cs_descriptor.dpl));
        exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc);
      }

      // EIP must be in CS limit else #GP(0)
      if (gate_dest_offset > cs_descriptor.u.segment.limit_scaled) {
        BX_ERROR(("interrupt(): IP > CS descriptor limit"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // push flags onto stack
      // push current CS selector onto stack
      // push return offset onto stack
      if (gate_descriptor.type >= 14) { // 386 gate
        push_32(read_eflags());
        push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        push_32(EIP);
        if (push_error)
          push_32(error_code);
      }
      else { // 286 gate
        push_16((Bit16u) read_eflags());
        push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        push_16(IP);
        if (push_error)
          push_16(error_code);
      }

      // load CS:IP from gate
      // load CS descriptor
      // set the RPL field of CS to CPL
      load_cs(&cs_selector, &cs_descriptor, CPL);
    }

    EIP = gate_dest_offset;

    // if interrupt gate then set IF to 0
    if (!(gate_descriptor.type & 1)) // even is int-gate
      BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_TF();
    BX_CPU_THIS_PTR clear_NT();
    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_RF();
    return;

  default:
    // unreachable: gate type was validated above
    BX_PANIC(("bad descriptor type in interrupt()!"));
    break;
  }
}
|
659 |
|
|
|
660 |
|
|
// Deliver an interrupt while the CPU is in real mode: push FLAGS/CS/IP,
// then vector through the 4-byte IVT entry (IP:CS) at idtr.base + vector*4.
// push_error/error_code are accepted for signature symmetry with the
// protected/long mode paths but real mode never pushes an error code.
void BX_CPU_C::real_mode_int(Bit8u vector, bx_bool push_error, Bit16u error_code)
{
  // the whole 4-byte IVT entry must lie inside IDTR.limit, else #GP(0)
  if ((vector*4+3) > BX_CPU_THIS_PTR idtr.limit) {
    BX_ERROR(("interrupt(real mode) vector > idtr.limit"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // save the return context as a 16-bit frame: FLAGS, CS, IP
  push_16((Bit16u) read_eflags());
  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_16(IP);

  Bit16u handler_ip = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector);
  // CS.LIMIT can't change when in real/v8086 mode
  if (handler_ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("interrupt(real mode): instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // load the handler's CS:IP from the vector table entry
  Bit16u handler_cs = system_read_word(BX_CPU_THIS_PTR idtr.base + 4 * vector + 2);
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], handler_cs);
  EIP = handler_ip;

  /* INT affects the following flags: I,T */
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_TF();
#if BX_CPU_LEVEL >= 4
  BX_CPU_THIS_PTR clear_AC();
#endif
  BX_CPU_THIS_PTR clear_RF();
}
|
690 |
|
|
|
691 |
|
|
// Common front end for all event delivery.  Classifies the event type,
// notifies debugger/instrumentation hooks, discards pending traps and
// interrupt inhibits, then dispatches to the long-mode, real-mode or
// protected-mode delivery routine as appropriate.
void BX_CPU_C::interrupt(Bit8u vector, unsigned type, bx_bool push_error, Bit16u error_code)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_intsig;
#if BX_DEBUG_LINUX
  if (bx_dbg.linux_syscall) {
    if (vector == 0x80) bx_dbg_linux_syscall(BX_CPU_ID);
  }
#endif
  bx_dbg_interrupt(BX_CPU_ID, vector, error_code);
#endif
  //AO extended; was id,vector
  BX_INSTR_INTERRUPT(BX_CPU_ID, vector, type, push_error, error_code);

  invalidate_prefetch_q();

  // INT n / INT3 / INTO style events are "software" interrupts; every other
  // recognized type is delivered as a hardware event.  Unknown types panic.
  bx_bool soft_int = 0;
  if (type == BX_SOFTWARE_INTERRUPT || type == BX_SOFTWARE_EXCEPTION) {
    soft_int = 1;
  }
  else if (type != BX_PRIVILEGED_SOFTWARE_INTERRUPT &&
           type != BX_EXTERNAL_INTERRUPT &&
           type != BX_NMI &&
           type != BX_HARDWARE_EXCEPTION) {
    BX_PANIC(("interrupt(): unknown exception type %d", type));
  }

  BX_DEBUG(("interrupt(): vector = %02x, TYPE = %u, EXT = %u",
      vector, type, (unsigned) BX_CPU_THIS_PTR EXT));

  // Discard any traps and inhibits for new context; traps will
  // resume upon return.
  BX_CPU_THIS_PTR debug_trap = 0;
  BX_CPU_THIS_PTR inhibit_mask = 0;

#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  BX_CPU_THIS_PTR in_event = 1;
#endif

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    long_mode_int(vector, soft_int, push_error, error_code);
  }
  else
#endif
  {
    RSP_SPECULATIVE;

    // software interrupt can be redirected in v8086 mode
    if (type != BX_SOFTWARE_INTERRUPT || !v8086_mode() || !v86_redirect_interrupt(vector))
    {
      if (real_mode()) {
        real_mode_int(vector, push_error, error_code);
      }
      else {
        protected_mode_int(vector, soft_int, push_error, error_code);
      }
    }

    RSP_COMMIT;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  BX_CPU_THIS_PTR in_event = 0;
#endif

  BX_CPU_THIS_PTR EXT = 0;
}
|
768 |
|
|
|
769 |
|
|
/* Exception classes. These are used as indexes into the 'is_exception_OK'
 * array below, and are stored in the 'exception' array also
 */
#define BX_ET_BENIGN 0
#define BX_ET_CONTRIBUTORY 1
#define BX_ET_PAGE_FAULT 2

// Sentinel type for #DF itself: never a valid index into is_exception_OK
// (exception() checks for it before indexing, and escalates straight to
// triple fault when a further fault occurs while delivering a #DF).
#define BX_ET_DOUBLE_FAULT 10

// is_exception_OK[first][second] is nonzero when the second exception may
// be delivered normally; zero means the pair escalates to a double fault.
// Both indices are BX_ET_BENIGN / BX_ET_CONTRIBUTORY / BX_ET_PAGE_FAULT.
static const bx_bool is_exception_OK[3][3] = {
    { 1, 1, 1 }, /* 1st exception is BENIGN */
    { 1, 0, 1 }, /* 1st exception is CONTRIBUTORY */
    { 1, 0, 0 }  /* 1st exception is PAGE_FAULT */
};

// Exception delivery classes: faults restore RIP/RSP to the faulting
// instruction; traps and aborts do not (see exception() below).
#define BX_EXCEPTION_CLASS_TRAP 0
#define BX_EXCEPTION_CLASS_FAULT 1
#define BX_EXCEPTION_CLASS_ABORT 2
|
787 |
|
|
|
788 |
|
|
// Per-vector delivery properties, indexed by exception vector:
//   { exception_type (BX_ET_*), exception_class (BX_EXCEPTION_CLASS_*), push_error }
// push_error selects whether an error code is pushed for the vector.
struct BxExceptionInfo exceptions_info[BX_CPU_HANDLED_EXCEPTIONS] = {
  /* DE */ { BX_ET_CONTRIBUTORY, BX_EXCEPTION_CLASS_FAULT, 0 },
  /* DB */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 02 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 }, // NMI
  /* BP */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_TRAP,  0 },
  /* OF */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_TRAP,  0 },
  /* BR */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* UD */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* NM */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* DF */ { BX_ET_DOUBLE_FAULT, BX_EXCEPTION_CLASS_FAULT, 1 },
  // coprocessor segment overrun (286,386 only)
  /* 09 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* TS */ { BX_ET_CONTRIBUTORY, BX_EXCEPTION_CLASS_FAULT, 1 },
  /* NP */ { BX_ET_CONTRIBUTORY, BX_EXCEPTION_CLASS_FAULT, 1 },
  /* SS */ { BX_ET_CONTRIBUTORY, BX_EXCEPTION_CLASS_FAULT, 1 },
  /* GP */ { BX_ET_CONTRIBUTORY, BX_EXCEPTION_CLASS_FAULT, 1 },
  /* PF */ { BX_ET_PAGE_FAULT,   BX_EXCEPTION_CLASS_FAULT, 1 },
  /* 15 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 }, // reserved
  /* MF */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* AC */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 1 },
  /* MC */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_ABORT, 0 },
  /* XM */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* VE */ { BX_ET_PAGE_FAULT,   BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 21 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 22 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 23 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 24 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 25 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 26 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 27 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 28 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 29 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 },
  /* 30 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 }, // FIXME: SVM #SF
  /* 31 */ { BX_ET_BENIGN,       BX_EXCEPTION_CLASS_FAULT, 0 }
};
|
823 |
|
|
|
// Raise a CPU exception and deliver it through the IDT.  Handles fault-class
// RIP/RSP rollback, double- and triple-fault escalation, and #DB bookkeeping,
// then hands delivery to interrupt() and longjmps back to the decode loop
// (this function never returns to its caller).
//
// vector: 0..255: vector in IDT
// error_code: if exception generates an error code, push this error code
void BX_CPU_C::exception(unsigned vector, Bit16u error_code)
{
  BX_INSTR_EXCEPTION(BX_CPU_ID, vector, error_code);

#if BX_DEBUGGER
  bx_dbg_exception(BX_CPU_ID, vector, error_code);
#endif

  BX_DEBUG(("exception(0x%02x): error_code=%04x", vector, error_code));

  unsigned exception_type = 0;
  unsigned exception_class = BX_EXCEPTION_CLASS_FAULT;
  bx_bool push_error = 0;

  // Look up per-vector delivery properties from the exceptions_info table.
  if (vector < BX_CPU_HANDLED_EXCEPTIONS) {
     push_error = exceptions_info[vector].push_error;
     exception_class = exceptions_info[vector].exception_class;
     exception_type = exceptions_info[vector].exception_type;
  }
  else {
     BX_PANIC(("exception(%u): bad vector", vector));
  }

  // Merge the EXT bit (external-event flag) into bit 0 of the error code.
  // #PF error codes have a different format, and #DF's error code is
  // architecturally zero, so both are left untouched.
  if (vector != BX_PF_EXCEPTION && vector != BX_DF_EXCEPTION) {
    // Page faults have different format
    error_code = (error_code & 0xfffe) | BX_CPU_THIS_PTR EXT;
  }

#if BX_SUPPORT_VMX
  VMexit_Event(BX_HARDWARE_EXCEPTION, vector, error_code, push_error);
#endif

#if BX_SUPPORT_SVM
  SvmInterceptException(BX_HARDWARE_EXCEPTION, vector, error_code, push_error);
#endif

  if (exception_class == BX_EXCEPTION_CLASS_FAULT)
  {
    // restore RIP/RSP to value before error occurred
    RIP = BX_CPU_THIS_PTR prev_rip;
    if (BX_CPU_THIS_PTR speculative_rsp)
      RSP = BX_CPU_THIS_PTR prev_rsp;

    // A fault while delivering a double fault is a triple fault: reset the
    // machine (or shut down), then return to the main loop.
    if (BX_CPU_THIS_PTR last_exception_type == BX_ET_DOUBLE_FAULT)
    {
      debug(BX_CPU_THIS_PTR prev_rip); // print debug information to the log
#if BX_SUPPORT_VMX
      VMexit_TripleFault();
#endif
#if BX_DEBUGGER
      // trap into debugger (similar as done when PANIC occurred)
      bx_debug_break();
#endif
      if (SIM->get_param_bool(BXPN_RESET_ON_TRIPLE_FAULT)->get()) {
        BX_ERROR(("exception(): 3rd (%d) exception with no resolution, shutdown status is %02xh, resetting", vector, DEV_cmos_get_reg(0x0f)));
        bx_pc_system.Reset(BX_RESET_HARDWARE);
      }
      else {
        BX_PANIC(("exception(): 3rd (%d) exception with no resolution", vector));
        BX_ERROR(("WARNING: Any simulation after this point is completely bogus !"));
        shutdown();
      }
      longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
    }

    // RF is set in the image pushed for every fault except #DB.
    if (vector != BX_DB_EXCEPTION) BX_CPU_THIS_PTR assert_RF();
  }

  if (vector == BX_DB_EXCEPTION) {
    // Commit debug events to DR6: preserve DR6.BS and DR6.BD values,
    // only software can clear them
    BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff6ff0) |
                                (BX_CPU_THIS_PTR debug_trap & 0x0000e00f);

    // clear GD flag in the DR7 prior entering debug exception handler
    BX_CPU_THIS_PTR dr7.set_GD(0);
  }

  // From here on, nested exceptions are considered external events.
  BX_CPU_THIS_PTR EXT = 1;

  /* if we've already had 1st exception, see if 2nd causes a
   * Double Fault instead. Otherwise, just record 1st exception.
   */
  if (exception_type != BX_ET_DOUBLE_FAULT) {
    if (! is_exception_OK[BX_CPU_THIS_PTR last_exception_type][exception_type]) {
      // Incompatible pair: escalate to #DF (recursive call never returns).
      exception(BX_DF_EXCEPTION, 0);
    }
  }

  BX_CPU_THIS_PTR last_exception_type = exception_type;

  if (real_mode()) {
    push_error = 0; // not INT, no error code pushed
    error_code = 0;
  }

  interrupt(vector, BX_HARDWARE_EXCEPTION, push_error, error_code);

  BX_CPU_THIS_PTR last_exception_type = 0; // error resolved

  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
}
|