/////////////////////////////////////////////////////////////////////////
// $Id: iret.cc 11106 2012-03-25 11:54:32Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005-2012 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::iret_protected(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    long_iret(i);
    return;
  }
#endif
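
  // Three cases follow, per the IA-32 IRET semantics:
  //   NT=1            : return from a nested task via the TSS back link
  //   NT=0, RPL == CPL: return to the same privilege level
  //   NT=0, RPL >  CPL: return to an outer (less privileged) level,
  //                     possibly back to virtual-8086 mode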

  if (BX_CPU_THIS_PTR get_NT()) /* NT = 1: RETURN FROM NESTED TASK */
  {
    /* VM should never be set here: IRET from virtual-8086 mode is
     * handled before reaching iret_protected */
    Bit16u raw_link_selector;
    bx_selector_t link_selector;
    bx_descriptor_t tss_descriptor;

    if (BX_CPU_THIS_PTR get_VM())
      BX_PANIC(("iret_protected: VM shouldn't be set here!"));

    BX_DEBUG(("IRET: nested task return"));

    if (BX_CPU_THIS_PTR tr.cache.valid==0)
      BX_PANIC(("IRET: TR not valid"));

    // examine back link selector in TSS addressed by current TR
    raw_link_selector = system_read_word(BX_CPU_THIS_PTR tr.cache.u.segment.base);
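    // (the previous-task link occupies the first 16-bit field, offset 0,
    // of both the 286 and 386 TSS layouts, so a word read at the TSS base
    // fetches it)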

    // must specify global, else #TS(new TSS selector)
    parse_selector(raw_link_selector, &link_selector);

    if (link_selector.ti) {
      BX_ERROR(("iret: link selector.ti=1"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // index must be within GDT limits, else #TS(new TSS selector)
    fetch_raw_descriptor(&link_selector, &dword1, &dword2, BX_TS_EXCEPTION);

    // AR byte must specify TSS, else #TS(new TSS selector)
    // new TSS must be busy, else #TS(new TSS selector)
    parse_descriptor(dword1, dword2, &tss_descriptor);
    if (tss_descriptor.valid==0 || tss_descriptor.segment) {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }
    if (tss_descriptor.type != BX_SYS_SEGMENT_BUSY_286_TSS &&
        tss_descriptor.type != BX_SYS_SEGMENT_BUSY_386_TSS)
    {
      BX_ERROR(("iret: TSS selector points to bad TSS"));
      exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // TSS must be present, else #NP(new TSS selector)
    if (! IS_PRESENT(tss_descriptor)) {
      BX_ERROR(("iret: task descriptor.p == 0"));
      exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc);
    }

    // switch tasks (without nesting) to TSS specified by back link selector
    task_switch(i, &link_selector, &tss_descriptor,
                BX_TASK_FROM_IRET, dword1, dword2);
    return;
  }

  /* NT = 0: INTERRUPT RETURN ON STACK or STACK_RETURN_TO_V86 */
  unsigned top_nbytes_same;
  Bit32u new_eip = 0, new_esp, temp_ESP, new_eflags = 0;
  Bit16u new_ip = 0, new_flags = 0;

  /* 16bit opsize  |   32bit opsize
   * ==============================
   * SS     eSP+8  |   SS     eSP+16
   * SP     eSP+6  |   ESP    eSP+12
   * -------------------------------
   * FLAGS  eSP+4  |   EFLAGS eSP+8
   * CS     eSP+2  |   CS     eSP+4
   * IP     eSP+0  |   EIP    eSP+0
   */

  if (i->os32L()) {
    top_nbytes_same = 12;
  }
  else {
    top_nbytes_same = 6;
  }

  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    temp_ESP = ESP;
  else
    temp_ESP = SP;
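  // (the SS.B "big" bit selects the stack pointer size: ESP for a 32-bit
  // stack segment, SP for a 16-bit one)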

  if (i->os32L()) {
    new_eflags      =          stack_read_dword(temp_ESP + 8);
    raw_cs_selector = (Bit16u) stack_read_dword(temp_ESP + 4);
    new_eip         =          stack_read_dword(temp_ESP + 0);

    // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
    if (new_eflags & EFlagsVMMask) {
      if (CPL == 0) {
        stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
        return;
      }
      else BX_INFO(("iret: VM set on stack, CPL!=0"));
    }
  }
  else {
    new_flags       = stack_read_word(temp_ESP + 4);
    raw_cs_selector = stack_read_word(temp_ESP + 2);
    new_ip          = stack_read_word(temp_ESP + 0);
  }

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
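
  // RPL < CPL has already faulted above, so two cases remain:
  // RPL == CPL returns to the same privilege level, RPL > CPL returns
  // to an outer (less privileged) level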

  if (cs_selector.rpl == CPL) { /* INTERRUPT RETURN TO SAME LEVEL */
    /* top 6/12 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */
    if (i->os32L()) {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

      // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
      Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                          EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
      changeMask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
#endif
      if (CPL <= BX_CPU_THIS_PTR get_IOPL())
        changeMask |= EFlagsIFMask;
      if (CPL == 0)
        changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;
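
      // example: an IRET executed at CPL=3 with IOPL=0 keeps IF, IOPL,
      // VIF and VIP out of changeMask, so the flag values popped from
      // the stack for those bits are silently ignored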

      // IF only changed if (CPL <= EFLAGS.IOPL)
      // VIF, VIP, IOPL only changed if CPL == 0
      // VM unaffected
      writeEFlags(new_eflags, changeMask);
    }
    else {
      /* load CS-cache with new code segment descriptor */
      branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_ip, cs_selector.rpl);

      /* load flags with third word on stack */
      write_flags(new_flags, CPL==0, CPL<=BX_CPU_THIS_PTR get_IOPL());
    }

    /* increment stack by 6/12 */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
    return;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL */

    /* 16bit opsize  |   32bit opsize
     * ==============================
     * SS     eSP+8  |   SS     eSP+16
     * SP     eSP+6  |   ESP    eSP+12
     * FLAGS  eSP+4  |   EFLAGS eSP+8
     * CS     eSP+2  |   CS     eSP+4
     * IP     eSP+0  |   EIP    eSP+0
     */

    /* examine return SS selector and associated descriptor */
    if (i->os32L()) {
      raw_ss_selector = stack_read_word(temp_ESP + 16);
    }
    else {
      raw_ss_selector = stack_read_word(temp_ESP + 8);
    }

    /* selector must be non-null, else #GP(0) */
    if ((raw_ss_selector & 0xfffc) == 0) {
      BX_ERROR(("iret: SS selector null"));
      exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(raw_ss_selector, &ss_selector);

    /* selector RPL must = RPL of return CS selector,
     * else #GP(SS selector) */
    if (ss_selector.rpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.rpl != CS.rpl"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* selector index must be within its descriptor table limits,
     * else #GP(SS selector) */
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);

    parse_descriptor(dword1, dword2, &ss_descriptor);

    /* AR byte must indicate a writable data segment,
     * else #GP(SS selector) */
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
         IS_CODE_SEGMENT(ss_descriptor.type) ||
        !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("iret: SS AR byte not writable or code segment"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* stack segment DPL must equal the RPL of the return CS selector,
     * else #GP(SS selector) */
    if (ss_descriptor.dpl != cs_selector.rpl) {
      BX_ERROR(("iret: SS.dpl != CS selector RPL"));
      exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
    }

    /* SS must be present, else #NP(SS selector) */
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("iret: SS not present!"));
      exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc);
    }
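
    // all selector and descriptor faults have been raised at this point,
    // before any CPU state is modified below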

    if (i->os32L()) {
      new_esp    = stack_read_dword(temp_ESP + 12);
      new_eflags = stack_read_dword(temp_ESP + 8);
      new_eip    = stack_read_dword(temp_ESP + 0);
    }
    else {
      new_esp    = stack_read_word(temp_ESP + 6);
      new_eflags = stack_read_word(temp_ESP + 4);
      new_eip    = stack_read_word(temp_ESP + 0);
    }

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask |
                        EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
    changeMask |= (EFlagsIDMask | EFlagsACMask);  // ID/AC
#endif
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    /* load CS:EIP from stack */
    /* load the CS-cache with CS descriptor */
    /* set CPL to the RPL of the return CS selector */
    branch_far32(&cs_selector, &cs_descriptor, new_eip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    // load SS:eSP from stack
    // load the SS-cache with SS descriptor
    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    if (ss_descriptor.u.segment.d_b)
      ESP = new_esp;
    else
      SP = new_esp;
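
    // DS/ES/FS/GS holding selectors that are no longer valid at the new,
    // less privileged CPL are loaded with the null selector here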
    validate_seg_regs();
  }
}

#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::long_iret(bxInstruction_c *i)
{
  Bit16u raw_cs_selector, raw_ss_selector;
  bx_selector_t cs_selector, ss_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor, ss_descriptor;
  Bit32u new_eflags;
  Bit64u new_rip, new_rsp, temp_RSP;

  BX_DEBUG(("LONG MODE IRET"));
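
  // hardware task switching does not exist in long mode, so a set NT
  // flag at IRET time is an error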
  if (BX_CPU_THIS_PTR get_NT()) {
    BX_ERROR(("iret64: return from nested task in x86-64 mode!"));
    exception(BX_GP_EXCEPTION, 0);
  }

  /* 64bit opsize
   * ============
   * SS     eSP+32
   * ESP    eSP+24
   * -------------
   * EFLAGS eSP+16
   * CS     eSP+8
   * EIP    eSP+0
   */

  if (long64_mode()) temp_RSP = RSP;
  else {
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
    else temp_RSP = SP;
  }

  unsigned top_nbytes_same = 0; /* stop compiler warnings */

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    new_eflags      = (Bit32u) stack_read_qword(temp_RSP + 16);
    raw_cs_selector = (Bit16u) stack_read_qword(temp_RSP + 8);
    new_rip         =          stack_read_qword(temp_RSP + 0);
    top_nbytes_same = 24;
  }
  else
#endif
  if (i->os32L()) {
    new_eflags      =          stack_read_dword(temp_RSP + 8);
    raw_cs_selector = (Bit16u) stack_read_dword(temp_RSP + 4);
    new_rip         = (Bit64u) stack_read_dword(temp_RSP + 0);
    top_nbytes_same = 12;
  }
  else {
    new_eflags      =          stack_read_word(temp_RSP + 4);
    raw_cs_selector =          stack_read_word(temp_RSP + 2);
    new_rip         = (Bit64u) stack_read_word(temp_RSP + 0);
    top_nbytes_same = 6;
  }
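
  // with 64-bit operand size the frame also holds SS:RSP (at +32/+24);
  // those slots are read later, in the outer-privilege-level path below,
  // which a 64-bit operand-size IRET always takes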

  // ignore VM flag in long mode
  new_eflags &= ~EFlagsVMMask;

  parse_selector(raw_cs_selector, &cs_selector);

  // return CS selector must be non-null, else #GP(0)
  if ((raw_cs_selector & 0xfffc) == 0) {
    BX_ERROR(("iret64: return CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // selector index must be within descriptor table limits,
  // else #GP(return selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // return CS selector RPL must be >= CPL, else #GP(return selector)
  if (cs_selector.rpl < CPL) {
    BX_ERROR(("iret64: return selector RPL < CPL"));
    exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc);
  }

  // check code-segment descriptor
  check_cs(&cs_descriptor, raw_cs_selector, 0, cs_selector.rpl);
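
  // a 64-bit operand-size IRET must always pop SS:RSP, so the
  // same-privilege-level shortcut below is taken only for 16/32-bit
  // operand sizes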

  /* INTERRUPT RETURN TO SAME PRIVILEGE LEVEL */
  if (cs_selector.rpl == CPL && !i->os64L())
  {
    /* top 24 bytes on stack must be within limits, else #SS(0) */
    /* satisfied above */

    /* load CS:EIP from stack */
    /* load CS-cache with new code segment descriptor */
    if (cs_descriptor.u.segment.l) {
      branch_far64(&cs_selector, &cs_descriptor, new_rip, CPL);
    }
    else {
      branch_far32(&cs_selector, &cs_descriptor, (Bit32u) new_rip, CPL);
    }

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (CPL <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (CPL == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    // IF only changed if (CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    /* we are NOT in 64-bit mode */
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      ESP += top_nbytes_same;
    else
      SP += top_nbytes_same;
  }
  else { /* INTERRUPT RETURN TO OUTER PRIVILEGE LEVEL or 64 BIT MODE */
    /* 64bit opsize
     * ============
     * SS     eSP+32
     * ESP    eSP+24
     * EFLAGS eSP+16
     * CS     eSP+8
     * EIP    eSP+0
     */

    /* examine return SS selector and associated descriptor */
#if BX_SUPPORT_X86_64
    if (i->os64L()) {
      raw_ss_selector = (Bit16u) stack_read_qword(temp_RSP + 32);
      new_rsp         =          stack_read_qword(temp_RSP + 24);
    }
    else
#endif
    {
      if (i->os32L()) {
        raw_ss_selector = (Bit16u) stack_read_dword(temp_RSP + 16);
        new_rsp         = (Bit64u) stack_read_dword(temp_RSP + 12);
      }
      else {
        raw_ss_selector = stack_read_word(temp_RSP + 8);
        new_rsp         = (Bit64u) stack_read_word(temp_RSP + 6);
      }
    }
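
    // a null SS is tolerated only when returning to a 64-bit code segment
    // at RPL != 3; any other null SS faults with #GP(0)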
    if ((raw_ss_selector & 0xfffc) == 0) {
      if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_selector.rpl == 3) {
        BX_ERROR(("iret64: SS selector null"));
        exception(BX_GP_EXCEPTION, 0);
      }
    }
    else {
      parse_selector(raw_ss_selector, &ss_selector);

      /* selector RPL must = RPL of return CS selector,
       * else #GP(SS selector) */
      if (ss_selector.rpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.rpl != CS.rpl"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* selector index must be within its descriptor table limits,
       * else #GP(SS selector) */
      fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_GP_EXCEPTION);
      parse_descriptor(dword1, dword2, &ss_descriptor);

      /* AR byte must indicate a writable data segment,
       * else #GP(SS selector) */
      if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
           IS_CODE_SEGMENT(ss_descriptor.type) ||
          !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
      {
        BX_ERROR(("iret64: SS AR byte not writable or code segment"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* stack segment DPL must equal the RPL of the return CS selector,
       * else #GP(SS selector) */
      if (ss_descriptor.dpl != cs_selector.rpl) {
        BX_ERROR(("iret64: SS.dpl != CS selector RPL"));
        exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc);
      }

      /* SS must be present, else #NP(SS selector) */
      if (! IS_PRESENT(ss_descriptor)) {
        BX_ERROR(("iret64: SS not present!"));
        exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc);
      }
    }

    Bit8u prev_cpl = CPL; /* previous CPL */

    // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
    Bit32u changeMask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsDFMask |
                        EFlagsNTMask | EFlagsRFMask | EFlagsIDMask | EFlagsACMask;
    if (prev_cpl <= BX_CPU_THIS_PTR get_IOPL())
      changeMask |= EFlagsIFMask;
    if (prev_cpl == 0)
      changeMask |= EFlagsVIPMask | EFlagsVIFMask | EFlagsIOPLMask;

    if (! i->os32L()) // 16 bit
      changeMask &= 0xffff;

    /* set CPL to the RPL of the return CS selector */
    branch_far64(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl);

    // IF only changed if (prev_CPL <= EFLAGS.IOPL)
    // VIF, VIP, IOPL only changed if prev_CPL == 0
    // VM unaffected
    writeEFlags(new_eflags, changeMask);

    if ((raw_ss_selector & 0xfffc) != 0) {
      // load SS:RSP from stack
      // load the SS-cache with SS descriptor
      load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
    }
    else {
      // we are in 64-bit mode!
      load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], raw_ss_selector);
    }
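
    // when returning to 64-bit mode the full RSP is loaded from the frame;
    // SS.B is consulted only for compatibility/legacy-mode returns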
    if (long64_mode()) RSP = new_rsp;
    else {
      if (ss_descriptor.u.segment.d_b) ESP = (Bit32u) new_rsp;
      else SP = (Bit16u) new_rsp;
    }

    if (prev_cpl != CPL) validate_seg_regs();
  }
}
#endif