/////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc 11106 2012-03-25 11:54:32Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2005-2012 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
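
// call_protected: far CALL while in protected (or long) mode. The new CS
// selector may reference an ordinary code segment, or a system descriptor
// (available TSS, task gate or call gate); the routine validates the
// descriptor and dispatches accordingly.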

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  /* new cs selector must not be null, else #GP(0) */
  if ((cs_raw & 0xfffc) == 0) {
    BX_ERROR(("call_protected: CS selector null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(cs_raw, &cs_selector);
  // check new CS selector index within its descriptor limits,
  // else #GP(new CS selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // examine AR byte of selected descriptor for various legal values
  if (cs_descriptor.valid==0) {
    BX_ERROR(("call_protected: invalid CS descriptor"));
    exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
  }

  if (cs_descriptor.segment)   // normal segment
  {
    check_cs(&cs_descriptor, cs_raw, BX_SELECTOR_RPL(cs_raw), CPL);

#if BX_SUPPORT_X86_64
    if (long_mode() && cs_descriptor.u.segment.l) {
      Bit64u temp_rsp = RSP;
      // moving to long mode, push return address onto 64-bit stack
      if (i->os64L()) {
        write_new_stack_qword_64(temp_rsp -  8, cs_descriptor.dpl,
             BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword_64(temp_rsp - 16, cs_descriptor.dpl, RIP);
        temp_rsp -= 16;
      }
      else if (i->os32L()) {
        write_new_stack_dword_64(temp_rsp - 4, cs_descriptor.dpl,
             BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword_64(temp_rsp - 8, cs_descriptor.dpl, EIP);
        temp_rsp -= 8;
      }
      else {
        write_new_stack_word_64(temp_rsp - 2, cs_descriptor.dpl,
             BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word_64(temp_rsp - 4, cs_descriptor.dpl, IP);
        temp_rsp -= 4;
      }

      // load code segment descriptor into CS cache
      // load CS with new code segment selector
      // set RPL of CS to CPL
      branch_far64(&cs_selector, &cs_descriptor, disp, CPL);

      RSP = temp_rsp;
    }
    else
#endif
    {
      Bit32u temp_RSP;

      // moving to legacy mode, push return address onto 32-bit stack
      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        temp_RSP = ESP;
      else
        temp_RSP = SP;

#if BX_SUPPORT_X86_64
      if (i->os64L()) {
        write_new_stack_qword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_qword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 16, cs_descriptor.dpl, RIP);
        temp_RSP -= 16;
      }
      else
#endif
      if (i->os32L()) {
        write_new_stack_dword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_dword_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 8, cs_descriptor.dpl, EIP);
        temp_RSP -= 8;
      }
      else {
        write_new_stack_word_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 2, cs_descriptor.dpl,
            BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
        write_new_stack_word_32(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS],
            temp_RSP - 4, cs_descriptor.dpl, IP);
        temp_RSP -= 4;
      }

      // load code segment descriptor into CS cache
      // load CS with new code segment selector
      // set RPL of CS to CPL
      branch_far64(&cs_selector, &cs_descriptor, disp, CPL);

      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        ESP = (Bit32u) temp_RSP;
      else
         SP = (Bit16u) temp_RSP;
    }

    return;
  }
  else { // gate & special segment
    bx_descriptor_t  gate_descriptor = cs_descriptor;
    bx_selector_t    gate_selector = cs_selector;

    // descriptor DPL must be >= CPL else #GP(gate selector)
    if (gate_descriptor.dpl < CPL) {
      BX_ERROR(("call_protected: descriptor.dpl < CPL"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }

    // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
    if (gate_descriptor.dpl < gate_selector.rpl) {
      BX_ERROR(("call_protected: descriptor.dpl < selector.rpl"));
      exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }

#if BX_SUPPORT_X86_64
    if (long_mode()) {
      // call gate type is higher priority than non-present bit check
      if (gate_descriptor.type != BX_386_CALL_GATE) {
        BX_ERROR(("call_protected: gate type %u unsupported in long mode", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
      }
      // gate descriptor must be present else #NP(gate selector)
      if (! IS_PRESENT(gate_descriptor)) {
        BX_ERROR(("call_protected: call gate not present"));
        exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
      }

      call_gate64(&gate_selector);
      return;
    }
#endif

    switch (gate_descriptor.type) {
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
        if (gate_descriptor.type==BX_SYS_SEGMENT_AVAIL_286_TSS)
          BX_DEBUG(("call_protected: 16bit available TSS"));
        else
          BX_DEBUG(("call_protected: 32bit available TSS"));

        if (gate_descriptor.valid==0 || gate_selector.ti) {
          BX_ERROR(("call_protected: call bad TSS selector !"));
          exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
        }

        // TSS must be present, else #NP(TSS selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: call not present TSS !"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
        }

        // SWITCH_TASKS _without_ nesting to TSS
        task_switch(i, &gate_selector, &gate_descriptor,
          BX_TASK_FROM_CALL, dword1, dword2);
        return;

      case BX_TASK_GATE:
        task_gate(i, &gate_selector, &gate_descriptor, BX_TASK_FROM_CALL);
        return;

      case BX_286_CALL_GATE:
      case BX_386_CALL_GATE:
        // gate descriptor must be present else #NP(gate selector)
        if (! IS_PRESENT(gate_descriptor)) {
          BX_ERROR(("call_protected: gate not present"));
          exception(BX_NP_EXCEPTION, cs_raw & 0xfffc);
        }
        call_gate(&gate_descriptor);
        return;

      default: // can't get here
        BX_ERROR(("call_protected(): gate.type(%u) unsupported", (unsigned) gate_descriptor.type));
        exception(BX_GP_EXCEPTION, cs_raw & 0xfffc);
    }
  }
}
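
// call_gate: legacy (non long mode) handling of a CALL through a 286 or
// 386 call gate. Validates the target code segment referenced by the gate,
// switches to the inner stack taken from the TSS when the target is a more
// privileged non-conforming segment, and copies the gate's parameter words
// from the caller's stack to the new stack.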

void BX_CPP_AttrRegparmN(1) BX_CPU_C::call_gate(bx_descriptor_t *gate_descriptor)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2;
  bx_descriptor_t cs_descriptor;

  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_protected: call gate"));

  Bit16u dest_selector = gate_descriptor->u.gate.dest_selector;
  Bit32u new_EIP       = gate_descriptor->u.gate.dest_offset;

  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_protected: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(dest_selector, &cs_selector);
  // selector must be within its descriptor table limits,
  //   else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // AR byte of selected descriptor must indicate code segment,
  //   else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  //   else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) || cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_protected: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_protected: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc);
  }

  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    Bit16u SS_for_cpl_x;
    Bit32u ESP_for_cpl_x;
    bx_selector_t   ss_selector;
    bx_descriptor_t ss_descriptor;
    Bit16u   return_SS, return_CS;
    Bit32u   return_ESP, return_EIP;

    BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

    // get new SS selector for new privilege level from TSS
    get_SS_ESP_from_TSS(cs_descriptor.dpl, &SS_for_cpl_x, &ESP_for_cpl_x);

    // check selector & descriptor for new SS:
    // selector must not be null, else #TS(0)
    if ((SS_for_cpl_x & 0xfffc) == 0) {
      BX_ERROR(("call_protected: new SS null"));
      exception(BX_TS_EXCEPTION, 0);
    }

    // selector index must be within its descriptor table limits,
    //   else #TS(SS selector)
    parse_selector(SS_for_cpl_x, &ss_selector);
    fetch_raw_descriptor(&ss_selector, &dword1, &dword2, BX_TS_EXCEPTION);
    parse_descriptor(dword1, dword2, &ss_descriptor);

    // selector's RPL must equal DPL of code segment,
    //   else #TS(SS selector)
    if (ss_selector.rpl != cs_descriptor.dpl) {
      BX_ERROR(("call_protected: SS selector.rpl != CS descr.dpl"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // stack segment DPL must equal DPL of code segment,
    //   else #TS(SS selector)
    if (ss_descriptor.dpl != cs_descriptor.dpl) {
      BX_ERROR(("call_protected: SS descr.rpl != CS descr.dpl"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // descriptor must indicate writable data segment,
    //   else #TS(SS selector)
    if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
        IS_CODE_SEGMENT(ss_descriptor.type) || !IS_DATA_SEGMENT_WRITEABLE(ss_descriptor.type))
    {
      BX_ERROR(("call_protected: ss descriptor is not writable data seg"));
      exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // segment must be present, else #SS(SS selector)
    if (! IS_PRESENT(ss_descriptor)) {
      BX_ERROR(("call_protected: ss descriptor not present"));
      exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc);
    }

    // get word count from call gate, mask to 5 bits
    unsigned param_count = gate_descriptor->u.gate.param_count & 0x1f;
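    // these param_count entries (words for a 286 gate, dwords for a 386
    // gate) are copied from the caller's stack to the new stack below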

    // save return SS:eSP to be pushed on new stack
    return_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
      return_ESP = ESP;
    else
      return_ESP = SP;

    // save return CS:eIP to be pushed on new stack
    return_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
    if (cs_descriptor.u.segment.d_b)
      return_EIP = EIP;
    else
      return_EIP = IP;

    // Prepare new stack segment
    bx_segment_reg_t new_stack;
    new_stack.selector = ss_selector;
    new_stack.cache = ss_descriptor;
    new_stack.selector.rpl = cs_descriptor.dpl;
    // add cpl to the selector value
    new_stack.selector.value = (0xfffc & new_stack.selector.value) |
      new_stack.selector.rpl;

    /* load new SS:SP value from TSS */
    if (ss_descriptor.u.segment.d_b) {
      Bit32u temp_ESP = ESP_for_cpl_x;

      // push pointer of old stack onto new stack
      if (gate_descriptor->type==BX_386_CALL_GATE) {
        write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_SS);
        write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_ESP);
        temp_ESP -= 8;

        for (unsigned n=param_count; n>0; n--) {
          temp_ESP -= 4;
          Bit32u param = stack_read_dword(return_ESP + (n-1)*4);
          write_new_stack_dword_32(&new_stack, temp_ESP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_dword_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_CS);
        write_new_stack_dword_32(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_EIP);
        temp_ESP -= 8;
      }
      else {
        write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_SS);
        write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_ESP);
        temp_ESP -= 4;

        for (unsigned n=param_count; n>0; n--) {
          temp_ESP -= 2;
          Bit16u param = stack_read_word(return_ESP + (n-1)*2);
          write_new_stack_word_32(&new_stack, temp_ESP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_word_32(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_CS);
        write_new_stack_word_32(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_EIP);
        temp_ESP -= 4;
      }

      ESP = temp_ESP;
    }
    else {
      Bit16u temp_SP = (Bit16u) ESP_for_cpl_x;

      // push pointer of old stack onto new stack
      if (gate_descriptor->type==BX_386_CALL_GATE) {
        write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_SS);
        write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_ESP);
        temp_SP -= 8;

        for (unsigned n=param_count; n>0; n--) {
          temp_SP -= 4;
          Bit32u param = stack_read_dword(return_ESP + (n-1)*4);
          write_new_stack_dword_32(&new_stack, temp_SP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, return_CS);
        write_new_stack_dword_32(&new_stack, (Bit16u)(temp_SP-8), cs_descriptor.dpl, return_EIP);
        temp_SP -= 8;
      }
      else {
        write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_SS);
        write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_ESP);
        temp_SP -= 4;

        for (unsigned n=param_count; n>0; n--) {
          temp_SP -= 2;
          Bit16u param = stack_read_word(return_ESP + (n-1)*2);
          write_new_stack_word_32(&new_stack, temp_SP, cs_descriptor.dpl, param);
        }
        // push return address onto new stack
        write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-2), cs_descriptor.dpl, return_CS);
        write_new_stack_word_32(&new_stack, (Bit16u)(temp_SP-4), cs_descriptor.dpl, (Bit16u) return_EIP);
        temp_SP -= 4;
      }

      SP = temp_SP;
    }

    // new eIP must be in code segment limit else #GP(0)
    if (new_EIP > cs_descriptor.u.segment.limit_scaled) {
      BX_ERROR(("call_protected: EIP not within CS limits"));
      exception(BX_GP_EXCEPTION, 0);
    }

    /* load SS descriptor */
    load_ss(&ss_selector, &ss_descriptor, cs_descriptor.dpl);

    /* load new CS:IP value from gate */
    /* load CS descriptor */
    /* set CPL to stack segment DPL */
    /* set RPL of CS to CPL */
    load_cs(&cs_selector, &cs_descriptor, cs_descriptor.dpl);
    EIP = new_EIP;
  }
  else   // CALL GATE TO SAME PRIVILEGE
  {
    BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

    if (gate_descriptor->type == BX_386_CALL_GATE) {
      // call gate 32bit, push return address onto stack
      push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_32(EIP);
    }
    else {
      // call gate 16bit, push return address onto stack
      push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
      push_16(IP);
    }

    // load CS:EIP from gate
    // load code segment descriptor into CS register
    // set RPL of CS to CPL
    branch_far32(&cs_selector, &cs_descriptor, new_EIP, CPL);
  }
}

#if BX_SUPPORT_X86_64
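// call_gate64: long-mode handling of a CALL through a 64-bit call gate.
// The gate must target a present 64-bit code segment. A call to a more
// privileged non-conforming segment switches to the RSP taken from the
// TSS and pushes the old SS:RSP and CS:RIP as qwords; a same-privilege
// call pushes only the return CS:RIP on the current stack.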
void BX_CPP_AttrRegparmN(1) BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
{
  bx_selector_t cs_selector;
  Bit32u dword1, dword2, dword3;
  bx_descriptor_t cs_descriptor;
  bx_descriptor_t gate_descriptor;

  // examine code segment selector in call gate descriptor
  BX_DEBUG(("call_gate64: CALL 64bit call gate"));

  fetch_raw_descriptor_64(gate_selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &gate_descriptor);

  Bit16u dest_selector = gate_descriptor.u.gate.dest_selector;
  // selector must not be null else #GP(0)
  if ((dest_selector & 0xfffc) == 0) {
    BX_ERROR(("call_gate64: selector in gate null"));
    exception(BX_GP_EXCEPTION, 0);
  }

  parse_selector(dest_selector, &cs_selector);
  // selector must be within its descriptor table limits,
  //   else #GP(code segment selector)
  fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
  parse_descriptor(dword1, dword2, &cs_descriptor);

  // find the RIP in the gate_descriptor
  Bit64u new_RIP = gate_descriptor.u.gate.dest_offset;
  new_RIP |= ((Bit64u)dword3 << 32);
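  // (the 64-bit call gate is an expanded 16-byte descriptor; its third
  // dword supplies bits 63:32 of the target offset)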

  // AR byte of selected descriptor must indicate code segment,
  //   else #GP(code segment selector)
  // DPL of selected descriptor must be <= CPL,
  //   else #GP(code segment selector)
  if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
      IS_DATA_SEGMENT(cs_descriptor.type) ||
      cs_descriptor.dpl > CPL)
  {
    BX_ERROR(("call_gate64: selected descriptor is not code"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // In long mode, only 64-bit call gates are allowed, and they must point
  // to 64-bit code segments, else #GP(selector)
  if (! IS_LONG64_SEGMENT(cs_descriptor) || cs_descriptor.u.segment.d_b)
  {
    BX_ERROR(("call_gate64: not 64-bit code segment in call gate 64"));
    exception(BX_GP_EXCEPTION, dest_selector & 0xfffc);
  }

  // code segment must be present else #NP(selector)
  if (! IS_PRESENT(cs_descriptor)) {
    BX_ERROR(("call_gate64: code segment not present !"));
    exception(BX_NP_EXCEPTION, dest_selector & 0xfffc);
  }

  Bit64u old_CS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  Bit64u old_RIP = RIP;

  // CALL GATE TO MORE PRIVILEGE
  // if non-conforming code segment and DPL < CPL then
  if (IS_CODE_SEGMENT_NON_CONFORMING(cs_descriptor.type) && (cs_descriptor.dpl < CPL))
  {
    BX_DEBUG(("CALL GATE TO MORE PRIVILEGE LEVEL"));

    // get new RSP for new privilege level from TSS
    Bit64u RSP_for_cpl_x = get_RSP_from_TSS(cs_descriptor.dpl);
    Bit64u old_SS  = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
    Bit64u old_RSP = RSP;

    // push old stack long pointer onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x -  8, cs_descriptor.dpl, old_SS);
    write_new_stack_qword_64(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
    // push long pointer to return address onto new stack
    write_new_stack_qword_64(RSP_for_cpl_x - 24, cs_descriptor.dpl, old_CS);
    write_new_stack_qword_64(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_RIP);
    RSP_for_cpl_x -= 32;

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, cs_descriptor.dpl);

    // set up null SS descriptor
    load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], cs_descriptor.dpl);

    RSP = RSP_for_cpl_x;
  }
  else
  {
    BX_DEBUG(("CALL GATE TO SAME PRIVILEGE"));

    // push to 64-bit stack, switch to long64 guaranteed
    write_new_stack_qword_64(RSP -  8, CPL, old_CS);
    write_new_stack_qword_64(RSP - 16, CPL, old_RIP);

    // load CS:RIP (guaranteed to be in 64 bit mode)
    branch_far64(&cs_selector, &cs_descriptor, new_RIP, CPL);

    RSP -= 16;
  }
}
#endif