1 |
2 |
 |
/////////////////////////////////////////////////////////////////////////
|
2 |
|
|
// $Id: vm8086.cc 11107 2012-03-25 19:07:17Z sshwarts $
|
3 |
|
|
/////////////////////////////////////////////////////////////////////////
|
4 |
|
|
//
|
5 |
|
|
// Copyright (C) 2001-2012 The Bochs Project
|
6 |
|
|
//
|
7 |
|
|
// This library is free software; you can redistribute it and/or
|
8 |
|
|
// modify it under the terms of the GNU Lesser General Public
|
9 |
|
|
// License as published by the Free Software Foundation; either
|
10 |
|
|
// version 2 of the License, or (at your option) any later version.
|
11 |
|
|
//
|
12 |
|
|
// This library is distributed in the hope that it will be useful,
|
13 |
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
14 |
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
15 |
|
|
// Lesser General Public License for more details.
|
16 |
|
|
//
|
17 |
|
|
// You should have received a copy of the GNU Lesser General Public
|
18 |
|
|
// License along with this library; if not, write to the Free Software
|
19 |
|
|
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
20 |
|
|
/////////////////////////////////////////////////////////////////////////
|
21 |
|
|
|
22 |
|
|
#define NEED_CPU_REG_SHORTCUTS 1
|
23 |
|
|
#include "bochs.h"
|
24 |
|
|
#include "cpu.h"
|
25 |
|
|
#define LOG_THIS BX_CPU_THIS_PTR
|
26 |
|
|
|
27 |
|
|
//
|
28 |
|
|
// Notes:
|
29 |
|
|
//
|
30 |
|
|
// The high bits of the 32bit eip image are ignored by
|
31 |
|
|
// the IRET to VM. The high bits of the 32bit esp image
|
32 |
|
|
// are loaded into ESP. A subsequent push uses
|
33 |
|
|
// only the low 16bits since it's in VM. In neither case
|
34 |
|
|
// did a protection fault occur during actual tests. This
|
35 |
|
|
// is contrary to the Intel docs which claim a #GP for
|
36 |
|
|
// eIP out of code limits.
|
37 |
|
|
//
|
38 |
|
|
// IRET to VM does affect IOPL, IF, VM, and RF
|
39 |
|
|
//
|
40 |
|
|
|
41 |
|
|
#if BX_CPU_LEVEL >= 3
|
42 |
|
|
|
43 |
|
|
void BX_CPU_C::stack_return_to_v86(Bit32u new_eip, Bit32u raw_cs_selector, Bit32u flags32)
{
  // Complete an IRET from protected mode back into virtual-8086 mode.
  // Caller guarantees: 32-bit effective operand size, VM set in the upper
  // 16 bits of the popped eFLAGS image, and CPL == 0. CS:EIP and EFLAGS
  // were already read by the caller and are handed in as arguments.
  //
  // Remaining stack frame at entry:
  //
  //   ----------------
  //   |   | OLD GS   |  eSP+32
  //   |   | OLD FS   |  eSP+28
  //   |   | OLD DS   |  eSP+24
  //   |   | OLD ES   |  eSP+20
  //   |   | OLD SS   |  eSP+16
  //   |   OLD ESP    |  eSP+12
  //   |  OLD EFLAGS  |  eSP+8
  //   |   | OLD CS   |  eSP+4
  //   |   OLD EIP    |  eSP+0
  //   ----------------

  // Use the full 32-bit ESP only when the stack segment is a 32-bit one.
  Bit32u temp_ESP = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b ?
                    ESP : SP;

  // Fetch the new SS:ESP image from the outgoing stack.
  Bit32u new_esp         = stack_read_dword(temp_ESP+12);
  Bit16u raw_ss_selector = (Bit16u) stack_read_dword(temp_ESP+16);

  // Fetch the ES/DS/FS/GS selector images.
  Bit16u raw_es_selector = (Bit16u) stack_read_dword(temp_ESP+20);
  Bit16u raw_ds_selector = (Bit16u) stack_read_dword(temp_ESP+24);
  Bit16u raw_fs_selector = (Bit16u) stack_read_dword(temp_ESP+28);
  Bit16u raw_gs_selector = (Bit16u) stack_read_dword(temp_ESP+32);

  // IRET to VM also affects IOPL, IF, VM and RF.
  writeEFlags(flags32, EFlagsValidMask);

  // Load CS:IP; already read from the stack and passed as arguments.
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value = raw_cs_selector;
  EIP = new_eip & 0xffff; // high bits of the EIP image are ignored

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value = raw_es_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value = raw_ds_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value = raw_fs_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value = raw_gs_selector;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value = raw_ss_selector;
  ESP = new_esp; // full 32 bits of the ESP image are loaded

  // Rebuild the segment-register cache state for v8086 execution.
  init_v8086_mode();
}
|
94 |
|
|
|
95 |
|
|
#if BX_CPU_LEVEL >= 5
|
96 |
|
|
#define BX_CR4_VME_ENABLED (BX_CPU_THIS_PTR cr4.get_VME())
|
97 |
|
|
#else
|
98 |
|
|
#define BX_CR4_VME_ENABLED (0)
|
99 |
|
|
#endif
|
100 |
|
|
|
101 |
|
|
void BX_CPU_C::iret16_stack_return_from_v86(bxInstruction_c *i)
{
  // Handle a 16-bit operand-size IRET executed while in virtual-8086 mode.
  // Without VME assistance and with IOPL < 3 the instruction must be
  // reflected to the virtual-8086 monitor via #GP(0).
  if ((BX_CPU_THIS_PTR get_IOPL() < 3) && (BX_CR4_VME_ENABLED == 0)) {
    // trap to virtual 8086 monitor
    BX_DEBUG(("IRET in vm86 with IOPL != 3, VME = 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Pop the return frame: IP, then CS, then FLAGS.
  Bit16u ip      = pop_16();
  Bit16u cs_raw  = pop_16();
  Bit16u flags16 = pop_16();

#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_VME() && BX_CPU_THIS_PTR get_IOPL() < 3)
  {
    // VME-assisted IRET: fault if the popped image would set TF, or would
    // set IF while a virtual interrupt is pending (VIP).
    if ((flags16 & EFlagsTFMask) ||
        ((flags16 & EFlagsIFMask) && BX_CPU_THIS_PTR get_VIP()))
    {
      BX_DEBUG(("iret16_stack_return_from_v86(): #GP(0) in VME mode"));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = (Bit32u) ip;

    // IF and IOPL stay unchanged; the popped IF image lands in VIF
    // (EFLAGS.VIF = TMP_FLAGS.IF).
    Bit32u flags32 = (Bit32u) flags16;
    if (flags16 & EFlagsIFMask) flags32 |= EFlagsVIFMask;
    writeEFlags(flags32, EFlagsOSZAPCMask | EFlagsTFMask |
                         EFlagsDFMask | EFlagsNTMask | EFlagsVIFMask);

    return;
  }
#endif

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = (Bit32u) ip;
  write_flags(flags16, /*IOPL*/ 0, /*IF*/ 1);
}
|
143 |
|
|
|
144 |
|
|
void BX_CPU_C::iret32_stack_return_from_v86(bxInstruction_c *i)
{
  // Handle a 32-bit operand-size IRET executed while in virtual-8086 mode.
  // Only permitted with IOPL == 3; otherwise reflect to the monitor.
  if (BX_CPU_THIS_PTR get_IOPL() < 3) {
    // trap to virtual 8086 monitor
    BX_DEBUG(("IRET in vm86 with IOPL != 3, VME = 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // Bits IRET may rewrite in this mode, out of:
  // ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
  Bit32u change_mask = EFlagsOSZAPCMask | EFlagsTFMask | EFlagsIFMask
                     | EFlagsDFMask | EFlagsNTMask | EFlagsRFMask;
#if BX_CPU_LEVEL >= 4
  change_mask |= (EFlagsIDMask | EFlagsACMask); // ID/AC
#endif

  // Pop the 32-bit return frame: EIP, then CS, then EFLAGS.
  Bit32u eip     = pop_32();
  Bit32u cs_raw  = pop_32();
  Bit32u flags32 = pop_32();

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) cs_raw);
  EIP = eip;
  // VIF, VIP, VM, IOPL unchanged
  writeEFlags(flags32, change_mask);
}
|
171 |
|
|
|
172 |
|
|
int BX_CPU_C::v86_redirect_interrupt(Bit8u vector)
{
  // Attempt to deliver a software interrupt raised in virtual-8086 mode.
  // Returns 1 when the interrupt was redirected through the virtual-mode
  // (real-mode style) IDT; returns 0 when the caller must deliver it
  // through the protected-mode IDT. Raises #GP(0) when neither applies.
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_VME())
  {
    bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
    // A TSS shorter than 103 bytes cannot hold the I/O base field,
    // so VME redirection is impossible.
    if (BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled < 103) {
      BX_ERROR(("v86_redirect_interrupt(): TR.limit < 103 in VME"));
      exception(BX_GP_EXCEPTION, 0);
    }

    // The software interrupt redirection bitmap starts 32 bytes below
    // the I/O permission bitmap; one bit per interrupt vector.
    Bit32u io_base    = system_read_word(tr_base + 102);
    Bit32u bitmap_ofs = io_base - 32 + (vector >> 3);
    if (bitmap_ofs > BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled) {
      BX_ERROR(("v86_redirect_interrupt(): failed to fetch VME redirection bitmap"));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit8u redirection_byte = system_read_byte(tr_base + bitmap_ofs);
    if (!(redirection_byte & (1 << (vector & 7))))
    {
      // Bit clear: redirect interrupt through the virtual-mode idt.
      Bit16u temp_flags = (Bit16u) read_eflags();

      // Real-mode IVT entry at linear address vector*4: IP then CS.
      Bit16u temp_CS = system_read_word(vector*4 + 2);
      Bit16u temp_IP = system_read_word(vector*4);

      if (BX_CPU_THIS_PTR get_IOPL() < 3) {
        // Present the guest with IOPL=3 and IF mirroring VIF.
        temp_flags |= EFlagsIOPLMask;
        if (BX_CPU_THIS_PTR get_VIF())
          temp_flags |= EFlagsIFMask;
        else
          temp_flags &= ~EFlagsIFMask;
      }

      Bit16u old_IP = IP;
      Bit16u old_CS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

      // Build the real-mode style interrupt frame: FLAGS, CS, IP.
      push_16(temp_flags);
      // push return address onto new stack
      push_16(old_CS);
      push_16(old_IP);

      load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], (Bit16u) temp_CS);
      EIP = temp_IP;

      BX_CPU_THIS_PTR clear_TF();
      BX_CPU_THIS_PTR clear_RF();
      if (BX_CPU_THIS_PTR get_IOPL() == 3)
        BX_CPU_THIS_PTR clear_IF();
      else
        BX_CPU_THIS_PTR clear_VIF();

      return 1;
    }
  }
#endif
  // interrupt is not redirected or VME is OFF
  if (BX_CPU_THIS_PTR get_IOPL() < 3)
  {
    BX_DEBUG(("v86_redirect_interrupt(): interrupt cannot be redirected, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  return 0;
}
|
237 |
|
|
|
238 |
|
|
void BX_CPU_C::init_v8086_mode(void)
|
239 |
|
|
{
|
240 |
|
|
for(unsigned sreg = 0; sreg < 6; sreg++) {
|
241 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
|
242 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.p = 1;
|
243 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.dpl = 3;
|
244 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.segment = 1;
|
245 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.type = BX_DATA_READ_WRITE_ACCESSED;
|
246 |
|
|
|
247 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.base =
|
248 |
|
|
BX_CPU_THIS_PTR sregs[sreg].selector.value << 4;
|
249 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.limit_scaled = 0xffff;
|
250 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.g = 0;
|
251 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.d_b = 0;
|
252 |
|
|
BX_CPU_THIS_PTR sregs[sreg].cache.u.segment.avl = 0;
|
253 |
|
|
BX_CPU_THIS_PTR sregs[sreg].selector.rpl = 3;
|
254 |
|
|
}
|
255 |
|
|
|
256 |
|
|
handleCpuModeChange();
|
257 |
|
|
|
258 |
|
|
#if BX_CPU_LEVEL >= 4
|
259 |
|
|
handleAlignmentCheck(/* CPL change */);
|
260 |
|
|
#endif
|
261 |
|
|
|
262 |
|
|
invalidate_stack_cache();
|
263 |
|
|
}
|
264 |
|
|
|
265 |
|
|
#endif /* BX_CPU_LEVEL >= 3 */
|