/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc 11598 2013-01-27 19:27:30Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2012 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses,  \
          (iCacheLookups-iCacheMisses) * 100.0f / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++
#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif
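
// To profile the instruction cache, set InstrumentICACHE to 1 above: every
// lookup and miss is then counted, and the hit rate is logged once per
// InstrICache_StatsMask+1 (0x1000000) lookups.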
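// cpu_loop: the heart of the emulation.  Repeatedly looks up the trace that
// starts at the current RIP in the instruction cache (decoding it on a miss
// via getICacheEntry) and executes its instructions, checking for events that
// are asynchronous to the CPU (hardware interrupts, traps) between traces.
// The setjmp() below is the longjmp() target used by exception() and VMEXIT
// to unwind back into this loop.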
void BX_CPU_C::cpu_loop(void)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // can get here only from exception function or VMEXIT
    BX_CPU_THIS_PTR icount++;
    BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);
#if BX_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) return;
#endif
  }

  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control is returned to it so that
  // the situation may be examined.
#if BX_DEBUGGER
  if (bx_guard.interrupt_requested) return;
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
  BX_CPU_THIS_PTR speculative_rsp = 0;

  while (1) {

    // check on events which occurred for previous instructions (traps)
    // and ones which are asynchronous to the CPU (hardware interrupts)
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // handleAsyncEvent() requested to return to the caller ASAP
        return;
      }
    }

    bxICacheEntry_c *entry = getICacheEntry();
    bxInstruction_c *i = entry->i;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
    for(;;) {

      // want to allow changing of the instruction inside instrumentation callback
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      // when handlers chaining is enabled this single call will execute entire trace
      BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);

      if (BX_CPU_THIS_PTR async_event) break;

      i = getICacheEntry()->i;
    }
#else // BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0

    bxInstruction_c *last = i + (entry->tlen);

    for(;;) {
#if BX_DEBUGGER
      if (BX_CPU_THIS_PTR trace)
        debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
#endif

      // want to allow changing of the instruction inside instrumentation callback
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction
      BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
      BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);

      // note instructions generating exceptions never reach this point
#if BX_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_epilog()) return;
#endif

      if (BX_CPU_THIS_PTR async_event) break;

      if (++i == last) {
        entry = getICacheEntry();
        i = entry->i;
        last = i + (entry->tlen);
      }
    }
#endif

    // clear stop trace magic indication that probably was set by repeat or branch32/64
    BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;

  }  // while (1)
}

#if BX_SUPPORT_SMP

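// cpu_run_trace: SMP-only variant of cpu_loop that executes at most a single
// trace and then returns to the caller, presumably so the scheduler can
// interleave execution of the simulated processors.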
void BX_CPU_C::cpu_run_trace(void)
{
  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // can get here only from exception function or VMEXIT
    BX_CPU_THIS_PTR icount++;
    return;
  }

  // check on events which occurred for previous instructions (traps)
  // and ones which are asynchronous to the CPU (hardware interrupts)
  if (BX_CPU_THIS_PTR async_event) {
    if (handleAsyncEvent()) {
      // handleAsyncEvent() requested to return to the caller ASAP
      return;
    }
  }

  bxICacheEntry_c *entry = getICacheEntry();
  bxInstruction_c *i = entry->i;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
  // want to allow changing of the instruction inside instrumentation callback
  BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
  RIP += i->ilen();
  // when handlers chaining is enabled this single call will execute entire trace
  BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction

  if (BX_CPU_THIS_PTR async_event) {
    // clear stop trace magic indication that probably was set by repeat or branch32/64
    BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
  }
#else
  bxInstruction_c *last = i + (entry->tlen);

  for(;;) {
    // want to allow changing of the instruction inside instrumentation callback
    BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
    RIP += i->ilen();
    BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction
    BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
    BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
    BX_CPU_THIS_PTR icount++;

    if (BX_CPU_THIS_PTR async_event) {
      // clear stop trace magic indication that probably was set by repeat or branch32/64
      BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
      break;
    }

    if (++i == last) break;
  }
#endif // BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
}

#endif

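// getICacheEntry: map the current RIP to a validated trace.  The biased EIP
// is an offset into the current fetch page; when it falls outside the page
// window, prefetch() re-establishes the fetch pointers first.  The trace is
// then looked up in the iCache by physical address and fetch mode, and
// decoded on a miss by serveICacheMiss().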
bxICacheEntry_c* BX_CPU_C::getICacheEntry(void)
{
  bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  }

  InstrICache_Increment(iCacheLookups);
  InstrICache_Stats();

  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
  bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.find_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

  if (entry == NULL)
  {
    // iCache miss. No validated instruction with matching fetch parameters
    // is in the iCache.
    InstrICache_Increment(iCacheMisses);
    entry = serveICacheMiss(entry, (Bit32u) eipBiased, pAddr);
  }

  return entry;
}
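// Handlers chaining: each instruction handler dispatches directly to the
// next one, so the single BX_CPU_CALL_METHOD call in cpu_loop executes an
// entire trace.  linkTrace (below) extends this across taken branches by
// caching the successor trace in the branch instruction itself.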
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS

// The function is called after taken branch instructions and tries to link the branch to the next trace
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::linkTrace(bxInstruction_c *i)
{
#if BX_SUPPORT_SMP
  if (BX_SMP_PROCESSORS > 1)
    return;
#endif

  if (BX_CPU_THIS_PTR async_event) return;

  Bit32u delta = (Bit32u) (BX_CPU_THIS_PTR icount - BX_CPU_THIS_PTR icount_last_sync);
  if (delta >= bx_pc_system.getNumCpuTicksLeftNextEvent())
    return;

  bxInstruction_c *next = i->getNextTrace();
  if (next) {
    BX_EXECUTE_INSTRUCTION(next);
    return;
  }

  bx_address eipBiased = EIP + BX_CPU_THIS_PTR eipPageBias;
  if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
    /*
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    */
    // Ideally prefetch() would be called here instead of returning, so that
    // branches crossing a page boundary could be linked as well, but that
    // could potentially cause a functional failure: an OS might modify the
    // page tables and invalidate the TLB, yet Bochs would not notice it
    // because a trace was linked into a stale trace built before the page
    // invalidation.  A properly done prefetch() would detect this case.

    return;
  }

  InstrICache_Increment(iCacheLookups);
  InstrICache_Stats();

  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
  bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.find_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

  if (entry != NULL) // link traces - handle only hit cases
  {
    i->setNextTrace(entry->i);
    i = entry->i;
    BX_EXECUTE_INSTRUCTION(i);
  }
}

#endif
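// Passed as the interval argument to BX_SYNC_TIME_IF_SINGLE_PROCESSOR in the
// repeat loops below (one less than the maximum trace length), bounding how
// far icount may run ahead of the device timers during a long REP.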
#define BX_REPEAT_TIME_UPDATE_INTERVAL (BX_MAX_TRACE_LENGTH-1)

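// repeat: iterate a string instruction under a REP prefix.  Each pass runs
// one iteration and decrements rCX.  If the loop is interrupted before the
// count reaches zero (async event, or always when the debugger is compiled
// in), RIP is rolled back to prev_rip so the whole REP instruction restarts
// after the event is handled, matching how real hardware resumes an
// interrupted REP.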
void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxRepIterationPtr_tR execute)
{
  // non repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_REP_ITERATION(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
        //AO new
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        //AO end
      }
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }
  else // 16bit addrsize
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
        //AO new
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        //AO end
      }
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
}
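// repeat_ZF: same as repeat() above, but for instructions that also test ZF
// (CMPS/SCAS).  Prefix 0xF3 (REPE/REPZ) stops when ZF clears, prefix 0xF2
// (REPNE/REPNZ) stops when ZF sets.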
void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterationPtr_tR execute)
{
  unsigned rep = i->repUsedValue();

  // non repeated instruction
  if (! rep) {
    BX_CPU_CALL_REP_ITERATION(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

  if (rep == 3) { /* repeat prefix 0xF3 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (! get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
          //AO new
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          //AO end
        }
        if (! get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
          //AO new
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          //AO end
        }
        if (! get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
  }
  else { /* repeat prefix 0xF2 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
          //AO new
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          //AO end
        }
        if (get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          //AO BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
          //AO new
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          //AO end
        }
        if (get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
}

// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any

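// prefetch: (re)establish the direct fetch window for the page RIP points
// into: eipFetchPtr (host pointer to the guest code page), pAddrFetchPage
// (its physical address), eipPageBias and eipPageWindowSize.  The bias is
// chosen so that RIP + eipPageBias yields the offset into the window; for
// example, with EIP = 0x1234 the page offset is 0x234, the bias is -0x1000,
// and any EIP in 0x1000..0x1FFF stays inside the 4K window.  Outside 64-bit
// mode the window is additionally clipped to the CS segment limit.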
void BX_CPU_C::prefetch(void)
{
  bx_address laddr;
  unsigned pageOffset;

#if BX_SUPPORT_X86_64
  if (long64_mode()) {
    if (! IsCanonical(RIP)) {
      BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
      exception(BX_GP_EXCEPTION, 0);
    }

    // linear address is equal to RIP in 64-bit long mode
    pageOffset = PAGE_OFFSET(EIP);
    laddr = RIP;

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
  }
  else
#endif
  {

#if BX_CPU_LEVEL >= 5
    if (USER_PL && BX_CPU_THIS_PTR get_VIP() && BX_CPU_THIS_PTR get_VIF()) {
      if (BX_CPU_THIS_PTR cr4.get_PVI() | (v8086_mode() && BX_CPU_THIS_PTR cr4.get_VME())) {
        BX_ERROR(("prefetch: inconsistent VME state"));
        exception(BX_GP_EXCEPTION, 0);
      }
    }
#endif

    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
    laddr = get_laddr32(BX_SEG_REG_CS, EIP);
    pageOffset = PAGE_OFFSET(laddr);

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = (bx_address) pageOffset - EIP;
    Bit32u limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (EIP > limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", EIP, limit));
      exception(BX_GP_EXCEPTION, 0);
    }

    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
    if (limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = (Bit32u)(limit + BX_CPU_THIS_PTR eipPageBias + 1);
    }
  }

#if BX_X86_DEBUGGER
  if (hwbreakpoint_check(laddr, BX_HWDebugInstruction, BX_HWDebugInstruction)) {
    signal_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
    if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
      // The next instruction could already hit a code breakpoint but
      // async_event won't take effect immediately.
      // Check if the next executing instruction hits a code breakpoint

      // check only if not fetching page cross instruction
      // this check is 32-bit wrap safe as well
      if (EIP == (Bit32u) BX_CPU_THIS_PTR prev_rip) {
        Bit32u dr6_bits = code_breakpoint_match(laddr);
        if (dr6_bits & BX_DEBUG_TRAP_HIT) {
          BX_ERROR(("#DB: x86 code breakpoint caught"));
          BX_CPU_THIS_PTR debug_trap |= dr6_bits;
          exception(BX_DB_EXCEPTION, 0);
        }
      }
    }
  }
  else {
    clear_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
  }
#endif

  BX_CPU_THIS_PTR clear_RF();

  bx_address lpf = LPFOf(laddr);
  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
  Bit8u *fetchPtr = 0;

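  // Fast path: the TLB entry already maps this page and its cached access
  // bits grant execute permission at the current privilege level (the
  // 0x10 << USER_PL bit); otherwise walk the page tables via
  // translate_linear().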
  if ((tlbEntry->lpf == lpf) && (tlbEntry->accessBits & (0x10 << USER_PL)) != 0) {
    BX_CPU_THIS_PTR pAddrFetchPage = tlbEntry->ppf;
    fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
  }
  else {
    bx_phy_address pAddr = translate_linear(tlbEntry, laddr, USER_PL, BX_EXECUTE);
    BX_CPU_THIS_PTR pAddrFetchPage = PPFOf(pAddr);
  }

  if (fetchPtr) {
    BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
  }
  else {
    BX_CPU_THIS_PTR eipFetchPtr = (const Bit8u*) getHostMemAddr(BX_CPU_THIS_PTR pAddrFetchPage, BX_EXECUTE);
    // Sanity checks
    if (! BX_CPU_THIS_PTR eipFetchPtr) {
      bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + pageOffset;
      if (pAddr >= BX_MEM(0)->get_memory_len()) {
        BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
      else {
        BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
    }
  }
}

#if BX_DEBUGGER || BX_GDBSTUB
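// dbg_instruction_epilog: called after every committed instruction when the
// debugger or GDB stub is compiled in.  Returns 1 if execution should stop
// and control return to the debugger (breakpoint, watchpoint, icount guard,
// user interrupt), 0 to continue the cpu loop.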
bx_bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  bx_address debug_eip = RIP;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = get_laddr(BX_SEG_REG_CS, debug_eip);
  BX_CPU_THIS_PTR guard_found.code_32_64 = BX_CPU_THIS_PTR fetchModeMask;

  //
  // Take care of break point conditions generated during instruction execution
  //

  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    Bit64u tt = bx_pc_system.time_ticks();
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }

  // see if debugger is requesting icount guard
  if (bx_guard.guard_for & BX_DBG_GUARD_ICOUNT) {
    if (get_icount() >= BX_CPU_THIS_PTR guard_found.icount_max) {
      return(1);
    }
  }

  // convenient point to see if user requested debug break or typed Ctrl-C
  if (bx_guard.interrupt_requested) {
    return(1);
  }

  // support for 'show' command in debugger
  extern unsigned dbg_show_mask;
  if (dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // Just committed an instruction, before fetching a new one
  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if (BX_DBG_MAX_VIR_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      for (unsigned n=0; n<bx_guard.iaddr.num_virtual; n++) {
        if (bx_guard.iaddr.vir[n].enabled &&
            (bx_guard.iaddr.vir[n].cs  == cs) &&
            (bx_guard.iaddr.vir[n].eip == debug_eip))
        {
          BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
          BX_CPU_THIS_PTR guard_found.iaddr_index = n;
          return(1); // on a breakpoint
        }
      }
    }
#endif
#if (BX_DBG_MAX_LIN_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      for (unsigned n=0; n<bx_guard.iaddr.num_linear; n++) {
        if (bx_guard.iaddr.lin[n].enabled &&
            (bx_guard.iaddr.lin[n].addr == BX_CPU_THIS_PTR guard_found.laddr))
        {
          BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
          BX_CPU_THIS_PTR guard_found.iaddr_index = n;
          return(1); // on a breakpoint
        }
      }
    }
#endif
#if (BX_DBG_MAX_PHY_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      if (valid) {
        for (unsigned n=0; n<bx_guard.iaddr.num_physical; n++) {
          if (bx_guard.iaddr.phy[n].enabled && (bx_guard.iaddr.phy[n].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return(1);
  }
#endif

  return(0);
}
#endif // BX_DEBUGGER || BX_GDBSTUB