OpenCores
URL: https://opencores.org/ocsvn/zet86/zet86/trunk

Subversion Repositories: zet86

zet86/trunk/src/bochs-diff-2.3.7/cpu/cpu.cc (rev 49)

/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.5 2009-02-06 03:48:30 zeus Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"

#if BX_EXTERNAL_DEBUGGER
#include "extdb.h"
#endif

// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RCX ECX
#endif

// ICACHE instrumentation code
#if BX_SUPPORT_ICACHE

#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses,  \
          (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++

#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif

#endif // BX_SUPPORT_ICACHE
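
// Editor's note: the following is an illustrative, standalone sketch of
// the periodic hit-rate reporting pattern used by InstrICache_Stats()
// above.  It is not part of the original source; names like `CacheStats`
// are hypothetical.  It is disabled with #if 0 and can be compiled
// separately if desired.
#if 0
#include <cstdio>

struct CacheStats {
  unsigned lookups = 0;
  unsigned misses  = 0;

  // Report (and reset) once every 2^24 lookups, mirroring
  // InstrICache_StatsMask above: increment first, then test the mask.
  void lookup(bool hit) {
    lookups++;
    if (!hit) misses++;
    if ((lookups & 0xffffff) == 0) {
      std::printf("lookups: %u, misses: %u, hit rate = %6.2f%%\n",
                  lookups, misses,
                  (lookups - misses) * 100.0 / lookups);
      lookups = misses = 0;
    }
  }
};
#endif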

// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used by the bochs internal debugger or when simulating
// multiple processors.
//
// If the maximum instruction count has been reached, return.  A count
// of zero means run forever.
#if BX_SUPPORT_SMP || BX_DEBUGGER
  #define CHECK_MAX_INSTRUCTIONS(count) \
    if ((count) > 0) {                  \
      (count)--;                        \
      if ((count) == 0) return;         \
    }
#else
  #define CHECK_MAX_INSTRUCTIONS(count)
#endif
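
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the countdown semantics of CHECK_MAX_INSTRUCTIONS,
// where a budget of zero means "run forever".  The demo cap is only there
// to keep the example terminating.
#if 0
#include <cassert>

// Returns how many loop iterations actually execute for a given budget.
static unsigned run_with_budget(unsigned count) {
  unsigned executed = 0;
  while (true) {
    executed++;                 // "execute" one instruction
    if (count > 0) {            // CHECK_MAX_INSTRUCTIONS(count)
      count--;
      if (count == 0) return executed;
    }
    if (executed >= 1000000) return executed;  // safety stop for the demo
  }
}

int main() {
  assert(run_with_budget(5) == 5);        // budget of 5 -> 5 instructions
  assert(run_with_budget(0) == 1000000);  // 0 -> unbounded (demo-capped)
  return 0;
}
#endif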

void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // we can only get here from the exception() function ...
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
    BX_TICK1_IF_SINGLE_PROCESSOR();
#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif
    CHECK_MAX_INSTRUCTIONS(max_instr_count);
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) return;
#endif
  }

  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control is returned to it so that
  // the situation may be examined.
#if BX_DEBUGGER
  if (bx_guard.interrupt_requested) return;
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
  BX_CPU_THIS_PTR speculative_rsp = 0;
  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR errorno = 0;

  while (1) {

    // check on events which occurred for previous instructions (traps)
    // and ones which are asynchronous to the CPU (hardware interrupts)
/*
 * Zet: we don't handle external interrupts
 *
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // If request to return to caller ASAP.
        return;
      }
    }
 */
no_async_event:

    Bit32u eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

    if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
      prefetch();
      eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    }

#if BX_SUPPORT_ICACHE
    bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
    bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr);
    bxInstruction_c *i = entry->i;

    InstrICache_Increment(iCacheLookups);
    InstrICache_Stats();

    if ((entry->pAddr == pAddr) &&
        (entry->writeStamp == *(BX_CPU_THIS_PTR currPageWriteStampPtr)))
    {
      // iCache hit. An instruction was found in the iCache.
#if BX_INSTRUMENTATION
      BX_INSTR_OPCODE(BX_CPU_ID, BX_CPU_THIS_PTR eipFetchPtr + eipBiased,
         i->ilen(), BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
#endif
    }
    else {
      // iCache miss. No validated instruction with matching fetch parameters
      // is in the iCache.
      InstrICache_Increment(iCacheMisses);
      serveICacheMiss(entry, eipBiased, pAddr);
      i = entry->i;
    }
#else
    bxInstruction_c iStorage, *i = &iStorage;
    unsigned remainingInPage = BX_CPU_THIS_PTR eipPageWindowSize - eipBiased;
    const Bit8u *fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;
    fetchInstruction(i, fetchPtr, remainingInPage);
#endif

#if BX_SUPPORT_TRACE_CACHE
    unsigned length = entry->ilen;

    for(;;i++) {
#endif
      // An instruction will have been fetched using either the normal case,
      // or the boundary fetch (across pages), by this point.
      BX_INSTR_FETCH_DECODE_COMPLETED(BX_CPU_ID, i);

#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_prolog()) return;
#endif

#if BX_DISASM
      if (BX_CPU_THIS_PTR trace) {
        // print the instruction that is about to be executed
        debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
      }
#endif

      // decoding instruction completed -> continue with execution
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      BX_CPU_CALL_METHOD(i->execute, (i)); // might iterate repeat instruction
      BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
      BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
      BX_TICK1_IF_SINGLE_PROCESSOR();

      // inform instrumentation about new instruction
      BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);

      // note instructions generating exceptions never reach this point
#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_epilog()) return;
#endif

      CHECK_MAX_INSTRUCTIONS(max_instr_count);

#if BX_SUPPORT_TRACE_CACHE
      if (BX_CPU_THIS_PTR async_event) {
        // clear stop trace magic indication that probably was set by repeat or branch32/64
        BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
        break;
      }

      if (--length == 0) goto no_async_event;
    }
#endif
  }  // while (1)
}
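
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the setjmp/longjmp control flow used above:
// exception() longjmps back to the setjmp at the top of cpu_loop, so a
// faulting instruction restarts the fetch/execute loop at the handler's
// RIP instead of unwinding the C++ call stack normally.  exception_() is
// a hypothetical stand-in for exception().
#if 0
#include <csetjmp>
#include <cstdio>

static std::jmp_buf cpu_env;

static void exception_(int vector) {   // stand-in for exception()
  std::printf("dispatching exception vector %d\n", vector);
  std::longjmp(cpu_env, 1);            // never returns to the caller
}

static void execute_one(int n) {
  if (n == 3) exception_(13);          // e.g. a #GP raised mid-instruction
  std::printf("executed instruction %d\n", n);
}

int main() {
  static volatile int n = 0;           // survives longjmp with a defined value
  if (setjmp(cpu_env)) {
    // Came back from exception_() via longjmp: the instruction-boundary
    // bookkeeping would happen here, then execution resumes.
    std::printf("resumed after exception\n");
  }
  while (n < 6) {
    int cur = n;
    n = cur + 1;
    execute_one(cur);
  }
  return 0;
}
#endif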

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  // non-repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else  // 16-bit address size
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
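
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the interruptible REP loop above: execute one
// iteration while the counter is nonzero, decrement, and bail out to the
// main loop on a pending async event so interrupts can be taken between
// iterations (RIP is rolled back so the REP instruction restarts).
#if 0
#include <cstdint>
#include <cstdio>

// Returns true when the REP loop ran to completion, false when it was
// interrupted and must be resumed later at the same instruction.
static bool rep_step(uint16_t &cx, bool (*async_pending)()) {
  while (true) {
    if (cx != 0) {
      std::printf("one string-op iteration, CX=%u\n", (unsigned)cx);
      cx--;
    }
    if (cx == 0) return true;          // count exhausted: instruction done
    if (async_pending()) return false; // interrupted: caller restores RIP
  }
}

int main() {
  uint16_t cx = 3;
  if (!rep_step(cx, [] { return false; })) std::printf("interrupted\n");
  return 0;
}
#endif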

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZFL(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  // non-repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

  // rep == 3 repeats while ZF is set (REPE/REPZ);
  // rep == 2 repeats while ZF is clear (REPNE/REPNZ)
  unsigned rep = i->repUsedValue();

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (rep==3 && get_ZF()==0) return;
      if (rep==2 && get_ZF()!=0) return;
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (rep==3 && get_ZF()==0) return;
      if (rep==2 && get_ZF()!=0) return;
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else  // 16-bit address size
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (rep==3 && get_ZF()==0) return;
      if (rep==2 && get_ZF()!=0) return;
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // always exit if the debugger is enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
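
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the REPE/REPNE termination conditions used in
// repeat_ZFL() above, with rep==3 for REPE/REPZ and rep==2 for
// REPNE/REPNZ as in the comment there.
#if 0
#include <cstdint>
#include <cstdio>

// Decide, after one CMPSB-like iteration, whether the repeat should stop.
static bool repe_done(unsigned rep, bool zf, uint16_t cx) {
  if (rep == 3 && !zf) return true;  // REPE: stop when operands differ
  if (rep == 2 && zf)  return true;  // REPNE: stop on the first match
  return cx == 0;                    // either way, stop when CX hits 0
}

int main() {
  const uint8_t a[] = {1, 2, 3, 9}, b[] = {1, 2, 3, 4};
  uint16_t cx = 4;
  unsigned idx = 0;
  bool zf = false;
  do {
    zf = (a[idx] == b[idx]);  // the "string op" sets ZF
    idx++; cx--;
  } while (!repe_done(3, zf, cx));   // REPE CMPSB
  std::printf("mismatch found after %u comparisons\n", idx);  // prints 4
  return 0;
}
#endif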

unsigned BX_CPU_C::handleAsyncEvent(void)
{
  //
  // This area is where we process special conditions and events.
  //
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) {
    // I made up the bitmask above to mean HALT state.
    // for one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
    while (1)
    {
      if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() || (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT_IF))) ||
           BX_CPU_THIS_PTR nmi_pending || BX_CPU_THIS_PTR smi_pending)
      {
        // interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
        if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_MWAIT)
          BX_MEM(0)->clear_monitor(BX_CPU_THIS_PTR bx_cpuid);
#endif
        BX_CPU_THIS_PTR debug_trap = 0; // clear traps for after resume
        BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
        break;
      }
      if ((BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_SPECIAL) == 0) {
        BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
        break;
      }

      // for multiprocessor simulation, even if this CPU is halted we still
      // must give the others a chance to simulate.  If an interrupt has
      // arrived, then clear the HALT condition; otherwise just return from
      // the CPU loop with stop_reason STOP_CPU_HALTED.
#if BX_SUPPORT_SMP
      if (BX_SMP_PROCESSORS > 1) {
        // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
        BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
        return 1; // Return to caller of cpu_loop.
      }
#endif

#if BX_DEBUGGER
      if (bx_guard.interrupt_requested)
        return 1; // Return to caller of cpu_loop.
#endif

      BX_TICK1();
    }
  } else if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  // (bochs doesn't support these)
  if (BX_CPU_THIS_PTR smi_pending && ! BX_CPU_THIS_PTR smm_mode())
  {
    // clear SMI pending flag and disable NMI when SMM was accepted
    BX_CPU_THIS_PTR smi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    enter_system_management_mode();
  }

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (BX_CPU_THIS_PTR debug_trap &&
       !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG))
  {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS.  If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    // Commit debug events to DR6
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
  }

  // Priority 5: External Interrupts
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
  }
  else if (BX_CPU_THIS_PTR nmi_pending) {
    BX_CPU_THIS_PTR nmi_pending = 0;
    BX_CPU_THIS_PTR nmi_disable = 1;
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, 0, 0, 0);
  }
  else if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF() && BX_DBG_ASYNC_INTR)
  {
    Bit8u vector;

    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR local_apic.INTR)
      vector = BX_CPU_THIS_PTR local_apic.acknowledge_int();
    else
#endif
      // if no local APIC, always acknowledge the PIC.
      vector = DEV_pic_iac(); // may set INTR with next interrupt
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(vector, 0, 0, 0);
    // Set up environment, as would be when this main cpu loop gets
    // invoked.  At the end of normal instructions, we always commit
    // the new EIP/ESP values.  But here, we call interrupt() much like
    // it was a software interrupt instruction, and need to effect the
    // commit here.  This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
    BX_CPU_THIS_PTR speculative_rsp = 0;
    BX_CPU_THIS_PTR EXT = 0;
    BX_CPU_THIS_PTR errorno = 0;
  }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
  }

  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF()) {
    BX_CPU_THIS_PTR clear_RF();
  }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled
    if (BX_CPU_THIS_PTR dr7 & 0x000000ff) {
      bx_address iaddr = get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip);
      Bit32u dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
      if (dr6_bits)
      {
        // Add to the list of debug events thus far.
        BX_CPU_THIS_PTR async_event = 1;
        BX_CPU_THIS_PTR debug_trap |= dr6_bits;
        // If debug events are not inhibited on this boundary,
        // fire off a debug fault.  Otherwise handle it on the next
        // boundary. (becomes a trap)
        if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG)) {
          // Commit debug events to DR6
          BX_CPU_THIS_PTR dr6 = BX_CPU_THIS_PTR debug_trap;
          exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
        }
      }
    }
  }
#endif

  // We have ignored processing of external interrupts and
  // debug events on this boundary.  Reset the mask so they
  // will be processed on the next boundary.
  BX_CPU_THIS_PTR inhibit_mask = 0;

  if (!(BX_CPU_INTR ||
        BX_CPU_THIS_PTR debug_trap ||
        BX_HRQ ||
        BX_CPU_THIS_PTR get_TF()
#if BX_X86_DEBUGGER
        || (BX_CPU_THIS_PTR dr7 & 0xff)
#endif
        ))
    BX_CPU_THIS_PTR async_event = 0;

  return 0; // Continue executing cpu_loop.
}
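
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the one-boundary interrupt shadow handled above:
// instructions like STI or MOV SS set an inhibit flag, the very next
// instruction boundary skips interrupt delivery, and the flag is then
// cleared so the boundary after that can deliver.  MiniCpu is hypothetical.
#if 0
#include <cstdio>

struct MiniCpu {
  bool intr_pending = false;
  bool inhibit_interrupts = false;  // set by STI / MOV SS

  // Called once per instruction boundary, like handleAsyncEvent().
  void boundary() {
    if (intr_pending && !inhibit_interrupts) {
      std::printf("interrupt delivered\n");
      intr_pending = false;
    } else if (intr_pending) {
      std::printf("interrupt held off by inhibit shadow\n");
    }
    inhibit_interrupts = false;  // the shadow lasts exactly one boundary
  }
};

int main() {
  MiniCpu cpu;
  cpu.intr_pending = true;
  cpu.inhibit_interrupts = true;  // as if STI had just executed
  cpu.boundary();  // held off
  cpu.boundary();  // delivered
  return 0;
}
#endif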


// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any

void BX_CPU_C::prefetch(void)
{
  bx_address temp_rip = RIP;
  bx_address laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, temp_rip);
  bx_phy_address pAddr;
  unsigned pageOffset = PAGE_OFFSET(laddr);

  // Calculate RIP at the beginning of the page.
  BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
  BX_CPU_THIS_PTR eipPageWindowSize = 4096;

#if BX_SUPPORT_X86_64
  if (Is64BitMode()) {
    if (! IsCanonical(RIP)) {
      BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
  }
  else
#endif
  {
    Bit32u temp_limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (((Bit32u) temp_rip) > temp_limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", (Bit32u) temp_rip, temp_limit));
      exception(BX_GP_EXCEPTION, 0, 0);
    }
    if (temp_limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = temp_limit + BX_CPU_THIS_PTR eipPageBias + 1;
    }
  }

  bx_address lpf = LPFOf(laddr);
  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
  Bit8u *fetchPtr = 0;

  if (tlbEntry->lpf == lpf && (tlbEntry->accessBits & (0x01 << CPL))) {
    pAddr = A20ADDR(tlbEntry->ppf | pageOffset);
#if BX_SupportGuest2HostTLB
    fetchPtr = (Bit8u*) (tlbEntry->hostPageAddr);
#endif
  }
  else {
    if (BX_CPU_THIS_PTR cr0.get_PG()) {
      pAddr = translate_linear(laddr, CPL, BX_READ, CODE_ACCESS);
      pAddr = A20ADDR(pAddr);
    }
    else {
      pAddr = A20ADDR(laddr);
    }
  }

  BX_CPU_THIS_PTR pAddrA20Page = LPFOf(pAddr);

  if (fetchPtr) {
    BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
  }
  else {
    BX_CPU_THIS_PTR eipFetchPtr = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS,
        BX_CPU_THIS_PTR pAddrA20Page, BX_READ, CODE_ACCESS);
  }

  // Sanity checks
  if (! BX_CPU_THIS_PTR eipFetchPtr) {
    if (pAddr >= BX_MEM(0)->get_memory_len()) {
      BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
    }
    else {
      BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
    }
  }

#if BX_SUPPORT_ICACHE
  BX_CPU_THIS_PTR currPageWriteStampPtr = pageWriteStampTable.getPageWriteStampPtr(pAddr);
  Bit32u pageWriteStamp = *(BX_CPU_THIS_PTR currPageWriteStampPtr);
  pageWriteStamp &= ~ICacheWriteStampFetchModeMask; // Clear out old fetch mode bits
  pageWriteStamp |=  BX_CPU_THIS_PTR fetchModeMask; // And add new ones
  pageWriteStampTable.setPageWriteStamp(pAddr, pageWriteStamp);
#endif
}
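
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the eipPageBias / eipPageWindowSize arithmetic used
// by prefetch() and the main loop: the bias maps RIP into an offset
// inside the current page window (using wrap-around unsigned math), and a
// new prefetch is needed as soon as the biased value leaves the window.
#if 0
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t PAGE = 4096;
  uint32_t rip        = 0x12345;            // current EIP
  uint32_t pageOffset = rip & (PAGE - 1);   // 0x345, i.e. PAGE_OFFSET(laddr)
  uint32_t bias       = pageOffset - rip;   // wraps modulo 2^32, as above
  uint32_t window     = PAGE;               // the full page is fetchable

  // Anywhere inside the same page, rip + bias is the in-page offset.
  assert((rip + bias) == 0x345);
  assert((0x12fffu + bias) == 0xfff);       // last byte of the page

  // One byte past the page end leaves the window: time to prefetch().
  assert((0x13000u + bias) >= window);
  return 0;
}
#endif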

void BX_CPU_C::boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
{
  unsigned j;
  Bit8u fetchBuffer[16]; // Really only need 15
  unsigned ret;

  if (remainingInPage >= 15) {
    BX_ERROR(("boundaryFetch #GP(0): too many instruction prefixes"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;
  prefetch();

  unsigned fetchBufferLimit = 15;
  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_DEBUG(("boundaryFetch: small window size after prefetch - %d bytes", BX_CPU_THIS_PTR eipPageWindowSize));
    fetchBufferLimit = BX_CPU_THIS_PTR eipPageWindowSize;
  }

  // We can fetch straight from the 0th byte, which is eipFetchPtr.
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // read leftover bytes in next page
  for (; j<fetchBufferLimit; j++) {
    fetchBuffer[j] = *fetchPtr++;
  }
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
    ret = fetchDecode64(fetchBuffer, i, fetchBufferLimit);
  else
#endif
    ret = fetchDecode32(fetchBuffer, i, fetchBufferLimit);

  if (ret==0) {
    BX_INFO(("boundaryFetch #GP(0): failed to complete instruction decoding"));
    exception(BX_GP_EXCEPTION, 0, 0);
  }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_rip;

  // Since we cross an instruction boundary, note that we need a prefetch()
  // again on the next instruction.  Perhaps we can optimize this to
  // eliminate the extra prefetch() since we do it above, but have to
  // think about repeated instructions, etc.
  invalidate_prefetch_q();

  BX_INSTR_OPCODE(BX_CPU_ID, fetchBuffer, i->ilen(),
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, Is64BitMode());
}
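
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the page-straddling fetch above: copy the tail of
// the current page and the head of the next page into one contiguous
// 15-byte buffer, then decode from that buffer instead of guest memory.
#if 0
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint8_t page1[4096], page2[4096];
  std::memset(page1, 0x90, sizeof(page1));   // fill both pages with NOPs
  std::memset(page2, 0x90, sizeof(page2));

  // Pretend an instruction starts 2 bytes before the end of page1.
  unsigned remainingInPage = 2;
  uint8_t fetchBuffer[16];                   // really only need 15

  unsigned j = 0;
  for (; j < remainingInPage; j++)           // tail of page 1
    fetchBuffer[j] = page1[4096 - remainingInPage + j];
  for (; j < 15; j++)                        // head of page 2
    fetchBuffer[j] = page2[j - remainingInPage];

  std::printf("decoder now sees %u contiguous bytes\n", j);  // prints 15
  return 0;
}
#endif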

void BX_CPU_C::deliver_NMI(void)
{
  BX_CPU_THIS_PTR nmi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::deliver_SMI(void)
{
  BX_CPU_THIS_PTR smi_pending = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

#if BX_EXTERNAL_DEBUGGER
void BX_CPU_C::ask(int level, const char *prefix, const char *fmt, va_list ap)
{
  char buf1[1024];
  vsprintf (buf1, fmt, ap);
  printf ("%s %s\n", prefix, buf1);
  trap_debugger(1, BX_CPU_THIS);
}
#endif

#if BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
bx_bool BX_CPU_C::dbg_instruction_prolog(void)
{
#if BX_DEBUGGER
  if(dbg_check_begin_instr_bpoint()) return 1;
#endif

#if BX_EXTERNAL_DEBUGGER
  bx_external_debugger(BX_CPU_THIS);
#endif

  return 0;
}

bx_bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  if (dbg_check_end_instr_bpoint()) return 1;
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return 1;
  }
#endif

  return 0;
}
#endif // BX_DEBUGGER || BX_EXTERNAL_DEBUGGER || BX_GDBSTUB
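
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the hook convention above: dbg_instruction_prolog()
// and dbg_instruction_epilog() return nonzero when cpu_loop should stop
// and hand control back to whichever debugger front-end requested it.
#if 0
#include <cstdio>

static bool prolog() { return false; }  // e.g. begin-instruction breakpoints
static bool epilog() { static int n = 0; return ++n == 3; }  // stop at 3rd

int main() {
  for (int pc = 0;; pc++) {
    if (prolog()) break;    // stop before executing this instruction
    std::printf("execute instruction %d\n", pc);
    if (epilog()) break;    // stop after executing (breakpoint hit)
  }
  return 0;
}
#endif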

#if BX_DEBUGGER
extern unsigned dbg_show_mask;

bx_bool BX_CPU_C::dbg_check_begin_instr_bpoint(void)
{
  Bit64u tt = bx_pc_system.time_ticks();
  bx_address debug_eip = RIP;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // support for 'show' command in debugger
  if(dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if (BX_DBG_MAX_VIR_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_virtual; i++) {
          if (bx_guard.iaddr.vir[i].enabled &&
             (bx_guard.iaddr.vir[i].cs  == cs) &&
             (bx_guard.iaddr.vir[i].eip == debug_eip))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_LIN_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      if ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_linear; i++) {
          if (bx_guard.iaddr.lin[i].enabled &&
             (bx_guard.iaddr.lin[i].addr == BX_CPU_THIS_PTR guard_found.laddr))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_PHY_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      // The "guard_found.icount!=0" condition allows you to step or
      // continue beyond a breakpoint.  Bryce tried removing it once,
      // and once you get to a breakpoint you are stuck there forever.
      // Not pretty.
      if (valid && ((BX_CPU_THIS_PTR guard_found.icount!=0) ||
          (tt != BX_CPU_THIS_PTR guard_found.time_tick)))
      {
        for (unsigned i=0; i<bx_guard.iaddr.num_physical; i++) {
          if (bx_guard.iaddr.phy[i].enabled && (bx_guard.iaddr.phy[i].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }

  return(0); // not on a breakpoint
}
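
// Editor's note: an illustrative, standalone sketch (not part of the
// original source) of the "don't retrigger at the same spot" guard used
// above: a breakpoint only fires if at least one instruction (icount) or
// one tick has elapsed since the last stop, which is what lets the user
// step or continue off a breakpoint they are currently sitting on.
// `Guard` is hypothetical.
#if 0
#include <cstdint>
#include <cstdio>

struct Guard {
  uint64_t icount    = 0;  // instructions executed since the last stop
  uint64_t last_tick = 0;  // tick of the last stop

  bool should_fire(uint64_t now, bool addr_matches) {
    if (!addr_matches) return false;
    if (icount == 0 && now == last_tick) return false;  // still parked here
    last_tick = now;
    icount = 0;
    return true;
  }
};

int main() {
  Guard g;
  // First arrival at the breakpoint address: fires.
  std::printf("%d\n", g.should_fire(100, true));   // 1
  // User resumes without executing anything: does not re-fire.
  std::printf("%d\n", g.should_fire(100, true));   // 0
  // One instruction later: fires again.
  g.icount++;
  std::printf("%d\n", g.should_fire(101, true));   // 1
  return 0;
}
#endif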

bx_bool BX_CPU_C::dbg_check_end_instr_bpoint(void)
{
  BX_CPU_THIS_PTR guard_found.icount++;
  BX_CPU_THIS_PTR guard_found.cs  =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
  BX_CPU_THIS_PTR guard_found.eip = RIP;
  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, RIP);
  BX_CPU_THIS_PTR guard_found.is_32bit_code =
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b;
  BX_CPU_THIS_PTR guard_found.is_64bit_code = Is64BitMode();

  // Check if we hit a read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", bx_pc_system.time_ticks()));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }

  // convenient point to see if user typed Ctrl-C
  if (bx_guard.interrupt_requested &&
     (bx_guard.guard_for & BX_DBG_GUARD_CTRL_C))
  {
    BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_CTRL_C;
    return(1); // Ctrl-C pressed
  }

  return(0); // no breakpoint
}

void BX_CPU_C::dbg_take_irq(void)
{
  // NOTE: similar code in ::cpu_loop()

  if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
    if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
      // normal return from setjmp setup
      unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR errorno = 0;
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, 0, 0, 0);
    }
  }
}

void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force the simulator to take an interrupt, without
  // regard to IF

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, 0, 0, 0);
  }
}

void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif  // #if BX_DEBUGGER
