// ao486/trunk/bochs486/cpu/event.cc  (OpenCores ao486 Subversion repository, rev 7)
/////////////////////////////////////////////////////////////////////////
// $Id: event.cc 11683 2013-05-04 19:10:50Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2011-2012 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"
 
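// Called while the CPU is in a non-active activity state (HLT, MWAIT, wait for
// SIPI). Returns 1 if the caller of cpu_loop should regain control, 0 once a
// pending event has ended the wait and execution can resume.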
bx_bool BX_CPU_C::handleWaitForEvent(void)
{
  if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
    // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
    BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
    return 1; // Return to caller of cpu_loop.
  }

  // For one processor, pass the time as quickly as possible until
  // an interrupt wakes up the CPU.
  while (1)
  {
    if ((is_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR) && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
         is_pending(BX_EVENT_NMI | BX_EVENT_SMI | BX_EVENT_INIT |
            BX_EVENT_VMX_VTPR_UPDATE |
            BX_EVENT_VMX_VEOI_UPDATE |
            BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
            BX_EVENT_VMX_MONITOR_TRAP_FLAG |
            BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
            BX_EVENT_VMX_VIRTUAL_NMI))
    {
      // interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
      if (BX_CPU_THIS_PTR activity_state >= BX_ACTIVITY_STATE_MWAIT)
        BX_CPU_THIS_PTR monitor.reset_monitor();
#endif
      BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
      BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
      break;
    }
    if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_ACTIVE) {
      // happens also when MWAIT monitor was hit
//    BX_INFO(("handleWaitForEvent: reset detected in HLT state"));
      break;
    }

    if (BX_HRQ && BX_DBG_ASYNC_DMA) {
      // handle DMA also when CPU is halted
      DEV_dma_raise_hlda();
    }

    // for multiprocessor simulation, even if this CPU is halted we still
    // must give the others a chance to simulate.  If an interrupt has
    // arrived, then clear the HALT condition; otherwise just return from
    // the CPU loop with stop_reason STOP_CPU_HALTED.
#if BX_SUPPORT_SMP
    if (BX_SMP_PROCESSORS > 1) {
      // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
      BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
      return 1; // Return to caller of cpu_loop.
    }
#endif

#if BX_DEBUGGER
    if (bx_guard.interrupt_requested)
      return 1; // Return to caller of cpu_loop.
#endif

    if (bx_pc_system.kill_bochs_request) {
      // setting kill_bochs_request causes the cpu loop to return ASAP.
      return 1; // Return to caller of cpu_loop.
    }

    BX_TICKN(10); // when in HLT run time faster for single CPU

//AO new
BX_INSTR_HLT(0);
//AO new
  }

  return 0;
}
 
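// Acknowledge the highest-priority pending external interrupt: give SVM/VMX
// guests a chance to intercept it, fetch the vector from the local APIC or the
// PIC, and deliver it through interrupt().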
void BX_CPU_C::InterruptAcknowledge(void)
{
  Bit8u vector;

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_INTR)) Svm_Vmexit(SVM_VMEXIT_INTR);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {

#if BX_SUPPORT_VMX >= 2
    if (is_pending(BX_EVENT_PENDING_VMX_VIRTUAL_INTR)) {
      VMX_Deliver_Virtual_Interrupt();
      return;
    }
#endif

    VMexit_ExtInterrupt();
  }
#endif

  // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
  if (is_pending(BX_EVENT_PENDING_LAPIC_INTR))
    vector = BX_CPU_THIS_PTR lapic.acknowledge_int();
  else
#endif
    // if no local APIC, always acknowledge the PIC.
    vector = DEV_pic_iac(); // may set INTR with next interrupt

  BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
  VMexit_Event(BX_EXTERNAL_INTERRUPT, vector, 0, 0);
#endif

  BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
  interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);

  BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
}

#if BX_SUPPORT_SVM
void BX_CPU_C::VirtualInterruptAcknowledge(void)
{
  Bit8u vector = SVM_V_INTR_VECTOR;

  if (SVM_INTERCEPT(SVM_INTERCEPT0_VINTR)) Svm_Vmexit(SVM_VMEXIT_VINTR);

  clear_event(BX_EVENT_SVM_VIRQ_PENDING);

  BX_CPU_THIS_PTR EXT = 1; /* external event */

  BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
  interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);

  BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
}
#endif
 
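// Handle pending asynchronous events in the architectural priority order laid
// out in the "Priority N" comments below. Returns 1 when the CPU loop should
// return to its caller, 0 to continue executing instructions.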
bx_bool BX_CPU_C::handleAsyncEvent(void)
{
  //
  // This area is where we process special conditions and events.
  //
  if (BX_CPU_THIS_PTR activity_state != BX_ACTIVITY_STATE_ACTIVE) {
    // For one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
    if (handleWaitForEvent()) return 1;
  }

  if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

#if BX_SUPPORT_SVM
  // debug exceptions or trap due to breakpoint register match
  // ignored and discarded if GIF == 0
  // debug traps due to EFLAGS.TF remain untouched
  if (! BX_CPU_THIS_PTR svm_gif)
    BX_CPU_THIS_PTR debug_trap &= BX_DEBUG_SINGLE_STEP_BIT;
#endif
 
  // APIC virtualization traps take priority over SMI, INIT and lower priority events and
  // are not blocked by EFLAGS.IF or by interrupt inhibits from MOV_SS and STI
#if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
  if (is_unmasked_event_pending(BX_EVENT_VMX_VTPR_UPDATE |
                                BX_EVENT_VMX_VEOI_UPDATE | BX_EVENT_VMX_VIRTUAL_APIC_WRITE))
  {
    VMX_Virtual_Apic_Access_Trap();
  }
#endif

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_TASK_SWITCH_BIT) {
    exception(BX_DB_EXCEPTION, 0); // no error, not interrupt
  }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  if (is_unmasked_event_pending(BX_EVENT_SMI) && SVM_GIF)
  {
    clear_event(BX_EVENT_SMI); // clear SMI pending flag
    enter_system_management_mode(); // would disable NMI when SMM was accepted
  }

  if (is_unmasked_event_pending(BX_EVENT_INIT) && SVM_GIF) {
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if (SVM_INTERCEPT(SVM_INTERCEPT0_INIT)) Svm_Vmexit(SVM_VMEXIT_INIT);
    }
#endif
#if BX_SUPPORT_VMX
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      VMexit(VMX_VMEXIT_INIT, 0);
    }
#endif
    // reset will clear pending INIT
    reset(BX_RESET_SOFTWARE);

#if BX_SUPPORT_SMP
    if (BX_SMP_PROCESSORS > 1) {
      // if HALT condition remains, return so other CPUs have a chance
      if (BX_CPU_THIS_PTR activity_state != BX_ACTIVITY_STATE_ACTIVE) {
#if BX_DEBUGGER
        BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
        return 1; // Return to caller of cpu_loop.
      }
    }
#endif
  }

#if BX_SUPPORT_VMX
  if (is_unmasked_event_pending(BX_EVENT_VMX_MONITOR_TRAP_FLAG)) {
    VMexit(VMX_VMEXIT_MONITOR_TRAP_FLAG, 0);
  }
#endif

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
    // A trap may be inhibited on this boundary due to an instruction which loaded SS
#if BX_X86_DEBUGGER
    // Pages with code breakpoints always have async_event=1 and therefore come here
    BX_CPU_THIS_PTR debug_trap |= code_breakpoint_match(get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip));
#endif
    if (BX_CPU_THIS_PTR debug_trap & 0xf000) {
      exception(BX_DB_EXCEPTION, 0); // no error, not interrupt
    }
    else {
      BX_CPU_THIS_PTR debug_trap = 0;
    }
  }

  // Priority 5: External Interrupts
  //   VMX Preemption Timer Expired.
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS) || ! SVM_GIF) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
  }
#if BX_SUPPORT_VMX >= 2
  else if (is_unmasked_event_pending(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED)) {
    VMexit(VMX_VMEXIT_VMX_PREEMPTION_TIMER_EXPIRED, 0);
  }
#endif
#if BX_SUPPORT_VMX
  else if (is_unmasked_event_pending(BX_EVENT_VMX_VIRTUAL_NMI)) {
    VMexit(VMX_VMEXIT_NMI_WINDOW, 0);
  }
#endif
  else if (is_unmasked_event_pending(BX_EVENT_NMI)) {
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if (SVM_INTERCEPT(SVM_INTERCEPT0_NMI)) Svm_Vmexit(SVM_VMEXIT_NMI);
    }
#endif
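    // deliver the NMI: clear the latched request and mask further NMIs; the
    // event stays masked until unmask_event(BX_EVENT_NMI) is called elsewhere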
    clear_event(BX_EVENT_NMI);
     mask_event(BX_EVENT_NMI);
    BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
    VMexit_Event(BX_NMI, 2, 0, 0);
#endif
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, BX_NMI, 0, 0);
  }
#if BX_SUPPORT_VMX
  else if (is_pending(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING) && BX_CPU_THIS_PTR get_IF()) {
    // interrupt-window exiting
    VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
  }
#endif
  else if (is_unmasked_event_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR |
                                     BX_EVENT_PENDING_VMX_VIRTUAL_INTR))
  {
    InterruptAcknowledge();
  }
#if BX_SUPPORT_SVM
  else if (is_unmasked_event_pending(BX_EVENT_SVM_VIRQ_PENDING))
  {
    // virtual interrupt acknowledge
    VirtualInterruptAcknowledge();
  }
#endif
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_SINGLE_STEP_BIT;
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  if (!((SVM_GIF && unmasked_events_pending()) || BX_CPU_THIS_PTR debug_trap ||
//      BX_CPU_THIS_PTR get_TF() || // implies debug_trap is set
        BX_HRQ))
  {
    BX_CPU_THIS_PTR async_event = 0;
  }

  return 0; // Continue executing cpu_loop.
}

// Certain instructions inhibit interrupts, some debug exceptions and single-step traps.
void BX_CPU_C::inhibit_interrupts(unsigned mask)
{
  // Loading of SS disables interrupts until the next instruction completes
  // but only under the assumption that the previous instruction didn't also load SS.
  if (! interrupts_inhibited(BX_INHIBIT_INTERRUPTS_BY_MOVSS)) {
    BX_DEBUG(("inhibit interrupts mask = %d", mask));
    BX_CPU_THIS_PTR inhibit_mask = mask;
    BX_CPU_THIS_PTR inhibit_icount = get_icount() + 1; // inhibit for next instruction
  }
}
 
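// Report whether all inhibits in 'mask' are currently in effect. An inhibit set
// by inhibit_interrupts() lasts only while icount has not advanced past
// inhibit_icount, i.e. for the single instruction following the one that set it.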
388
bx_bool BX_CPU_C::interrupts_inhibited(unsigned mask)
389
{
390
  return (get_icount() <= BX_CPU_THIS_PTR inhibit_icount) && (BX_CPU_THIS_PTR inhibit_mask & mask) == mask;
391
}
392
 
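// A SIPI is acted on only while this CPU sits in wait-for-SIPI state: it makes
// the CPU active, starts execution at CS = vector*0x100 with RIP = 0, and
// re-enables INIT/SMI/NMI delivery.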
void BX_CPU_C::deliver_SIPI(unsigned vector)
{
  if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
#if BX_SUPPORT_VMX
    if (BX_CPU_THIS_PTR in_vmx_guest)
      VMexit(VMX_VMEXIT_SIPI, vector);
#endif
    BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
    RIP = 0;
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], vector*0x100);
    unmask_event(BX_EVENT_INIT | BX_EVENT_SMI | BX_EVENT_NMI);
    BX_INFO(("CPU %d started up at %04X:%08X by APIC",
                   BX_CPU_THIS_PTR bx_cpuid, vector*0x100, EIP));
  } else {
    BX_INFO(("CPU %d started up by APIC, but was not halted at that time", BX_CPU_THIS_PTR bx_cpuid));
  }
}

void BX_CPU_C::deliver_INIT(void)
{
  if (! is_masked_event(BX_EVENT_INIT)) {
    signal_event(BX_EVENT_INIT);
  }
}

void BX_CPU_C::deliver_NMI(void)
{
  signal_event(BX_EVENT_NMI);
}

void BX_CPU_C::deliver_SMI(void)
{
  signal_event(BX_EVENT_SMI);
}

void BX_CPU_C::raise_INTR(void)
{
  signal_event(BX_EVENT_PENDING_INTR);
}

void BX_CPU_C::clear_INTR(void)
{
  clear_event(BX_EVENT_PENDING_INTR);
}

#if BX_DEBUGGER

void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif  // #if BX_DEBUGGER
