OpenCores
URL https://opencores.org/ocsvn/ao486/ao486/trunk

Subversion Repositories ao486

[/] [ao486/] [trunk/] [bochs486/] [cpu/] [crregs.cc] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 alfik
/////////////////////////////////////////////////////////////////////////
2
// $Id: crregs.cc 11679 2013-04-17 19:46:11Z sshwarts $
3
/////////////////////////////////////////////////////////////////////////
4
//
5
//   Copyright (c) 2010-2012 Stanislav Shwartsman
6
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
7
//
8
//  This library is free software; you can redistribute it and/or
9
//  modify it under the terms of the GNU Lesser General Public
10
//  License as published by the Free Software Foundation; either
11
//  version 2 of the License, or (at your option) any later version.
12
//
13
//  This library is distributed in the hope that it will be useful,
14
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
15
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16
//  Lesser General Public License for more details.
17
//
18
//  You should have received a copy of the GNU Lesser General Public
19
//  License along with this library; if not, write to the Free Software
20
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21
//
22
/////////////////////////////////////////////////////////////////////////
23
 
24
#define NEED_CPU_REG_SHORTCUTS 1
25
#include "bochs.h"
26
#include "cpu.h"
27
#define LOG_THIS BX_CPU_THIS_PTR
28
 
29
// MOV DRx <- 32-bit GPR. Fault checks are performed in the order coded
// below: VMX DR-access exit, DR4/DR5 #UD (when CR4.DE=1), DR7.GD #DB,
// CPL!=0 #GP(0), then the SVM DR-write intercept.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DdRd(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_DR_Access(0 /* write */, i->dst(), i->src());
#endif

#if BX_CPU_LEVEL >= 5
  // CR4.DE=1 turns the DR4/DR5 aliases into #UD ((index & 0xE) == 4 covers both)
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->dst() & 0xE) == 4) {
      BX_ERROR(("MOV_DdRd: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0);
    }
  }
#endif

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7.get_GD()) {
    BX_ERROR(("MOV_DdRd: DR7 GD bit is set"));
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_DR_ACCESS_BIT;
    exception(BX_DB_EXCEPTION, 0);
  }

  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_DdRd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_DR_WRITE_INTERCEPTED(i->dst()))
      Svm_Vmexit(SVM_VMEXIT_DR0_WRITE + i->dst(), BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->src() : 0);
  }
#endif

  invalidate_prefetch_q();

  Bit32u val_32 = BX_READ_32BIT_REG(i->src());

  switch (i->dst()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      BX_CPU_THIS_PTR dr[i->dst()] = val_32;
      // drop any cached translation for the new breakpoint linear address
      TLB_invlpg(val_32);
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions on,
      // access to DR4 causes #UD
    case 6: // DR6
#if BX_CPU_LEVEL <= 4
      // On 386/486 bit12 is settable
      BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff0ff0) |
                            (val_32 & 0x0000f00f);
#else
      // On Pentium+, bit12 is always zero
      BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff0ff0) |
                            (val_32 & 0x0000e00f);
#endif
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions on,
      // access to DR5 causes #UD
    case 7: // DR7
      // Note: 486+ ignore GE and LE flags.  On the 386, exact
      // data breakpoint matching does not occur unless it is enabled
      // by setting the LE and/or GE flags.

#if BX_CPU_LEVEL <= 4
      // 386/486: you can play with all the bits except b10 is always 1
      BX_CPU_THIS_PTR dr7.set32(val_32 | 0x00000400);
#else
      // Pentium+: bits15,14,12 are hardwired to 0, rest are settable.
      // Even bits 11,10 are changeable though reserved.
      BX_CPU_THIS_PTR dr7.set32((val_32 & 0xffff2fff) | 0x00000400);
#endif
#if BX_X86_DEBUGGER
      // Some sanity checks...
      if ((BX_CPU_THIS_PTR dr7.get_R_W0() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN0()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W1() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN1()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W2() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN2()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W3() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN3()))
      {
        // Instruction breakpoint with LENx not 00b (1-byte length)
        BX_ERROR(("MOV_DdRd: write of 0x%08x, R/W=00b LEN!=00b", val_32));
      }
#endif

      TLB_flush(); // the DR7 write could enable some breakpoints
      break;

    default:
      BX_ERROR(("MOV_DdRd: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0);
  }

  BX_NEXT_TRACE(i);
}
132
 
133
// MOV 32-bit GPR <- DRx. Same fault ordering as MOV_DdRd (VMX exit,
// DR4/DR5 #UD under CR4.DE, DR7.GD #DB, CPL #GP, SVM read intercept),
// then the selected debug register value is zero-extended into the GPR.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdDd(bxInstruction_c *i)
{
  Bit32u val_32;

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_DR_Access(1 /* read */, i->src(), i->dst());
#endif

#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->src() & 0xE) == 4) {
      BX_ERROR(("MOV_RdDd: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0);
    }
  }
#endif

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7.get_GD()) {
    BX_ERROR(("MOV_RdDd: DR7 GD bit is set"));
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_DR_ACCESS_BIT;
    exception(BX_DB_EXCEPTION, 0);
  }

  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_RdDd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_DR_READ_INTERCEPTED(i->src()))
      Svm_Vmexit(SVM_VMEXIT_DR0_READ + i->src(), BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  switch (i->src()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      val_32 = (Bit32u) BX_CPU_THIS_PTR dr[i->src()];
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      val_32 = BX_CPU_THIS_PTR dr6.get32();
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      val_32 = BX_CPU_THIS_PTR dr7.get32();
      break;

    default:
      BX_ERROR(("MOV_RdDd: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0);
  }

  BX_WRITE_32BIT_REGZ(i->dst(), val_32);

  BX_NEXT_INSTR(i);
}
203
 
204
#if BX_SUPPORT_X86_64
205
// MOV DRx <- 64-bit GPR (long mode only; compiled under BX_SUPPORT_X86_64).
// Differs from the 32-bit form: DR index can reach 8-15 via REX.R (rejected
// with #UD), and writes of non-zero upper 32 bits to DR6/DR7 raise #GP(0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_DqRq(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_DR_Access(0 /* write */, i->dst(), i->src());
#endif

  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->dst() & 0xE) == 4) {
      BX_ERROR(("MOV_DqRq: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0);
    }
  }

  // only DR0..DR7 exist; REX.R-encoded DR8..DR15 are undefined
  if (i->dst() >= 8) {
    BX_ERROR(("MOV_DqRq: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7.get_GD()) {
    BX_ERROR(("MOV_DqRq: DR7 GD bit is set"));
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_DR_ACCESS_BIT;
    exception(BX_DB_EXCEPTION, 0);
  }

  /* #GP(0) if CPL is not 0 */
  if (CPL != 0) {
    BX_ERROR(("MOV_DqRq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_DR_WRITE_INTERCEPTED(i->dst()))
      Svm_Vmexit(SVM_VMEXIT_DR0_WRITE + i->dst(), BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->src() : 0);
  }
#endif

  invalidate_prefetch_q();

  Bit64u val_64 = BX_READ_64BIT_REG(i->src());

  switch (i->dst()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      BX_CPU_THIS_PTR dr[i->dst()] = val_64;
      TLB_invlpg(val_64);
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      if (GET32H(val_64)) {
        BX_ERROR(("MOV_DqRq: attempt to set upper part of DR6"));
        exception(BX_GP_EXCEPTION, 0);
      }
      // On Pentium+, bit12 is always zero
      BX_CPU_THIS_PTR dr6.val32 = (BX_CPU_THIS_PTR dr6.val32 & 0xffff0ff0) |
                            (val_64 & 0x0000e00f);
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      // Note: 486+ ignore GE and LE flags.  On the 386, exact
      // data breakpoint matching does not occur unless it is enabled
      // by setting the LE and/or GE flags.

      if (GET32H(val_64)) {
        BX_ERROR(("MOV_DqRq: attempt to set upper part of DR7"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // Pentium+: bits15,14,12 are hardwired to 0, rest are settable.
      // Even bits 11,10 are changeable though reserved.
      BX_CPU_THIS_PTR dr7.set32((val_64 & 0xffff2fff) | 0x00000400);

#if BX_X86_DEBUGGER
      if ((BX_CPU_THIS_PTR dr7.get_R_W0() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN0()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W1() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN1()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W2() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN2()) ||
          (BX_CPU_THIS_PTR dr7.get_R_W3() == BX_HWDebugInstruction && BX_CPU_THIS_PTR dr7.get_LEN3()))
      {
        // Instruction breakpoint with LENx not 00b (1-byte length)
        BX_ERROR(("MOV_DqRq: write of 0x%08x, R/W=00b LEN!=00b", BX_CPU_THIS_PTR dr7.get32()));
      }
#endif

      TLB_flush(); // the DR7 write could enable some breakpoints
      break;

    default:
      BX_ERROR(("MOV_DqRq: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0);
  }

  BX_NEXT_TRACE(i);
}
309
 
310
// MOV 64-bit GPR <- DRx (long mode). Mirrors MOV_RdDd with the added
// REX.R range check; DR6/DR7 reads return the 32-bit architectural value
// zero-extended to 64 bits.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqDq(bxInstruction_c *i)
{
  Bit64u val_64;

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_DR_Access(1 /* read */, i->src(), i->dst());
#endif

  if (BX_CPU_THIS_PTR cr4.get_DE()) {
    if ((i->src() & 0xE) == 4) {
      BX_ERROR(("MOV_RqDq: access to DR4/DR5 causes #UD"));
      exception(BX_UD_EXCEPTION, 0);
    }
  }

  // only DR0..DR7 exist; REX.R-encoded DR8..DR15 are undefined
  if (i->src() >= 8) {
    BX_ERROR(("MOV_RqDq: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // Note: processor clears GD upon entering debug exception
  // handler, to allow access to the debug registers
  if (BX_CPU_THIS_PTR dr7.get_GD()) {
    BX_ERROR(("MOV_RqDq: DR7 GD bit is set"));
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_DR_ACCESS_BIT;
    exception(BX_DB_EXCEPTION, 0);
  }

  /* #GP(0) if CPL is not 0 */
  if (CPL != 0) {
    BX_ERROR(("MOV_RqDq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_DR_READ_INTERCEPTED(i->src()))
      Svm_Vmexit(SVM_VMEXIT_DR0_READ + i->src(), BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  switch (i->src()) {
    case 0: // DR0
    case 1: // DR1
    case 2: // DR2
    case 3: // DR3
      val_64 = BX_CPU_THIS_PTR dr[i->src()];
      break;

    case 4: // DR4
      // DR4 aliased to DR6 by default. With Debug Extensions ON,
      // access to DR4 causes #UD
    case 6: // DR6
      val_64 = BX_CPU_THIS_PTR dr6.get32();
      break;

    case 5: // DR5
      // DR5 aliased to DR7 by default. With Debug Extensions ON,
      // access to DR5 causes #UD
    case 7: // DR7
      val_64 = BX_CPU_THIS_PTR dr7.get32();
      break;

    default:
      BX_ERROR(("MOV_RqDq: #UD - register index out of range"));
      exception(BX_UD_EXCEPTION, 0);
  }

  BX_WRITE_64BIT_REG(i->dst(), val_64);

  BX_NEXT_INSTR(i);
}
383
#endif // #if BX_SUPPORT_X86_64
384
 
385
#define BX_SVM_CR_WRITE_MASK (BX_CONST64(1) << 63)
386
 
387
// MOV CR0 <- 32-bit GPR. The same opcode handler also services the AMD
// "LOCK MOV CR0" alias for CR8 when dst != 0 (see the else branch below).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR0Rd(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_CR0Rd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit32u val_32 = BX_READ_32BIT_REG(i->src());

  if (i->dst() == 0) {
    // CR0
#if BX_SUPPORT_VMX
    // in a VMX guest the written value may be rewritten/intercepted
    if (BX_CPU_THIS_PTR in_vmx_guest)
      val_32 = (Bit32u) VMexit_CR0_Write(i, val_32);
#endif
    // SetCR0 validates the value; rejection surfaces as #GP(0)
    if (! SetCR0(i, val_32))
      exception(BX_GP_EXCEPTION, 0);

    BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR0, val_32);
  }
#if BX_CPU_LEVEL >= 6
  else {
    // AMD feature: LOCK CR0 allows CR8 access even in 32-bit mode
    WriteCR8(i, val_32);
  }
#endif

  BX_NEXT_TRACE(i);
}
419
 
420
// MOV CR2 <- 32-bit GPR. CR2 (page-fault linear address) has no reserved
// bits, so the value is stored as-is after privilege/SVM checks.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR2Rd(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_CR2Rd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(2)) {
      // with decode assist the exit info carries the source GPR number
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR2_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR2_WRITE);
    }
  }
#endif

  BX_CPU_THIS_PTR cr2 = BX_READ_32BIT_REG(i->src());

  BX_NEXT_INSTR(i);
}
443
 
444
// MOV CR3 <- 32-bit GPR. In legacy PAE mode (PG=1, PAE=1, not long mode)
// the new PDPTEs are validated before the base is latched; failure is #GP(0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR3Rd(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_CR3Rd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit32u val_32 = BX_READ_32BIT_REG(i->src());

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR3_Write(i, val_32);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(3)) {
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR3_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR3_WRITE);
    }
  }
#endif

#if BX_CPU_LEVEL >= 6
  // PAE (non-long-mode) reloads the 4 PDPTE registers on CR3 write;
  // an invalid PDPTE makes the whole write fault
  if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
    if (! CheckPDPTR(val_32)) {
      BX_ERROR(("MOV_CR3Rd(): PDPTR check failed !"));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
#endif

  if (! SetCR3(val_32))
    exception(BX_GP_EXCEPTION, 0);

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR3, val_32);

  BX_NEXT_TRACE(i);
}
488
 
489
// MOV CR4 <- 32-bit GPR. CR4 only exists on CPU level >= 5; below that
// the handler compiles down to just BX_NEXT_TRACE.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR4Rd(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_CR4Rd: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit32u val_32 = BX_READ_32BIT_REG(i->src());
#if BX_SUPPORT_VMX
  // in a VMX guest the written value may be rewritten/intercepted
  if (BX_CPU_THIS_PTR in_vmx_guest)
    val_32 = (Bit32u) VMexit_CR4_Write(i, val_32);
#endif
  // SetCR4 validates reserved/unsupported bits; rejection is #GP(0)
  if (! SetCR4(i, val_32))
    exception(BX_GP_EXCEPTION, 0);

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR4, val_32);
#endif

  BX_NEXT_TRACE(i);
}
513
 
514
// MOV 32-bit GPR <- CR0 (or CR8 via the AMD LOCK-prefix alias when src != 0).
// read_CR0() is used so the VMX CR0 read shadow is honored.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdCR0(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_RdCR0: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit32u val_32 = 0;

  if (i->src() == 0) {
    // CR0
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if(SVM_CR_READ_INTERCEPTED(0))
        Svm_Vmexit(SVM_VMEXIT_CR0_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
    }
#endif

    val_32 = (Bit32u) read_CR0(); /* correctly handle VMX */
  }
#if BX_CPU_LEVEL >= 6
  else {
    // AMD feature: LOCK CR0 allows CR8 access even in 32-bit mode
    val_32 = ReadCR8(i);
  }
#endif

  BX_WRITE_32BIT_REGZ(i->dst(), val_32);

  BX_NEXT_INSTR(i);
}
546
 
547
// MOV 32-bit GPR <- CR2: hand back the low 32 bits of the page-fault
// linear address register after the privilege and SVM intercept checks.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdCR2(bxInstruction_c *i)
{
  // Reading CR2 is privileged; CPL is 0 in real mode, so a bare CPL test suffices.
  if (CPL != 0) {
    BX_ERROR(("MOV_RdCR2: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  // the host may intercept CR2 reads from an SVM guest
  if (BX_CPU_THIS_PTR in_svm_guest && SVM_CR_READ_INTERCEPTED(2)) {
    Svm_Vmexit(SVM_VMEXIT_CR2_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  const Bit32u cr2_lo32 = (Bit32u) BX_CPU_THIS_PTR cr2;
  BX_WRITE_32BIT_REGZ(i->dst(), cr2_lo32);

  BX_NEXT_INSTR(i);
}
566
 
567
// MOV 32-bit GPR <- CR3. Both the SVM read intercept and the VMX
// CR3-read exit are consulted before the value is delivered.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdCR3(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_RdCR3: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_READ_INTERCEPTED(3))
      Svm_Vmexit(SVM_VMEXIT_CR3_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR3_Read(i);
#endif

  Bit32u val_32 = (Bit32u) BX_CPU_THIS_PTR cr3;

  BX_WRITE_32BIT_REGZ(i->dst(), val_32);

  BX_NEXT_INSTR(i);
}
593
 
594
// MOV 32-bit GPR <- CR4 (CPU level >= 5 only). read_CR4() applies the
// VMX CR4 read shadow.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RdCR4(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("MOV_RdCR4: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_READ_INTERCEPTED(4))
      Svm_Vmexit(SVM_VMEXIT_CR4_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  Bit32u val_32 = (Bit32u) read_CR4(); /* correctly handle VMX */

  BX_WRITE_32BIT_REGZ(i->dst(), val_32);
#endif

  BX_NEXT_INSTR(i);
}
617
 
618
#if BX_SUPPORT_X86_64
619
// MOV CR0 <- 64-bit GPR (long mode). dst != 0 selects CR8 (REX.R encoding
// of the same opcode); no extra #UD filtering is needed for that case here.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR0Rq(bxInstruction_c *i)
{
  if (CPL!=0) {
    BX_ERROR(("MOV_CR0Rq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit64u val_64 = BX_READ_64BIT_REG(i->src());

  if (i->dst() == 0) {
    // CR0
#if BX_SUPPORT_VMX
    if (BX_CPU_THIS_PTR in_vmx_guest)
      val_64 = VMexit_CR0_Write(i, val_64);
#endif
    if (! SetCR0(i, val_64))
      exception(BX_GP_EXCEPTION, 0);

    BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR0, (Bit32u) val_64);
  }
  else {
    WriteCR8(i, val_64);
  }

  BX_NEXT_TRACE(i);
}
647
 
648
// MOV CR2 <- 64-bit GPR (long mode). Any encoding other than CR2 is #UD;
// the full 64-bit value is stored (CR2 holds a linear address).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR2Rq(bxInstruction_c *i)
{
  if (i->dst() != 2) {
    BX_ERROR(("MOV_CR2Rq: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL!=0) {
    BX_ERROR(("MOV_CR2Rq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(2)) {
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR2_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR2_WRITE);
    }
  }
#endif

  BX_CPU_THIS_PTR cr2 = BX_READ_64BIT_REG(i->src());

  BX_NEXT_INSTR(i);
}
675
 
676
// MOV CR3 <- 64-bit GPR (long mode). Unlike the 32-bit path there is no
// PDPTR validation: long-mode paging walks PML4 directly.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR3Rq(bxInstruction_c *i)
{
  if (i->dst() != 3) {
    BX_ERROR(("MOV_CR3Rq: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL!=0) {
    BX_ERROR(("MOV_CR3Rq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit64u val_64 = BX_READ_64BIT_REG(i->src());

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR3_Write(i, val_64);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(3)) {
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR3_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR3_WRITE);
    }
  }
#endif

  // no PDPTR checks in long mode
  if (! SetCR3(val_64))
    exception(BX_GP_EXCEPTION, 0);

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR3, val_64);

  BX_NEXT_TRACE(i);
}
716
 
717
// MOV CR4 <- 64-bit GPR (long mode). Non-CR4 encodings are #UD; the value
// passes through the VMX write exit, then SetCR4 validation (#GP on failure).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_CR4Rq(bxInstruction_c *i)
{
  if (i->dst() != 4) {
    BX_ERROR(("MOV_CR4Rq: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL!=0) {
    BX_ERROR(("MOV_CR4Rq: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  Bit64u val_64 = BX_READ_64BIT_REG(i->src());
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    val_64 = VMexit_CR4_Write(i, val_64);
#endif
  if (! SetCR4(i, val_64))
    exception(BX_GP_EXCEPTION, 0);

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_MOV_CR4, (Bit32u) val_64);

  BX_NEXT_TRACE(i);
}
743
 
744
// MOV 64-bit GPR <- CR0 (or CR8 when src != 0, the REX.R encoding).
// read_CR0() honors the VMX CR0 read shadow.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqCR0(bxInstruction_c *i)
{
  if (CPL!=0) {
    BX_ERROR(("MOV_RqCR0: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit64u val_64;

  if (i->src() == 0) {
    // CR0
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      if(SVM_CR_READ_INTERCEPTED(0))
        Svm_Vmexit(SVM_VMEXIT_CR0_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
    }
#endif

    val_64 = read_CR0(); /* correctly handle VMX */
  }
  else {
    // CR8
    val_64 = ReadCR8(i);
  }

  BX_WRITE_64BIT_REG(i->dst(), val_64);

  BX_NEXT_INSTR(i);
}
773
 
774
// MOV 64-bit GPR <- CR2 (long mode): deliver the full 64-bit page-fault
// linear address after #UD/#GP and SVM intercept checks.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqCR2(bxInstruction_c *i)
{
  // only the CR2 encoding is architecturally defined for this handler
  if (i->src() != 2) {
    BX_ERROR(("MOV_RqCR2: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // privileged instruction
  if (CPL != 0) {
    BX_ERROR(("MOV_RqCR2: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  // the host may intercept CR2 reads from an SVM guest
  if (BX_CPU_THIS_PTR in_svm_guest && SVM_CR_READ_INTERCEPTED(2)) {
    Svm_Vmexit(SVM_VMEXIT_CR2_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  const Bit64u cr2_value = BX_CPU_THIS_PTR cr2;
  BX_WRITE_64BIT_REG(i->dst(), cr2_value);

  BX_NEXT_INSTR(i);
}
797
 
798
// MOV 64-bit GPR <- CR3 (long mode). Both the SVM read intercept and the
// VMX CR3-read exit are consulted before the value is delivered.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqCR3(bxInstruction_c *i)
{
  if (i->src() != 3) {
    BX_ERROR(("MOV_RqCR3: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL!=0) {
    BX_ERROR(("MOV_RqCR3: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_READ_INTERCEPTED(3))
      Svm_Vmexit(SVM_VMEXIT_CR3_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR3_Read(i);
#endif

  BX_WRITE_64BIT_REG(i->dst(), BX_CPU_THIS_PTR cr3);

  BX_NEXT_INSTR(i);
}
826
 
827
// MOV 64-bit GPR <- CR4 (long mode). read_CR4() honors the VMX CR4 read
// shadow; non-CR4 encodings are #UD.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_RqCR4(bxInstruction_c *i)
{
  if (i->src() != 4) {
    BX_ERROR(("MOV_RqCR4: #UD - register index out of range"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL!=0) {
    BX_ERROR(("MOV_RqCR4: #GP(0) if CPL is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_READ_INTERCEPTED(4))
      Svm_Vmexit(SVM_VMEXIT_CR4_READ, BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST) ? i->dst() : 0);
  }
#endif

  Bit64u val_64 = read_CR4(); /* correctly handle VMX */

  BX_WRITE_64BIT_REG(i->dst(), val_64);

  BX_NEXT_INSTR(i);
}
852
#endif // #if BX_SUPPORT_X86_64
853
 
854
// LMSW r/m16: load the low 4 bits of CR0 (PE/MP/EM/TS) from a register or
// memory word. LMSW can set PE but can never clear it; higher CR0 bits are
// untouched. The source read happens before the VMX LMSW exit so RMAddr is
// available for exit qualification.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LMSW_Ew(bxInstruction_c *i)
{
  Bit16u msw;

  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("LMSW: CPL!=0 not in real mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  // LMSW is a CR0 write for SVM intercept purposes
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(0)) Svm_Vmexit(SVM_VMEXIT_CR0_WRITE);
  }
#endif

  if (i->modC0()) {
    msw = BX_READ_16BIT_REG(i->src());
  }
  else {
    /* use RMAddr(i) to save address for VMexit */
    RMAddr(i) = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    msw = read_virtual_word(i->seg(), RMAddr(i));
  }

  // LMSW does not affect PG,CD,NW,AM,WP,NE,ET bits, and cannot clear PE

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    msw = VMexit_LMSW(i, msw);
#endif

  // LMSW cannot clear PE
  if (BX_CPU_THIS_PTR cr0.get_PE())
    msw |= BX_CR0_PE_MASK; // adjust PE bit to current value of 1

  msw &= 0xf; // LMSW only affects last 4 flags

  // merge the 4-bit MSW into the current CR0 and run full CR0 validation
  Bit32u cr0 = (BX_CPU_THIS_PTR cr0.get32() & 0xfffffff0) | msw;
  if (! SetCR0(i, cr0))
    exception(BX_GP_EXCEPTION, 0);

  BX_NEXT_TRACE(i);
}
899
 
900
// SMSW to a register operand. The machine status word is the low part of
// CR0, read through read_CR0() so the VMX CR0 read shadow is applied.
// With a 32-bit operand size the whole CR0 image is written (zero-extended);
// with a 16-bit operand only the low word of the destination changes.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SMSW_EwR(bxInstruction_c *i)
{
  const Bit32u machine_status = (Bit32u) read_CR0();  // handle CR0 shadow in VMX

  if (! i->os32L()) {
    BX_WRITE_16BIT_REG(i->dst(), machine_status & 0xffff);
  }
  else {
    BX_WRITE_32BIT_REGZ(i->dst(), machine_status);
  }

  BX_NEXT_INSTR(i);
}
913
 
914
// SMSW to a memory operand: store the low 16 bits of CR0. CR0 is read
// first (read_CR0() applies the VMX shadow and may trigger an SVM exit),
// then the effective address is resolved and the word is written.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SMSW_EwM(bxInstruction_c *i)
{
  const Bit16u machine_status_word = (Bit16u)(read_CR0() & 0xffff);
  const bx_address dst_addr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  write_virtual_word(i->seg(), dst_addr, machine_status_word);

  BX_NEXT_INSTR(i);
}
922
 
923
// Return the guest-visible CR0 value. In an SVM guest this may trigger the
// CR0 read intercept (used for SMSW); in a VMX guest, bits owned by the
// host (vm_cr0_mask) are replaced by the CR0 read shadow.
bx_address BX_CPU_C::read_CR0(void)
{
#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    // used for SMSW instruction only
    if(SVM_CR_READ_INTERCEPTED(0)) Svm_Vmexit(SVM_VMEXIT_CR0_READ);
  }
#endif

  bx_address cr0_val = BX_CPU_THIS_PTR cr0.get32();

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
    // masked bits come from the read shadow, unmasked bits from real CR0
    cr0_val = (cr0_val & ~vm->vm_cr0_mask) | (vm->vm_cr0_read_shadow & vm->vm_cr0_mask);
  }
#endif

  return cr0_val;
}
943
 
944
#if BX_CPU_LEVEL >= 5
945
// Return the guest-visible CR4 value: in a VMX guest, bits owned by the
// host (vm_cr4_mask) are replaced by the CR4 read shadow.
bx_address BX_CPU_C::read_CR4(void)
{
  bx_address cr4_val = BX_CPU_THIS_PTR cr4.get32();

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
    // masked bits come from the read shadow, unmasked bits from real CR4
    cr4_val = (cr4_val & ~vm->vm_cr4_mask) | (vm->vm_cr4_read_shadow & vm->vm_cr4_mask);
  }
#endif

  return cr4_val;
}
958
#endif
959
 
960
// Validate a candidate CR0 value. Returns 1 when legal, 0 when the write
// must fault (callers raise #GP(0)). Checks: upper 32 bits must be clear,
// PG requires PE (except for SVM paged real mode), NW requires CD, and
// VMX-root constraints on NE/PE/PG.
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::check_CR0(bx_address cr0_val)
{
  bx_cr0_t temp_cr0;

#if BX_SUPPORT_X86_64
  if (GET32H(cr0_val)) {
    BX_ERROR(("check_CR0(): trying to set CR0 > 32 bits"));
    return 0;
  }
#endif

  temp_cr0.set32((Bit32u) cr0_val);

#if BX_SUPPORT_SVM
  if (! BX_CPU_THIS_PTR in_svm_guest) // it should be fine to enter paged real mode in SVM guest
#endif
  {
    if (temp_cr0.get_PG() && !temp_cr0.get_PE()) {
      BX_ERROR(("check_CR0(0x%08x): attempt to set CR0.PG with CR0.PE cleared !", temp_cr0.get32()));
      return 0;
    }
  }

#if BX_CPU_LEVEL >= 4
  if (temp_cr0.get_NW() && !temp_cr0.get_CD()) {
    BX_ERROR(("check_CR0(0x%08x): attempt to set CR0.NW with CR0.CD cleared !", temp_cr0.get32()));
    return 0;
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx) {
    // VMX operation requires CR0.NE=1
    if (!temp_cr0.get_NE()) {
      BX_ERROR(("check_CR0(0x%08x): attempt to clear CR0.NE in vmx mode !", temp_cr0.get32()));
      return 0;
    }
    // without unrestricted guest, PE and PG must stay set in VMX root
    if (!BX_CPU_THIS_PTR in_vmx_guest && !SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST)) {
      if (!temp_cr0.get_PE() || !temp_cr0.get_PG()) {
        BX_ERROR(("check_CR0(0x%08x): attempt to clear CR0.PE/CR0.PG in vmx mode !", temp_cr0.get32()));
        return 0;
      }
    }
  }
#endif

  return 1;
}
1007
 
1008
// Write a new value into CR0. Performs long-mode entry/exit bookkeeping
// (EFER.LMA), SVM/VMX write-intercept checks, reserved-bit behaviour for
// the configured CPU level, PDPTR reload checks and the resulting
// mode/SSE/AVX/TLB side effects.
// Returns 1 on success, 0 when the write is illegal (caller raises #GP).
bx_bool BX_CPU_C::SetCR0(bxInstruction_c *i, bx_address val)
{
  if (! check_CR0(val)) return 0;

  Bit32u val_32 = GET32L(val);

#if BX_CPU_LEVEL >= 6
  bx_bool pg = (val_32 >> 31) & 0x1; // new CR0.PG bit
#endif

#if BX_SUPPORT_X86_64
  // NOTE(review): 'pg' is declared only under BX_CPU_LEVEL >= 6; this block
  // presumably relies on BX_SUPPORT_X86_64 implying CPU_LEVEL >= 6 - confirm
  if (! BX_CPU_THIS_PTR cr0.get_PG() && pg) {
    // paging is being turned on
    if (BX_CPU_THIS_PTR efer.get_LME()) {
      // entering long mode: PAE must be enabled, CS.L must be clear and
      // TR may not hold a 286 (16-bit) TSS
      if (!BX_CPU_THIS_PTR cr4.get_PAE()) {
        BX_ERROR(("SetCR0: attempt to enter x86-64 long mode without enabling CR4.PAE !"));
        return 0;
      }
      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
        BX_ERROR(("SetCR0: attempt to enter x86-64 long mode with CS.L !"));
        return 0;
      }
      if (BX_CPU_THIS_PTR tr.cache.type <= 3) {
        BX_ERROR(("SetCR0: attempt to enter x86-64 long mode with TSS286 in TR !"));
        return 0;
      }
      BX_CPU_THIS_PTR efer.set_LMA(1);
    }
  }
  else if (BX_CPU_THIS_PTR cr0.get_PG() && ! pg) {
    // paging is being turned off
    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
      BX_ERROR(("SetCR0(): attempt to leave 64 bit mode directly to legacy mode !"));
      return 0;
    }
    if (BX_CPU_THIS_PTR efer.get_LMA()) {
      if (BX_CPU_THIS_PTR cr4.get_PCIDE()) {
        BX_ERROR(("SetCR0(): attempt to leave 64 bit mode with CR4.PCIDE set !"));
        return 0;
      }
      if (BX_CPU_THIS_PTR gen_reg[BX_64BIT_REG_RIP].dword.hrx != 0) {
        BX_PANIC(("SetCR0(): attempt to leave x86-64 LONG mode with RIP upper != 0"));
      }
      BX_CPU_THIS_PTR efer.set_LMA(0);
    }
  }
#endif  // #if BX_SUPPORT_X86_64

  // handle reserved bits behaviour
#if BX_CPU_LEVEL == 3
  val_32 = val_32 | 0x7ffffff0;
#elif BX_CPU_LEVEL == 4
  val_32 = (val_32 | 0x00000010) & 0xe005003f;
#elif BX_CPU_LEVEL == 5
  val_32 = val_32 | 0x00000010;
#elif BX_CPU_LEVEL == 6
  val_32 = (val_32 | 0x00000010) & 0xe005003f;
#else
#error "SetCR0: implement reserved bits behaviour for this CPU_LEVEL"
#endif

  Bit32u oldCR0 = BX_CPU_THIS_PTR cr0.get32();

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(0)) {
      // LMSW instruction should VMEXIT before
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR0_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR0_WRITE);
    }

    if (SVM_INTERCEPT(SVM_INTERCEPT0_CR0_WRITE_NO_TS_MP)) {
      if ((oldCR0 & 0xfffffff5) != (val_32 & 0xfffffff5)) {
        // any other bit except TS or MP had changed
        Svm_Vmexit(SVM_VMEXIT_CR0_SEL_WRITE);
      }
    }
  }
#endif

#if BX_CPU_LEVEL >= 6
  // enabling PAE paging outside long mode reloads the PDPTRs, which may fault
  if (pg && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
    if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
      BX_ERROR(("SetCR0(): PDPTR check failed !"));
      return 0;
    }
  }
#endif

  BX_CPU_THIS_PTR cr0.set32(val_32);

#if BX_CPU_LEVEL >= 4
  handleAlignmentCheck(/* CR0.AC reloaded */);
#endif

  handleCpuModeChange();

#if BX_CPU_LEVEL >= 6
  // CR0.TS/EM/MP participate in SSE/AVX enable state - recompute it
  handleSseModeChange();
#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif
#endif

  // Modification of PG,PE flushes TLB cache according to docs.
  // Additionally, the TLB strategy is based on the current value of
  // WP, so if that changes we must also flush the TLB.
  if ((oldCR0 & 0x80010001) != (val_32 & 0x80010001))
    TLB_flush(); // Flush Global entries also
 
  return 1;
}
1120
 
1121
#if BX_CPU_LEVEL >= 5
1122
// Build the mask of CR4 bits that are writable on the configured/emulated
// CPU model, based on the CPUID feature support flags. check_CR4() rejects
// any attempt to set a bit outside this mask.
Bit32u BX_CPU_C::get_cr4_allow_mask(void)
{
  Bit32u allowMask = 0;

  // CR4 bits definitions:
  //   [31-22] Reserved, Must be Zero
  //   [21]    SMAP: Supervisor Mode Access Prevention R/W
  //   [20]    SMEP: Supervisor Mode Execution Protection R/W
  //   [19]    Reserved, Must be Zero
  //   [18]    OSXSAVE: Operating System XSAVE Support R/W
  //   [17]    PCIDE: PCID Support R/W
  //   [16]    FSGSBASE: FS/GS BASE access R/W
  //   [15]    Reserved, Must be Zero
  //   [14]    SMXE: SMX Extensions R/W
  //   [13]    VMXE: VMX Extensions R/W
  //   [12-11] Reserved, Must be Zero
  //   [10]    OSXMMEXCPT: Operating System Unmasked Exception Support R/W
  //   [9]     OSFXSR: Operating System FXSAVE/FXRSTOR Support R/W
  //   [8]     PCE: Performance-Monitoring Counter Enable R/W
  //   [7]     PGE: Page-Global Enable R/W
  //   [6]     MCE: Machine Check Enable R/W
  //   [5]     PAE: Physical-Address Extension R/W
  //   [4]     PSE: Page Size Extensions R/W
  //   [3]     DE: Debugging Extensions R/W
  //   [2]     TSD: Time Stamp Disable R/W
  //   [1]     PVI: Protected-Mode Virtual Interrupts R/W
  //   [0]     VME: Virtual-8086 Mode Extensions R/W

  /* VME */
  if (bx_cpuid_support_vme())
    allowMask |= BX_CR4_VME_MASK | BX_CR4_PVI_MASK;

  if (bx_cpuid_support_tsc())
    allowMask |= BX_CR4_TSD_MASK;

  if (bx_cpuid_support_debug_extensions())
    allowMask |= BX_CR4_DE_MASK;

  if (bx_cpuid_support_pse())
    allowMask |= BX_CR4_PSE_MASK;

#if BX_CPU_LEVEL >= 6
  if (bx_cpuid_support_pae())
    allowMask |= BX_CR4_PAE_MASK;
#endif

  // NOTE: exception 18 (#MC) never appears in Bochs
  allowMask |= BX_CR4_MCE_MASK;

#if BX_CPU_LEVEL >= 6
  if (bx_cpuid_support_pge())
    allowMask |= BX_CR4_PGE_MASK;

  allowMask |= BX_CR4_PCE_MASK;

  /* OSFXSR */
  if (bx_cpuid_support_fxsave_fxrstor())
    allowMask |= BX_CR4_OSFXSR_MASK;

  /* OSXMMEXCPT */
  if (bx_cpuid_support_sse())
    allowMask |= BX_CR4_OSXMMEXCPT_MASK;

#if BX_SUPPORT_VMX
  if (bx_cpuid_support_vmx())
    allowMask |= BX_CR4_VMXE_MASK;
#endif

  if (bx_cpuid_support_smx())
    allowMask |= BX_CR4_SMXE_MASK;

#if BX_SUPPORT_X86_64
  if (bx_cpuid_support_pcid())
    allowMask |= BX_CR4_PCIDE_MASK;

  if (bx_cpuid_support_fsgsbase())
    allowMask |= BX_CR4_FSGSBASE_MASK;
#endif

  /* OSXSAVE */
  if (bx_cpuid_support_xsave())
    allowMask |= BX_CR4_OSXSAVE_MASK;

  if (bx_cpuid_support_smep())
    allowMask |= BX_CR4_SMEP_MASK;

  if (bx_cpuid_support_smap())
    allowMask |= BX_CR4_SMAP_MASK;
#endif

  return allowMask;
}
1214
 
1215
// Validate a candidate CR4 value before it is written.
// Returns 1 when the value is legal, 0 when the write must be rejected.
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::check_CR4(bx_address cr4_val)
{
  // check if trying to set undefined bits
  if (cr4_val & ~((bx_address) BX_CPU_THIS_PTR cr4_suppmask)) {
    BX_ERROR(("check_CR4(): write of 0x%08x not supported (allowMask=0x%x)", (Bit32u) cr4_val, BX_CPU_THIS_PTR cr4_suppmask));
    return 0;
  }

  bx_cr4_t temp_cr4;
  temp_cr4.set32((Bit32u) cr4_val);

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    // PAE cannot be disabled while long mode is active
    if(! temp_cr4.get_PAE()) {
      BX_ERROR(("check_CR4(): attempt to clear CR4.PAE when EFER.LMA=1"));
      return 0;
    }
  }
  else {
    // PCIDE may only be set in long mode
    if (temp_cr4.get_PCIDE()) {
      BX_ERROR(("check_CR4(): attempt to set CR4.PCIDE when EFER.LMA=0"));
      return 0;
    }
  }
#endif

#if BX_SUPPORT_VMX
  if(! temp_cr4.get_VMXE()) {
    // VMXE may not be cleared while in VMX operation
    if (BX_CPU_THIS_PTR in_vmx) {
      BX_ERROR(("check_CR4(): attempt to clear CR4.VMXE in vmx mode"));
      return 0;
    }
  }
  else {
    // VMXE may not be set from inside SMM
    if (BX_CPU_THIS_PTR in_smm) {
      BX_ERROR(("check_CR4(): attempt to set CR4.VMXE in smm mode"));
      return 0;
    }
  }
#endif

  return 1;
}
1258
 
1259
// Write a new value into CR4. Handles the PDPTR reload required when
// PAE paging parameters change, the PCIDE enable precondition, SVM
// write intercepts, TLB flushing and SSE/AVX mode recomputation.
// Returns 1 on success, 0 when the write is illegal (caller raises #GP).
bx_bool BX_CPU_C::SetCR4(bxInstruction_c *i, bx_address val)
{
  if (! check_CR4(val)) return 0;

#if BX_CPU_LEVEL >= 6
  // Modification of PGE,PAE,PSE,PCIDE,SMEP flushes TLB cache according to docs.
  if ((val & BX_CR4_FLUSH_TLB_MASK) != (BX_CPU_THIS_PTR cr4.val32 & BX_CR4_FLUSH_TLB_MASK)) {
    // reload PDPTR if needed
    if (BX_CPU_THIS_PTR cr0.get_PG() && (val & BX_CR4_PAE_MASK) != 0 && !long_mode()) {
      if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
        BX_ERROR(("SetCR4(): PDPTR check failed !"));
        return 0;
      }
    }
#if BX_SUPPORT_X86_64
    else {
      // if trying to enable CR4.PCIDE
      if (! BX_CPU_THIS_PTR cr4.get_PCIDE() && (val & BX_CR4_PCIDE_MASK)) {
        // the current PCID (CR3 bits 11:0) must be zero when PCIDE is enabled
        if (BX_CPU_THIS_PTR cr3 & 0xfff) {
          BX_ERROR(("SetCR4(): Attempt to enable CR4.PCIDE with non-zero PCID !"));
          return 0;
        }
      }
    }
#endif
    TLB_flush(); // Flush Global entries also.
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(4)) {
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR4_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR4_WRITE);
    }
  }
#endif

  BX_CPU_THIS_PTR cr4.set32((Bit32u) val);

#if BX_CPU_LEVEL >= 6
  // CR4.OSFXSR/OSXSAVE affect SSE/AVX enable state - recompute it
  handleSseModeChange();
#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif
#endif

  return 1;
}
1310
#endif // BX_CPU_LEVEL >= 5
1311
 
1312
// Load a new page-directory base into CR3.
// Returns 0 only when reserved physical-address bits are set in long mode;
// otherwise stores the value and flushes the TLB.
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR3(bx_address val)
{
#if BX_SUPPORT_X86_64
  if (long_mode() && ! IsValidPhyAddr(val)) {
    BX_ERROR(("SetCR3(): Attempt to write to reserved bits of CR3 !"));
    return 0;
  }
#endif

  BX_CPU_THIS_PTR cr3 = val;

  // A CR3 load flushes the TLB even when the value did not change.
#if BX_CPU_LEVEL >= 6
  if (BX_CPU_THIS_PTR cr4.get_PGE()) {
    TLB_flushNonGlobal(); // Don't flush Global entries.
  }
  else
#endif
  {
    TLB_flush();          // Flush Global entries also.
  }

  return 1;
}
1335
 
1336
#if BX_CPU_LEVEL >= 5
1337
// Write to the EFER MSR. Rejects reserved bits and any attempt to toggle
// EFER.LME while paging is enabled. EFER.LMA is read-only through this
// path and is preserved from the current register value.
// Returns 1 on success, 0 when the write must fault.
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetEFER(bx_address val_64)
{
  Bit32u val32 = (Bit32u) val_64;

  // efer_suppmask holds the bits supported by the emulated CPU model
  if (val_64 & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
    BX_ERROR(("SetEFER(0x%08x): attempt to set reserved bits of EFER MSR !", val32));
    return 0;
  }

#if BX_SUPPORT_X86_64
  /* #GP(0) if changing EFER.LME when cr0.pg = 1 */
  if ((BX_CPU_THIS_PTR efer.get_LME() != ((val32 >> 8) & 1)) &&
       BX_CPU_THIS_PTR  cr0.get_PG())
  {
    BX_ERROR(("SetEFER: attempt to change LME when CR0.PG=1"));
    return 0;
  }
#endif

  BX_CPU_THIS_PTR efer.set32((val32 & BX_CPU_THIS_PTR efer_suppmask & ~BX_EFER_LMA_MASK)
        | (BX_CPU_THIS_PTR efer.get32() & BX_EFER_LMA_MASK)); // keep LMA untouched

  return 1;
}
1361
#endif
1362
 
1363
#if BX_CPU_LEVEL >= 6
1364
 
1365
// Write to CR8 (task-priority register alias). Raises #GP(0) if any bit
// above [3:0] is set; otherwise propagates the value to the virtual TPR
// of an SVM/VMX guest when applicable, or to the local APIC TPR.
void BX_CPU_C::WriteCR8(bxInstruction_c *i, bx_address val)
{
#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(8)) {
      if (BX_SUPPORT_SVM_EXTENSION(BX_CPUID_SVM_DECODE_ASSIST))
        Svm_Vmexit(SVM_VMEXIT_CR8_WRITE, BX_SVM_CR_WRITE_MASK | i->src());
      else
        Svm_Vmexit(SVM_VMEXIT_CR8_WRITE);
    }
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR8_Write(i);
#endif

  // CR8 is aliased to APIC->TASK PRIORITY register
  //   APIC.TPR[7:4] = CR8[3:0]
  //   APIC.TPR[3:0] = 0
  // Reads of CR8 return zero extended APIC.TPR[7:4]
  // Write to CR8 update APIC.TPR[7:4]
  if (val & BX_CONST64(0xfffffffffffffff0)) {
    BX_ERROR(("WriteCR8: Attempt to set reserved bits of CR8"));
    exception(BX_GP_EXCEPTION, 0);
  }
  unsigned tpr = (val & 0xf) << 4;

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    // store into the guest's virtual TPR; with V_INTR_MASKING the real
    // APIC TPR is owned by the host and must not be touched
    SVM_V_TPR = tpr;
    handleInterruptMaskChange();
    if (SVM_V_INTR_MASKING) return;
  }
#endif

#if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
  // with the TPR shadow active, the write goes to the virtual-APIC page
  if (BX_CPU_THIS_PTR in_vmx_guest && VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
    VMX_Write_Virtual_APIC(BX_LAPIC_TPR, tpr);
    VMX_TPR_Virtualization();
    return;
  }
#endif

  BX_CPU_THIS_PTR lapic.set_tpr(tpr);
}
1412
 
1413
// Read CR8 (task-priority register alias). Returns APIC.TPR[7:4] zero
// extended - or the guest's virtual TPR when SVM interrupt masking or
// the VMX TPR shadow is in effect.
Bit32u BX_CPU_C::ReadCR8(bxInstruction_c *i)
{
#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_CR_READ_INTERCEPTED(8)) Svm_Vmexit(SVM_VMEXIT_CR8_READ);

    // with V_INTR_MASKING the guest sees its virtual TPR, not the APIC's
    if (SVM_V_INTR_MASKING) return SVM_V_TPR;
  }
#endif

#if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_CR8_Read(i);

  // with the TPR shadow active, read from the virtual-APIC page instead
  if (BX_CPU_THIS_PTR in_vmx_guest && VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
     Bit32u tpr = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) >> 4) & 0xf;
     return tpr;
  }
#endif

  // CR8 is aliased to APIC->TASK PRIORITY register
  //   APIC.TPR[7:4] = CR8[3:0]
  //   APIC.TPR[3:0] = 0
  // Reads of CR8 return zero extended APIC.TPR[7:4]
  // Write to CR8 update APIC.TPR[7:4]
  return BX_CPU_THIS_PTR get_cr8();
}
1440
 
1441
#endif
1442
 
1443
// CLTS instruction: clear CR0.TS (task-switched flag).
// Privileged - #GP(0) unless CPL is 0. Subject to VMX/SVM CR0-write
// intercepts before TS is actually cleared.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CLTS(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    // fixed typo in the log message: "priveledge" -> "privilege"
    BX_ERROR(("CLTS: privilege check failed, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    // non-zero return presumably means the VMM owns CR0.TS and the
    // clear must be suppressed here - confirm against VMexit_CLTS()
    if(VMexit_CLTS()) {
      BX_NEXT_TRACE(i);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if(SVM_CR_WRITE_INTERCEPTED(0)) Svm_Vmexit(SVM_VMEXIT_CR0_WRITE);
  }
#endif

  BX_CPU_THIS_PTR cr0.set_TS(0);

#if BX_CPU_LEVEL >= 6
  // CR0.TS participates in SSE/AVX enable state - recompute it
  handleSseModeChange();
#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif
#endif

  BX_NEXT_TRACE(i);
}
1476
 
1477
#if BX_X86_DEBUGGER
1478
 
1479
// Report whether any of DR0..DR3 targets the same linear page as laddr
// with an R/W condition equal to either opa or opb.
bx_bool BX_CPU_C::hwbreakpoint_check(bx_address laddr, unsigned opa, unsigned opb)
{
  const bx_address page = LPFOf(laddr);

  const Bit32u rw_cond[4] = {
    BX_CPU_THIS_PTR dr7.get_R_W0(),
    BX_CPU_THIS_PTR dr7.get_R_W1(),
    BX_CPU_THIS_PTR dr7.get_R_W2(),
    BX_CPU_THIS_PTR dr7.get_R_W3()
  };

  for (unsigned n = 0; n < 4; n++) {
    bx_bool op_match = (rw_cond[n] == opa) || (rw_cond[n] == opb);
    if (op_match && page == LPFOf(BX_CPU_THIS_PTR dr[n]))
      return 1;
  }

  return 0;
}
1498
 
1499
// Check the instruction address against the hardware breakpoints.
// Returns the DR6 bits to report, or 0 when nothing matched.
Bit32u BX_CPU_C::code_breakpoint_match(bx_address laddr)
{
  // instruction breakpoints are suppressed while RF is set or while
  // inside a repeated string iteration
  if (BX_CPU_THIS_PTR get_RF() || BX_CPU_THIS_PTR in_repeat)
    return 0;

  if (! BX_CPU_THIS_PTR dr7.get_bp_enabled())
    return 0;

  // compare against instruction breakpoints only, length forced to 1
  return hwdebug_compare(laddr, 1, BX_HWDebugInstruction, BX_HWDebugInstruction);
}
1511
 
1512
// Check a data access [laddr, laddr+len) against the hardware breakpoints
// and record any hit in debug_trap, arming the async-event path so the
// #DB trap is reported on the next instruction.
void BX_CPU_C::hwbreakpoint_match(bx_address laddr, unsigned len, unsigned rw)
{
  // Only compare debug registers if any breakpoints are enabled
  if (! BX_CPU_THIS_PTR dr7.get_bp_enabled())
    return;

  // Reads compare only against the 11b (read/write) condition;
  // writes additionally match the 01b (write-only) condition.
  unsigned opa = BX_HWDebugMemRW;
  unsigned opb = (rw & 1) ? BX_HWDebugMemW : opa;

  Bit32u dr6_bits = hwdebug_compare(laddr, len, opa, opb);
  if (! dr6_bits) return;

  BX_CPU_THIS_PTR debug_trap |= dr6_bits;
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HIT) {
    BX_ERROR(("#DB: Code/Data breakpoint hit - report debug trap on next instruction"));
    BX_CPU_THIS_PTR async_event = 1;
  }
}
1532
 
1533
// Compare the range [laddr_0, laddr_0 + size) against breakpoints DR0..DR3
// for the access conditions opa/opb.
// Returns a DR6 bit mask: one bit in [3:0] per matching breakpoint
// (enabled or not), plus BX_DEBUG_TRAP_HIT when at least one matching
// breakpoint is locally or globally enabled in DR7.
Bit32u BX_CPU_C::hwdebug_compare(bx_address laddr_0, unsigned size,
                          unsigned opa, unsigned opb)
{
  Bit32u dr7 = BX_CPU_THIS_PTR dr7.get32();

  // lookup table is read-only: declare it const so it lives in
  // read-only storage and cannot be mutated accidentally
  static const bx_address alignment_mask[4] =
    // 00b=1  01b=2  10b=undef(8)  11b=4
    {  0x0,   0x1,   0x7,          0x3   };

  bx_address laddr_n = laddr_0 + (size - 1); // last byte of the access
  Bit32u dr_op[4], dr_len[4];

  // If *any* enabled breakpoints matched, then we need to
  // set status bits for *all* breakpoints, even disabled ones,
  // as long as they meet the other breakpoint criteria.
  // dr6_mask is the return value.  These bits represent the bits
  // to be OR'd into DR6 as a result of the debug event.
  Bit32u dr6_mask = 0;

  dr_len[0] = BX_CPU_THIS_PTR dr7.get_LEN0();
  dr_len[1] = BX_CPU_THIS_PTR dr7.get_LEN1();
  dr_len[2] = BX_CPU_THIS_PTR dr7.get_LEN2();
  dr_len[3] = BX_CPU_THIS_PTR dr7.get_LEN3();

  dr_op[0] = BX_CPU_THIS_PTR dr7.get_R_W0();
  dr_op[1] = BX_CPU_THIS_PTR dr7.get_R_W1();
  dr_op[2] = BX_CPU_THIS_PTR dr7.get_R_W2();
  dr_op[3] = BX_CPU_THIS_PTR dr7.get_R_W3();

  for (unsigned n=0;n<4;n++) {
    // breakpoint range, aligned down/extended per its LEN field
    bx_address dr_start = BX_CPU_THIS_PTR dr[n] & ~alignment_mask[dr_len[n]];
    bx_address dr_end = dr_start + alignment_mask[dr_len[n]];

    // See if this instruction address matches any breakpoints
    if ((dr_op[n]==opa || dr_op[n]==opb) &&
         (laddr_0 <= dr_end) &&
         (laddr_n >= dr_start)) {
      dr6_mask |= (1<<n);
      // tell if breakpoint was enabled (L or G bit set in DR7)
      if (dr7 & (3 << n*2)) {
        dr6_mask |= BX_DEBUG_TRAP_HIT;
      }
    }
  }

  return dr6_mask;
}
1580
 
1581
#if BX_CPU_LEVEL >= 5
1582
void BX_CPU_C::iobreakpoint_match(unsigned port, unsigned len)
1583
{
1584
  // Only compare debug registers if any breakpoints are enabled
1585
  if (BX_CPU_THIS_PTR cr4.get_DE() && BX_CPU_THIS_PTR dr7.get_bp_enabled())
1586
  {
1587
    Bit32u dr6_bits = hwdebug_compare(port, len, BX_HWDebugIO, BX_HWDebugIO);
1588
    if (dr6_bits) {
1589
      BX_CPU_THIS_PTR debug_trap |= dr6_bits;
1590
      if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_HIT) {
1591
        BX_ERROR(("#DB: I/O breakpoint hit - report debug trap on next instruction"));
1592
        BX_CPU_THIS_PTR async_event = 1;
1593
      }
1594
    }
1595
  }
1596
}
1597
#endif
1598
 
1599
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.