/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc 11674 2013-04-09 15:43:15Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001-2013  The Bochs Project
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "param_names.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::UndefinedOpcode(bxInstruction_c *i)
{
  BX_DEBUG(("UndefinedOpcode: generate #UD exception"));
  exception(BX_UD_EXCEPTION, 0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::NOP(bxInstruction_c *i)
{
  // No operation.

  BX_NEXT_INSTR(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::PAUSE(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    VMexit_PAUSE();
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_PAUSE)) SvmInterceptPAUSE();
  }
#endif

  BX_NEXT_INSTR(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::PREFETCH(bxInstruction_c *i)
{
#if BX_INSTRUMENTATION
  BX_INSTR_PREFETCH_HINT(BX_CPU_ID, i->src(), i->seg(), BX_CPU_CALL_METHODR(i->ResolveModrm, (i)));
#endif

  BX_NEXT_INSTR(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CPUID(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 4

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_CPUID, 0);
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_CPUID)) Svm_Vmexit(SVM_VMEXIT_CPUID);
  }
#endif

  struct cpuid_function_t leaf;
  BX_CPU_THIS_PTR cpuid->get_cpuid_leaf(EAX, ECX, &leaf);

  RAX = leaf.eax;
  RBX = leaf.ebx;
  RCX = leaf.ecx;
  RDX = leaf.edx;
#endif

  BX_NEXT_INSTR(i);
}

//
// The shutdown state is very similar to the state following the execution
// of the HLT instruction. In this mode the processor stops executing
// instructions until #NMI, #SMI, #RESET or #INIT is received. If
// shutdown occurs while in an NMI interrupt handler or in SMM, a hardware
// reset must be used to restart the processor execution.
//
void BX_CPU_C::shutdown(void)
{
#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_SHUTDOWN)) Svm_Vmexit(SVM_VMEXIT_SHUTDOWN);
  }
#endif

  enter_sleep_state(BX_ACTIVITY_STATE_SHUTDOWN);

  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
}

void BX_CPU_C::enter_sleep_state(unsigned state)
{
  switch(state) {
  case BX_ACTIVITY_STATE_ACTIVE:
    BX_ASSERT(0); // should not be used for entering active CPU state
    break;

  case BX_ACTIVITY_STATE_HLT:
    break;

  case BX_ACTIVITY_STATE_WAIT_FOR_SIPI:
    mask_event(BX_EVENT_INIT | BX_EVENT_SMI | BX_EVENT_NMI); // FIXME: all events should be masked
    // fall through - mask interrupts as well

  case BX_ACTIVITY_STATE_SHUTDOWN:
    BX_CPU_THIS_PTR clear_IF(); // masking interrupts
    break;

  case BX_ACTIVITY_STATE_MWAIT:
  case BX_ACTIVITY_STATE_MWAIT_IF:
    break;

  default:
    BX_PANIC(("enter_sleep_state: unknown state %d", state));
  }

  // artificial trap bit, why use another variable.
  BX_CPU_THIS_PTR activity_state = state;
  BX_CPU_THIS_PTR async_event = 1; // so processor knows to check
  // Execution completes.  The processor will remain in a sleep
  // state until one of the wakeup conditions is met.

  BX_INSTR_HLT(BX_CPU_ID);

#if BX_DEBUGGER
  bx_dbg_halt(BX_CPU_ID);
#endif

#if BX_USE_IDLE_HACK
  bx_gui->sim_is_idle();
#endif
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::HLT(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_DEBUG(("HLT: %s privilege check failed, CPL=%d, generate #GP(0)",
        cpu_mode_string(BX_CPU_THIS_PTR cpu_mode), CPL));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (! BX_CPU_THIS_PTR get_IF()) {
    BX_INFO(("WARNING: HLT instruction with IF=0!"));
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_HLT_VMEXIT)) {
      VMexit(VMX_VMEXIT_HLT, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_HLT)) Svm_Vmexit(SVM_VMEXIT_HLT);
  }
#endif

  // stops instruction execution and places the processor in a
  // HALT state. An enabled interrupt, NMI, or reset will resume
  // execution. If an interrupt (including NMI) is used to resume
  // execution after HLT, the saved CS:eIP points to the instruction
  // following HLT.
  enter_sleep_state(BX_ACTIVITY_STATE_HLT);

  BX_NEXT_TRACE(i);
}

/* 0F 08 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVD(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("INVD: privilege check failed, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_INVD, 0);
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_INVD)) Svm_Vmexit(SVM_VMEXIT_INVD);
  }
#endif

  invalidate_prefetch_q();

  BX_DEBUG(("INVD: Flush internal caches !"));
  BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_INVD);

  flushICaches();

  BX_NEXT_TRACE(i);
}

/* 0F 09 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WBINVD(bxInstruction_c *i)
{
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("WBINVD: privilege check failed, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_WBINVD_VMEXIT)) {
      VMexit(VMX_VMEXIT_WBINVD, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT1_WBINVD)) Svm_Vmexit(SVM_VMEXIT_WBINVD);
  }
#endif

//invalidate_prefetch_q();

  BX_DEBUG(("WBINVD: WB-Invalidate internal caches !"));
  BX_INSTR_CACHE_CNTRL(BX_CPU_ID, BX_INSTR_WBINVD);

//flushICaches();

  BX_NEXT_TRACE(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
{
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  bx_address laddr = get_laddr(i->seg(), eaddr);

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    if (! IsCanonical(laddr)) {
      BX_ERROR(("CLFLUSH: non-canonical access !"));
      exception(int_number(i->seg()), 0);
    }
  }
  else
#endif
  {
    // check if we could access the memory segment
    if (!(seg->cache.valid & SegAccessROK)) {
      if (! execute_virtual_checks(seg, (Bit32u) eaddr, 1))
        exception(int_number(i->seg()), 0);
    }
    else {
      if (eaddr > seg->cache.u.segment.limit_scaled) {
        BX_ERROR(("CLFLUSH: segment limit violation"));
        exception(int_number(i->seg()), 0);
      }
    }
  }

#if BX_INSTRUMENTATION
  bx_phy_address paddr =
#endif
    translate_linear(BX_TLB_ENTRY_OF(laddr), laddr, USER_PL, BX_READ);

  BX_INSTR_CLFLUSH(BX_CPU_ID, laddr, paddr);

#if BX_X86_DEBUGGER
  hwbreakpoint_match(laddr, 1, BX_READ);
#endif

  BX_NEXT_INSTR(i);
}
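
// Illustrative note: on x86-64 a linear address is canonical when bits
// 63..47 are all copies of bit 47, i.e. the 48-bit address is sign
// extended. For example 0x00007FFFFFFFFFFF and 0xFFFF800000000000 are
// canonical, while 0x0000800000000000 is not, so the IsCanonical() check
// above would raise an exception for it.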

void BX_CPU_C::handleCpuModeChange(void)
{
  unsigned mode = BX_CPU_THIS_PTR cpu_mode;

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR efer.get_LMA()) {
    if (! BX_CPU_THIS_PTR cr0.get_PE()) {
      BX_PANIC(("change_cpu_mode: EFER.LMA is set when CR0.PE=0 !"));
    }
    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_64;
    }
    else {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_COMPAT;
      // clear upper part of RIP/RSP when leaving 64-bit long mode
      BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP);
      BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RSP);
    }

    // switching between compatibility and long64 mode also affects SS.BASE,
    // which is always zero in long64 mode
    invalidate_stack_cache();
  }
  else
#endif
  {
    if (BX_CPU_THIS_PTR cr0.get_PE()) {
      if (BX_CPU_THIS_PTR get_VM()) {
        BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_V8086;
        CPL = 3;
      }
      else
        BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_PROTECTED;
    }
    else {
      BX_CPU_THIS_PTR cpu_mode = BX_MODE_IA32_REAL;

      // CS segment in real mode always allows full access
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p        = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment  = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_DATA_READ_WRITE_ACCESSED;

      CPL = 0;
    }
  }

  updateFetchModeMask();

#if BX_CPU_LEVEL >= 6
#if BX_SUPPORT_AVX
  handleAvxModeChange(); /* protected mode reloaded */
#endif
#endif

  if (mode != BX_CPU_THIS_PTR cpu_mode) {
    BX_DEBUG(("%s activated", cpu_mode_string(BX_CPU_THIS_PTR cpu_mode)));
#if BX_DEBUGGER
    if (BX_CPU_THIS_PTR mode_break) {
      BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
      bx_debug_break(); // trap into debugger
    }
#endif
  }
}

#if BX_CPU_LEVEL >= 4
void BX_CPU_C::handleAlignmentCheck(void)
{
  if (CPL == 3 && BX_CPU_THIS_PTR cr0.get_AM() && BX_CPU_THIS_PTR get_AC()) {
#if BX_SUPPORT_ALIGNMENT_CHECK == 0
    BX_PANIC(("WARNING: Alignment check (#AC exception) was not compiled in !"));
#else
    BX_CPU_THIS_PTR alignment_check_mask = 0xF;
#endif
  }
#if BX_SUPPORT_ALIGNMENT_CHECK
  else {
    BX_CPU_THIS_PTR alignment_check_mask = 0;
  }
#endif
}
#endif

#if BX_CPU_LEVEL >= 6
void BX_CPU_C::handleSseModeChange(void)
{
  if(BX_CPU_THIS_PTR cr0.get_TS()) {
    BX_CPU_THIS_PTR sse_ok = 0;
  }
  else {
    if(BX_CPU_THIS_PTR cr0.get_EM() || !BX_CPU_THIS_PTR cr4.get_OSFXSR())
      BX_CPU_THIS_PTR sse_ok = 0;
    else
      BX_CPU_THIS_PTR sse_ok = 1;
  }

  updateFetchModeMask(); /* SSE_OK changed */
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoSSE(bxInstruction_c *i)
{
  if(BX_CPU_THIS_PTR cr0.get_EM() || !BX_CPU_THIS_PTR cr4.get_OSFXSR())
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}

#if BX_SUPPORT_AVX
void BX_CPU_C::handleAvxModeChange(void)
{
  if(BX_CPU_THIS_PTR cr0.get_TS()) {
    BX_CPU_THIS_PTR avx_ok = 0;
  }
  else {
    if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE() ||
        (~BX_CPU_THIS_PTR xcr0.val32 & 0x6) != 0) BX_CPU_THIS_PTR avx_ok = 0;
    else
      BX_CPU_THIS_PTR avx_ok = 1;
  }

  updateFetchModeMask(); /* AVX_OK changed */
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::BxNoAVX(bxInstruction_c *i)
{
  if (! protected_mode() || ! BX_CPU_THIS_PTR cr4.get_OSXSAVE())
    exception(BX_UD_EXCEPTION, 0);

  if (~BX_CPU_THIS_PTR xcr0.val32 & 0x6)
    exception(BX_UD_EXCEPTION, 0);

  if(BX_CPU_THIS_PTR cr0.get_TS())
    exception(BX_NM_EXCEPTION, 0);

  BX_ASSERT(0);

  BX_NEXT_TRACE(i); // keep compiler happy
}
#endif
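
// Illustrative note: the 0x6 mask used above tests XCR0 bit 1 (SSE state)
// and bit 2 (AVX state); (~xcr0.val32 & 0x6) != 0 means at least one of
// them is clear. AVX instructions are therefore usable only after the OS
// has enabled both state components via XSETBV, in addition to
// CR4.OSXSAVE being set and the CPU being in protected mode.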

#endif

void BX_CPU_C::handleCpuContextChange(void)
{
  TLB_flush();

  invalidate_prefetch_q();
  invalidate_stack_cache();

  handleInterruptMaskChange();

#if BX_CPU_LEVEL >= 4
  handleAlignmentCheck();
#endif

  handleCpuModeChange();

#if BX_CPU_LEVEL >= 6
  handleSseModeChange();
#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif
#endif
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPMC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  if (! BX_CPU_THIS_PTR cr4.get_PCE() && CPL != 0) {
    BX_ERROR(("RDPMC: not allowed to use instruction !"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDPMC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDPMC, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_RDPMC)) Svm_Vmexit(SVM_VMEXIT_RDPMC);
  }
#endif

  /* According to the manual, the Pentium 4 has 18 counters, while
   * earlier models have two.  The P4 can also do a short read-out
   * (EDX always 0).  Otherwise it is limited to 40 bits.
   */

  if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SSE2)) { // Pentium 4 processor (see cpuid.cc)
    if ((ECX & 0x7fffffff) >= 18)
      exception(BX_GP_EXCEPTION, 0);
  }
  else {
    if ((ECX & 0xffffffff) >= 2)
      exception(BX_GP_EXCEPTION, 0);
  }

  // Most counters are for hardware-specific details which we do not
  // emulate anyway (like pipeline stalls etc).

  // It could be interesting to count the number of memory reads and
  // writes, misaligned accesses and so on, but for monitoring bochs
  // this is easier done from the host.

  RAX = 0;
  RDX = 0; // if P4 and ECX & 0x10000000, then always 0 (short read 32 bits)

  BX_ERROR(("RDPMC: Performance Counters Support not implemented yet"));
#endif

  BX_NEXT_INSTR(i);
}

#if BX_CPU_LEVEL >= 5
Bit64u BX_CPU_C::get_TSC(void)
{
  Bit64u tsc = bx_pc_system.time_ticks() - BX_CPU_THIS_PTR tsc_last_reset;
#if BX_SUPPORT_VMX || BX_SUPPORT_SVM
  tsc += BX_CPU_THIS_PTR tsc_offset;
#endif
  return tsc;
}

void BX_CPU_C::set_TSC(Bit64u newval)
{
  // compute the correct setting of tsc_last_reset so that a get_TSC()
  // will return newval
  BX_CPU_THIS_PTR tsc_last_reset = bx_pc_system.time_ticks() - newval;

  // verify
  BX_ASSERT(get_TSC() == newval);
}
#endif
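
// Worked example (illustrative): if time_ticks() currently returns 1000,
// set_TSC(250) stores tsc_last_reset = 1000 - 250 = 750. A later
// get_TSC() at tick 1600 then returns 1600 - 750 = 850, so the counter
// appears to have continued counting from the written value (plus
// tsc_offset when VMX/SVM support is compiled in).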

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  if (BX_CPU_THIS_PTR cr4.get_TSD() && CPL != 0) {
    BX_ERROR(("RDTSC: not allowed to use instruction !"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDTSC, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest)
    if (SVM_INTERCEPT(SVM_INTERCEPT0_RDTSC)) Svm_Vmexit(SVM_VMEXIT_RDTSC);
#endif

  // return ticks
  Bit64u ticks = BX_CPU_THIS_PTR get_TSC();

  RAX = GET32L(ticks);
  RDX = GET32H(ticks);

  BX_DEBUG(("RDTSC: ticks 0x%08x:%08x", EDX, EAX));
#endif

  BX_NEXT_INSTR(i);
}
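
// Example (illustrative): RDTSC publishes the 64-bit tick count in
// EDX:EAX. For ticks == 0x0000000123456789 the code above sets
// EAX = 0x23456789 (low half via GET32L) and EDX = 0x00000001 (high
// half via GET32H).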

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSCP(bxInstruction_c *i)
{
#if BX_SUPPORT_X86_64

#if BX_SUPPORT_VMX
  // RDTSCP will always #UD in legacy VMX mode
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_RDTSCP)) {
       BX_ERROR(("RDTSCP in VMX guest: not allowed to use instruction !"));
       exception(BX_UD_EXCEPTION, 0);
    }
  }
#endif

  if (BX_CPU_THIS_PTR cr4.get_TSD() && CPL != 0) {
    BX_ERROR(("RDTSCP: not allowed to use instruction !"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
      VMexit(VMX_VMEXIT_RDTSCP, 0);
    }
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest)
    if (SVM_INTERCEPT(SVM_INTERCEPT1_RDTSCP)) Svm_Vmexit(SVM_VMEXIT_RDTSCP);
#endif

  // return ticks
  Bit64u ticks = BX_CPU_THIS_PTR get_TSC();

  RAX = GET32L(ticks);
  RDX = GET32H(ticks);
  RCX = MSR_TSC_AUX;

#endif

  BX_NEXT_INSTR(i);
}

#if BX_SUPPORT_MONITOR_MWAIT
bx_bool BX_CPU_C::is_monitor(bx_phy_address begin_addr, unsigned len)
{
  if (! BX_CPU_THIS_PTR monitor.armed) return 0;

  bx_phy_address monitor_begin = BX_CPU_THIS_PTR monitor.monitor_addr;
  bx_phy_address monitor_end = monitor_begin + CACHE_LINE_SIZE - 1;

  bx_phy_address end_addr = begin_addr + len;
  if (begin_addr >= monitor_end || end_addr <= monitor_begin)
    return 0;
  else
    return 1;
}
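
// Worked example (illustrative, assuming CACHE_LINE_SIZE == 64): with the
// monitor armed at physical address 0x1000, monitor_begin = 0x1000 and
// monitor_end = 0x103F. A 4-byte store at 0x1020 gives end_addr = 0x1024;
// since 0x1020 < 0x103F and 0x1024 > 0x1000 neither disjointness test
// fires and is_monitor() reports a hit, while a store starting at 0x1040
// misses. This is the classic interval-overlap check:
//
//   ranges overlap  <=>  !(a_begin >= b_end || a_end <= b_begin)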

void BX_CPU_C::check_monitor(bx_phy_address begin_addr, unsigned len)
{
  if (is_monitor(begin_addr, len)) {
    // wakeup from MWAIT state
    if(BX_CPU_THIS_PTR activity_state >= BX_ACTIVITY_STATE_MWAIT)
       BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
    // clear monitor
    BX_CPU_THIS_PTR monitor.reset_monitor();
  }
}
#endif

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
{
#if BX_SUPPORT_MONITOR_MWAIT
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL != 0) {
    BX_DEBUG(("MONITOR instruction not recognized when CPL != 0"));
    exception(BX_UD_EXCEPTION, 0);
  }

  BX_DEBUG(("MONITOR instruction executed EAX = 0x%08x", EAX));

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_MONITOR_VMEXIT)) {
      VMexit(VMX_VMEXIT_MONITOR, 0);
    }
  }
#endif

  if (RCX != 0) {
    BX_ERROR(("MONITOR: no optional extensions supported"));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];

  bx_address offset = RAX & i->asize_mask();

  // set MONITOR
  bx_address laddr = get_laddr(i->seg(), offset);

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    if (! IsCanonical(laddr)) {
      BX_ERROR(("MONITOR: non-canonical access !"));
      exception(int_number(i->seg()), 0);
    }
  }
  else
#endif
  {
    // check if we could access the memory segment
    if (!(seg->cache.valid & SegAccessROK)) {
      if (! execute_virtual_checks(seg, (Bit32u) offset, 1))
        exception(int_number(i->seg()), 0);
    }
    else {
      if (offset > seg->cache.u.segment.limit_scaled) {
        BX_ERROR(("MONITOR: segment limit violation"));
        exception(int_number(i->seg()), 0);
      }
    }
  }

  bx_phy_address paddr = translate_linear(BX_TLB_ENTRY_OF(laddr), laddr, USER_PL, BX_READ);

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT1_MONITOR)) Svm_Vmexit(SVM_VMEXIT_MONITOR);
  }
#endif

  // Set the monitor immediately. If the monitor is still armed when we
  // MWAIT, the processor will stall.

  bx_pc_system.invlpg(paddr);

  BX_CPU_THIS_PTR monitor.arm(paddr);

  BX_DEBUG(("MONITOR for phys_addr=0x" FMT_PHY_ADDRX, BX_CPU_THIS_PTR monitor.monitor_addr));
#endif

  BX_NEXT_INSTR(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MWAIT(bxInstruction_c *i)
{
#if BX_SUPPORT_MONITOR_MWAIT
  // CPL is always 0 in real mode
  if (/* !real_mode() && */ CPL != 0) {
    BX_DEBUG(("MWAIT instruction not recognized when CPL != 0"));
    exception(BX_UD_EXCEPTION, 0);
  }

  BX_DEBUG(("MWAIT instruction executed ECX = 0x%08x", ECX));

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (VMEXIT(VMX_VM_EXEC_CTRL2_MWAIT_VMEXIT)) {
      VMexit(VMX_VMEXIT_MWAIT, BX_CPU_THIS_PTR monitor.armed);
    }
  }
#endif

  // only one extension is supported
  //   ECX[0] - interrupt MWAIT even if EFLAGS.IF = 0
  if (RCX & ~(BX_CONST64(1))) {
    BX_ERROR(("MWAIT: incorrect optional extensions in RCX"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT1_MWAIT_ARMED))
      if (BX_CPU_THIS_PTR monitor.armed) Svm_Vmexit(SVM_VMEXIT_MWAIT_CONDITIONAL);

    if (SVM_INTERCEPT(SVM_INTERCEPT1_MWAIT)) Svm_Vmexit(SVM_VMEXIT_MWAIT);
  }
#endif

  // If the monitor has already triggered, we just return.
  if (! BX_CPU_THIS_PTR monitor.armed) {
    BX_DEBUG(("MWAIT: the MONITOR was not armed or already triggered"));
    BX_NEXT_TRACE(i);
  }

  static bx_bool mwait_is_nop = SIM->get_param_bool(BXPN_MWAIT_IS_NOP)->get();
  if (mwait_is_nop) {
    BX_NEXT_TRACE(i);
  }

  // Stops instruction execution and places the processor in an optimized
  // state.  Events that cause an exit from the MWAIT state are: a store
  // from another processor to the monitored range, or any unmasked
  // interrupt, including INTR, NMI, SMI, INIT or reset. Any far control
  // transfer between MONITOR and MWAIT resets the monitoring logic.

  Bit32u new_state = BX_ACTIVITY_STATE_MWAIT;
  if (ECX & 1) {
#if BX_SUPPORT_VMX
    // When the "interrupt window exiting" VMX control is set, the MWAIT
    // instruction won't cause the processor to enter a sleep state with
    // EFLAGS.IF = 0
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      if (VMEXIT(VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT) && ! BX_CPU_THIS_PTR get_IF()) {
        BX_NEXT_TRACE(i);
      }
    }
#endif
    new_state = BX_ACTIVITY_STATE_MWAIT_IF;
  }

  BX_INSTR_MWAIT(BX_CPU_ID, BX_CPU_THIS_PTR monitor.monitor_addr, CACHE_LINE_SIZE, ECX);

  enter_sleep_state(new_state);
#endif

  BX_NEXT_TRACE(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSENTER(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if (real_mode()) {
    BX_ERROR(("SYSENTER not recognized in real mode !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
    BX_ERROR(("SYSENTER with zero sysenter_cs_msr !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

  BX_CPU_THIS_PTR clear_VM();       // do this just like the book says to do
  BX_CPU_THIS_PTR clear_IF();
  BX_CPU_THIS_PTR clear_RF();

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_eip_msr)) {
      BX_ERROR(("SYSENTER with non-canonical SYSENTER_EIP_MSR !"));
      exception(BX_GP_EXCEPTION, 0);
    }
    if (!IsCanonical(BX_CPU_THIS_PTR msr.sysenter_esp_msr)) {
      BX_ERROR(("SYSENTER with non-canonical SYSENTER_ESP_MSR !"));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
#endif

  parse_selector(BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;          // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;          // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;          // available for use by system
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = !long_mode();
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            =  long_mode();
#endif

#if BX_SUPPORT_X86_64
  handleCpuModeChange(); // mode change could happen only when in long_mode()
#else
  updateFetchModeMask(/* CS reloaded */);
#endif

#if BX_SUPPORT_ALIGNMENT_CHECK
  BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

  parse_selector((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid    = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p        = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl      = 0;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment  = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type     = BX_DATA_READ_WRITE_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0;          // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF; // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1;          // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1;          // 32-bit mode
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0;          // available for use by system
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    RSP = BX_CPU_THIS_PTR msr.sysenter_esp_msr;
    RIP = BX_CPU_THIS_PTR msr.sysenter_eip_msr;
  }
  else
#endif
  {
    ESP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_esp_msr;
    EIP = (Bit32u) BX_CPU_THIS_PTR msr.sysenter_eip_msr;
  }

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSENTER,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSEXIT(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if (real_mode() || CPL != 0) {
    BX_ERROR(("SYSEXIT from real mode or with CPL<>0 !"));
    exception(BX_GP_EXCEPTION, 0);
  }
  if ((BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK) == 0) {
    BX_ERROR(("SYSEXIT with zero sysenter_cs_msr !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    if (!IsCanonical(RDX)) {
       BX_ERROR(("SYSEXIT with non-canonical RDX (RIP) pointer !"));
       exception(BX_GP_EXCEPTION, 0);
    }
    if (!IsCanonical(RCX)) {
       BX_ERROR(("SYSEXIT with non-canonical RCX (RSP) pointer !"));
       exception(BX_GP_EXCEPTION, 0);
    }

    parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 32) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;           // base address
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;           // 4k granularity
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;           // available for use by system
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1;

    RSP = RCX;
    RIP = RDX;
  }
  else
#endif
  {
    parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 16) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0;           // base address
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1;           // 4k granularity
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0;           // available for use by system
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0;
#endif

    ESP = ECX;
    EIP = EDX;
  }

#if BX_SUPPORT_X86_64
  handleCpuModeChange(); // mode change could happen only when in long_mode()
#else
  updateFetchModeMask(/* CS reloaded */);
#endif

  handleAlignmentCheck(/* CPL change */);

  parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + (i->os64L() ? 40:24)) & BX_SELECTOR_RPL_MASK) | 3,
            &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid    = SegValidCache | SegAccessROK | SegAccessWOK;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p        = 1;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl      = 3;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment  = 1; /* data/code segment */
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type     = BX_DATA_READ_WRITE_ACCESSED;
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0;           // base address
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  // scaled segment limit
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1;           // 4k granularity
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1;           // 32-bit mode
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0;           // available for use by system
#if BX_SUPPORT_X86_64
  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSEXIT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}
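
// Selector layout example (illustrative, hypothetical MSR value): all
// SYSENTER/SYSEXIT selectors derive from IA32_SYSENTER_CS. With
// sysenter_cs_msr == 0x0008, SYSENTER loads CS = 0x0008 and SS = 0x0010
// (CS+8, both RPL 0); SYSEXIT loads CS = (0x0008+16)|3 = 0x001B and
// SS = (0x0008+24)|3 = 0x0023 for a 32-bit return, or
// CS = (0x0008+32)|3 = 0x002B and SS = (0x0008+40)|3 = 0x0033 for a
// 64-bit return, matching the parse_selector() calls above.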

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  bx_address temp_RIP;

  BX_DEBUG(("Execute SYSCALL instruction"));

  if (!BX_CPU_THIS_PTR efer.get_SCE()) {
    exception(BX_UD_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

#if BX_SUPPORT_X86_64
  if (long_mode())
  {
    RCX = RIP;
    R11 = read_eflags() & ~(EFlagsRFMask);

    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
      temp_RIP = MSR_LSTAR;
    }
    else {
      temp_RIP = MSR_CSTAR;
    }

    // set up CS segment, flat, 64-bit DPL=0
    parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1; /* 64-bit code */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    handleCpuModeChange(); // mode change could only happen when in long_mode()

#if BX_SUPPORT_ALIGNMENT_CHECK
    BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

    // set up SS segment, flat, 64-bit DPL=0
    parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1; /* 32 bit stack */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0; /* available for use by system */

    writeEFlags(read_eflags() & ~MSR_FMASK & ~(EFlagsRFMask), EFlagsValidMask);
    RIP = temp_RIP;
  }
  else
#endif
  {
    // legacy mode

    ECX = EIP;
    temp_RIP = MSR_STAR & 0xFFFFFFFF;

    // set up CS segment, flat, 32-bit DPL=0
    parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    updateFetchModeMask(/* CS reloaded */);

#if BX_SUPPORT_ALIGNMENT_CHECK
    BX_CPU_THIS_PTR alignment_check_mask = 0; // CPL=0
#endif

    // set up SS segment, flat, 32-bit DPL=0
    parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 0;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b          = 1; /* 32 bit stack */
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l            = 0;
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.avl          = 0; /* available for use by system */

    BX_CPU_THIS_PTR clear_VM();
    BX_CPU_THIS_PTR clear_IF();
    BX_CPU_THIS_PTR clear_RF();
    RIP = temp_RIP;
  }

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSCALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}
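
// Selector layout example (illustrative, hypothetical MSR value): SYSCALL
// and SYSRET derive their selectors from MSR_STAR. With
// STAR[47:32] == 0x0008 and STAR[63:48] == 0x0020, SYSCALL loads
// CS = 0x0008 and SS = 0x0010 (CS+8), while SYSRET loads
// CS = 0x0020|3 = 0x0023 and SS = (0x0020+8)|3 = 0x002B for a 32-bit
// return, or CS = (0x0020+16)|3 = 0x0033 with the same SS for a 64-bit
// return, matching the parse_selector() calls above and in SYSRET below.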

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 5
  bx_address temp_RIP;

  BX_DEBUG(("Execute SYSRET instruction"));

  if (!BX_CPU_THIS_PTR efer.get_SCE()) {
    exception(BX_UD_EXCEPTION, 0);
  }

  if(!protected_mode() || CPL != 0) {
    BX_ERROR(("SYSRET: privilege check failed, generate #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  invalidate_prefetch_q();

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
  {
    if (i->os64L()) {
      if (!IsCanonical(RCX)) {
        BX_ERROR(("SYSRET: canonical failure for RCX (RIP)"));
        exception(BX_GP_EXCEPTION, 0);
      }

      // Return to 64-bit mode, set up CS segment, flat, 64-bit DPL=3
      parse_selector((((MSR_STAR >> 48) + 16) & BX_SELECTOR_RPL_MASK) | 3,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 0;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 1; /* 64-bit code */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

      temp_RIP = RCX;
    }
    else {
      // Return to 32-bit compatibility mode, set up CS segment, flat, 32-bit DPL=3
      parse_selector((MSR_STAR >> 48) | 3,
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

      temp_RIP = ECX;
    }

    handleCpuModeChange(); // mode change could only happen when in long64 mode

    handleAlignmentCheck(/* CPL change */);

    // SS base, limit, attributes unchanged
    parse_selector((Bit16u)(((MSR_STAR >> 48) + 8) | 3),
                       &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;

    writeEFlags((Bit32u) R11, EFlagsValidMask);
  }
  else // (!64BIT_MODE)
#endif
  {
    // Return to 32-bit legacy mode, set up CS segment, flat, 32-bit DPL=3
    parse_selector((MSR_STAR >> 48) | 3,
                     &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type    = BX_CODE_EXEC_READ_ACCESSED;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base         = 0; /* base address */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFFFFFF;  /* scaled segment limit */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g            = 1; /* 4k granularity */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b          = 1;
#if BX_SUPPORT_X86_64
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l            = 0; /* 32-bit code */
#endif
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl          = 0; /* available for use by system */

    updateFetchModeMask(/* CS reloaded */);

    handleAlignmentCheck(/* CPL change */);

    // SS base, limit, attributes unchanged
    parse_selector((Bit16u)(((MSR_STAR >> 48) + 8) | 3),
                     &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid   = SegValidCache | SegAccessROK | SegAccessWOK;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p       = 1;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl     = 3;
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1;  /* data/code segment */
    BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.type    = BX_DATA_READ_WRITE_ACCESSED;

    BX_CPU_THIS_PTR assert_IF();
    temp_RIP = ECX;
  }

  handleCpuModeChange();

  RIP = temp_RIP;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_SYSRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
#endif

  BX_NEXT_TRACE(i);
}

#if BX_SUPPORT_X86_64

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SWAPGS(bxInstruction_c *i)
{
  if(CPL != 0)
    exception(BX_GP_EXCEPTION, 0);

  Bit64u temp_GS_base = MSR_GSBASE;
  MSR_GSBASE = MSR_KERNELGSBASE;
  MSR_KERNELGSBASE = temp_GS_base;

  BX_NEXT_INSTR(i);
}
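
// Illustrative note: SWAPGS lets a 64-bit kernel entry point switch from
// the user GS base to a kernel per-CPU base without clobbering any
// general-purpose register; the exchange of MSR_GSBASE with
// MSR_KERNELGSBASE above implements exactly that swap.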

/* F3 0F AE /0 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDFSBASE(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  if (i->os64L()) {
    BX_WRITE_64BIT_REG(i->dst(), MSR_FSBASE);
  }
  else {
    BX_WRITE_32BIT_REGZ(i->dst(), (Bit32u) MSR_FSBASE);
  }

  BX_NEXT_INSTR(i);
}

/* F3 0F AE /1 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDGSBASE(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  if (i->os64L()) {
    BX_WRITE_64BIT_REG(i->dst(), MSR_GSBASE);
  }
  else {
    BX_WRITE_32BIT_REGZ(i->dst(), (Bit32u) MSR_GSBASE);
  }

  BX_NEXT_INSTR(i);
}

/* F3 0F AE /2 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WRFSBASE(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  if (i->os64L()) {
    Bit64u fsbase = BX_READ_64BIT_REG(i->src());
    if (!IsCanonical(fsbase)) {
      BX_ERROR(("WRFSBASE: canonical failure !"));
      exception(BX_GP_EXCEPTION, 0);
    }
    MSR_FSBASE = fsbase;
  }
  else {
    // 32-bit value is always canonical
    MSR_FSBASE = BX_READ_32BIT_REG(i->src());
  }

  BX_NEXT_INSTR(i);
}

/* F3 0F AE /3 */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WRGSBASE(bxInstruction_c *i)
{
  if (! BX_CPU_THIS_PTR cr4.get_FSGSBASE())
    exception(BX_UD_EXCEPTION, 0);

  if (i->os64L()) {
    Bit64u gsbase = BX_READ_64BIT_REG(i->src());
    if (!IsCanonical(gsbase)) {
      BX_ERROR(("WRGSBASE: canonical failure !"));
      exception(BX_GP_EXCEPTION, 0);
    }
    MSR_GSBASE = gsbase;
  }
  else {
    // 32-bit value is always canonical
    MSR_GSBASE = BX_READ_32BIT_REG(i->src());
  }

  BX_NEXT_INSTR(i);
}

#endif
