OpenCores
URL https://opencores.org/ocsvn/ao486/ao486/trunk

Subversion Repositories ao486

[/] [ao486/] [trunk/] [bochs486/] [cpu/] [protect_ctrl.cc] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 alfik
/////////////////////////////////////////////////////////////////////////
2
// $Id: protect_ctrl.cc 11643 2013-02-25 19:36:41Z sshwarts $
3
/////////////////////////////////////////////////////////////////////////
4
//
5
//  Copyright (C) 2001-2012  The Bochs Project
6
//
7
//  This library is free software; you can redistribute it and/or
8
//  modify it under the terms of the GNU Lesser General Public
9
//  License as published by the Free Software Foundation; either
10
//  version 2 of the License, or (at your option) any later version.
11
//
12
//  This library is distributed in the hope that it will be useful,
13
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
14
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
//  Lesser General Public License for more details.
16
//
17
//  You should have received a copy of the GNU Lesser General Public
18
//  License along with this library; if not, write to the Free Software
19
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20
/////////////////////////////////////////////////////////////////////////
21
 
22
#define NEED_CPU_REG_SHORTCUTS 1
23
#include "bochs.h"
24
#include "cpu.h"
25
#define LOG_THIS BX_CPU_THIS_PTR
26
 
27
// ARPL r/m16, r16 -- Adjust RPL field of a segment selector.
// If the RPL (bits 1:0) of the destination selector is lower than the RPL
// of the source selector, the destination RPL is raised to match and ZF is
// set; otherwise ZF is cleared.  #UD outside protected mode.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::ARPL_EwGw(bxInstruction_c *i)
{
  Bit16u op2_16, op1_16;

  if (! protected_mode()) {
    BX_DEBUG(("ARPL: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  /* op1_16 is a register or memory reference */
  if (i->modC0()) {
    op1_16 = BX_READ_16BIT_REG(i->dst());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    // read-modify-write access: the location stays prepared for the
    // matching write_RMW_virtual_word() below
    op1_16 = read_RMW_virtual_word(i->seg(), eaddr);
  }

  op2_16 = BX_READ_16BIT_REG(i->src());

  // RPL is the low two bits of a selector; 0xfffc masks them off
  if ((op1_16 & 0x03) < (op2_16 & 0x03)) {
    op1_16 = (op1_16 & 0xfffc) | (op2_16 & 0x03);
    /* now write back to destination */
    if (i->modC0()) {
      BX_WRITE_16BIT_REG(i->dst(), op1_16);
    }
    else {
      // completes the RMW access opened by read_RMW_virtual_word()
      write_RMW_virtual_word(op1_16);
    }
    assert_ZF();
  }
  else {
    clear_ZF();
  }

  BX_NEXT_INSTR(i);
}
65
 
66
// LAR r16/r32, r/m16 -- Load Access Rights byte.
// Loads the access-rights bytes of the descriptor named by the source
// selector into the destination register and sets ZF, provided the selector
// is non-null, visible at CPL/RPL, within the descriptor table limit, and
// of a type accepted by LAR; otherwise ZF is cleared and the destination is
// left unchanged.  #UD outside protected mode.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LAR_GvEw(bxInstruction_c *i)
{
  /* for 16 bit operand size mode */
  Bit16u raw_selector;
  bx_descriptor_t descriptor;
  bx_selector_t selector;
  Bit32u dword1, dword2;
#if BX_SUPPORT_X86_64
  Bit32u dword3 = 0;
#endif

  if (! protected_mode()) {
    BX_ERROR(("LAR: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* if selector null, clear ZF and done */
  if ((raw_selector & 0xfffc) == 0) {
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  parse_selector(raw_selector, &selector);

  if (!fetch_raw_descriptor2(&selector, &dword1, &dword2)) {
    BX_DEBUG(("LAR: failed to fetch descriptor"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  parse_descriptor(dword1, dword2, &descriptor);

  if (descriptor.valid==0) {
    BX_DEBUG(("LAR: descriptor not valid"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  /* if source selector is visible at CPL & RPL,
   * within the descriptor table, and of type accepted by LAR instruction,
   * then load register with segment limit and set ZF
   */

  if (descriptor.segment) { /* normal segment */
    if (IS_CODE_SEGMENT(descriptor.type) && IS_CODE_SEGMENT_CONFORMING(descriptor.type)) {
      /* ignore DPL for conforming segments */
    }
    else {
      // DPL must be >= max(CPL, RPL) for the descriptor to be visible
      if (descriptor.dpl < CPL || descriptor.dpl < selector.rpl) {
        clear_ZF();
        BX_NEXT_INSTR(i);
      }
    }
  }
  else { /* system or gate segment */
    switch (descriptor.type) {
      // 286-style TSS/gate types are legal only outside long mode
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
      case BX_286_CALL_GATE:
      case BX_TASK_GATE:
        if (long_mode()) {
          // fixed wording of the log message ("in not" -> "is not")
          BX_DEBUG(("LAR: descriptor type is not accepted in long mode"));
          clear_ZF();
          BX_NEXT_INSTR(i);
        }
        /* fall through */
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
      case BX_386_CALL_GATE:
#if BX_SUPPORT_X86_64
        // in long mode system descriptors are 16 bytes; re-fetch the full
        // descriptor so the upper half is validated as well
        if (long64_mode() || (descriptor.type == BX_386_CALL_GATE && long_mode()) ) {
          if (!fetch_raw_descriptor2_64(&selector, &dword1, &dword2, &dword3)) {
            BX_ERROR(("LAR: failed to fetch 64-bit descriptor"));
            clear_ZF();
            BX_NEXT_INSTR(i);
          }
        }
#endif
        break;
      default: /* rest not accepted types to LAR */
        BX_DEBUG(("LAR: not accepted descriptor type"));
        clear_ZF();
        BX_NEXT_INSTR(i);
    }

    if (descriptor.dpl < CPL || descriptor.dpl < selector.rpl) {
      clear_ZF();
      BX_NEXT_INSTR(i);
    }
  }

  assert_ZF();
  if (i->os32L()) {
    /* masked by 00FxFF00, where x is undefined */
    BX_WRITE_32BIT_REGZ(i->dst(), dword2 & 0x00ffff00);
  }
  else {
    BX_WRITE_16BIT_REG(i->dst(), dword2 & 0xff00);
  }

  BX_NEXT_INSTR(i);
}
178
 
179
// LSL r16/r32, r/m16 -- Load Segment Limit.
// Loads the byte-granular segment limit of the descriptor named by the
// source selector into the destination register and sets ZF, provided the
// selector is non-null, visible at CPL/RPL, within the table limit and of
// a type accepted by LSL; otherwise ZF is cleared.  #UD outside protected
// mode.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LSL_GvEw(bxInstruction_c *i)
{
  /* for 16 bit operand size mode */
  Bit16u raw_selector;
  Bit32u limit32;
  bx_selector_t selector;
  Bit32u dword1, dword2;
#if BX_SUPPORT_X86_64
  Bit32u dword3 = 0;
#endif

  if (! protected_mode()) {
    BX_ERROR(("LSL: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* if selector null, clear ZF and done */
  if ((raw_selector & 0xfffc) == 0) {
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  parse_selector(raw_selector, &selector);

  if (!fetch_raw_descriptor2(&selector, &dword1, &dword2)) {
    BX_DEBUG(("LSL: failed to fetch descriptor"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  // DPL field lives in bits 14:13 of the descriptor's second dword
  Bit32u descriptor_dpl = (dword2 >> 13) & 0x03;

  if ((dword2 & 0x00001000) == 0) { // system segment
    Bit32u type = (dword2 >> 8) & 0x0000000f;
    switch (type) {
      // 286 TSS types are not accepted in long mode
      case BX_SYS_SEGMENT_AVAIL_286_TSS:
      case BX_SYS_SEGMENT_BUSY_286_TSS:
        if (long_mode()) {
          clear_ZF();
          BX_NEXT_INSTR(i);
        }
        /* fall through */
      case BX_SYS_SEGMENT_LDT:
      case BX_SYS_SEGMENT_AVAIL_386_TSS:
      case BX_SYS_SEGMENT_BUSY_386_TSS:
#if BX_SUPPORT_X86_64
        // 16-byte system descriptors in 64-bit mode: re-fetch to validate
        // the upper half as well
        if (long64_mode()) {
          if (!fetch_raw_descriptor2_64(&selector, &dword1, &dword2, &dword3)) {
            BX_ERROR(("LSL: failed to fetch 64-bit descriptor"));
            clear_ZF();
            BX_NEXT_INSTR(i);
          }
        }
#endif
        // DPL must be >= max(CPL, RPL)
        if (descriptor_dpl < CPL || descriptor_dpl < selector.rpl) {
          clear_ZF();
          BX_NEXT_INSTR(i);
        }
        // limit bits 15:0 from dword1, bits 19:16 from dword2
        limit32 = (dword1 & 0x0000ffff) | (dword2 & 0x000f0000);
        // G bit set: limit is page-granular, scale to bytes
        if (dword2 & 0x00800000)
          limit32 = (limit32 << 12) | 0x00000fff;
        break;
      default: /* rest not accepted types to LSL */
        clear_ZF();
        BX_NEXT_INSTR(i);
    }
  }
  else { // data & code segment
    limit32 = (dword1 & 0x0000ffff) | (dword2 & 0x000f0000);
    if (dword2 & 0x00800000)
      limit32 = (limit32 << 12) | 0x00000fff;
    // conforming code has both type bits 11:10 set; DPL is ignored for it
    if ((dword2 & 0x00000c00) != 0x00000c00) {
      // non-conforming code segment
      if (descriptor_dpl < CPL || descriptor_dpl < selector.rpl) {
        clear_ZF();
        BX_NEXT_INSTR(i);
      }
    }
  }

  /* all checks pass, limit32 is now byte granular, write to op1 */
  assert_ZF();

  if (i->os32L()) {
    BX_WRITE_32BIT_REGZ(i->dst(), limit32);
  }
  else {
    // chop off upper 16 bits
    BX_WRITE_16BIT_REG(i->dst(), (Bit16u) limit32);
  }

  BX_NEXT_INSTR(i);
}
281
 
282
// SLDT r/m16 -- Store Local Descriptor Table register.
// Writes the LDTR selector to the destination register or memory word.
// #UD outside protected mode.  May trap to a VMX or SVM host when
// descriptor-table exiting / LDTR-read intercepts are enabled.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SLDT_Ew(bxInstruction_c *i)
{
  if (! protected_mode()) {
    BX_ERROR(("SLDT: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_LDTR_TR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_LDTR_READ)) Svm_Vmexit(SVM_VMEXIT_LDTR_READ);
  }
#endif

  Bit16u val16 = BX_CPU_THIS_PTR ldtr.selector.value;
  if (i->modC0()) {
    // register destination: 32-bit form zero-extends the selector
    if (i->os32L()) {
      BX_WRITE_32BIT_REGZ(i->dst(), val16);
    }
    else {
      BX_WRITE_16BIT_REG(i->dst(), val16);
    }
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    // memory destination is always a 16-bit store
    write_virtual_word(i->seg(), eaddr, val16);
  }

  BX_NEXT_INSTR(i);
}
318
 
319
// STR r/m16 -- Store Task Register.
// Writes the TR selector to the destination register or memory word.
// #UD outside protected mode.  May trap to a VMX or SVM host when
// descriptor-table exiting / TR-read intercepts are enabled.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::STR_Ew(bxInstruction_c *i)
{
  if (! protected_mode()) {
    BX_ERROR(("STR: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_LDTR_TR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_TR_READ)) Svm_Vmexit(SVM_VMEXIT_TR_READ);
  }
#endif

  Bit16u val16 = BX_CPU_THIS_PTR tr.selector.value;
  if (i->modC0()) {
    // register destination: 32-bit form zero-extends the selector
    if (i->os32L()) {
      BX_WRITE_32BIT_REGZ(i->dst(), val16);
    }
    else {
      BX_WRITE_16BIT_REG(i->dst(), val16);
    }
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    // memory destination is always a 16-bit store
    write_virtual_word(i->seg(), eaddr, val16);
  }

  BX_NEXT_INSTR(i);
}
355
 
356
// LLDT r/m16 -- Load Local Descriptor Table register.
// Loads LDTR from the selector operand after validating that the selector
// points into the GDT at a present LDT descriptor.  A null selector simply
// invalidates LDTR.  #UD outside protected mode, #GP(0) if CPL != 0,
// #GP(selector) / #NP(selector) on validation failures.  May trap to a VMX
// or SVM host when the corresponding intercepts are enabled.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LLDT_Ew(bxInstruction_c *i)
{
  /* protected mode */
  bx_descriptor_t  descriptor;
  bx_selector_t    selector;
  Bit16u raw_selector;
  Bit32u dword1, dword2;
#if BX_SUPPORT_X86_64
  Bit32u dword3 = 0;
#endif

  if (! protected_mode()) {
    BX_ERROR(("LLDT: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL != 0) {
    // fixed spelling of the error message ("priveledge" -> "privilege")
    BX_ERROR(("LLDT: The current privilege level is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_LDTR_TR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_LDTR_WRITE)) Svm_Vmexit(SVM_VMEXIT_LDTR_WRITE);
  }
#endif

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* if selector is NULL, invalidate and done */
  if ((raw_selector & 0xfffc) == 0) {
    BX_CPU_THIS_PTR ldtr.selector.value = raw_selector;
    BX_CPU_THIS_PTR ldtr.cache.valid = 0;
    BX_NEXT_INSTR(i);
  }

  /* parse fields in selector */
  parse_selector(raw_selector, &selector);

  // #GP(selector) if the selector operand does not point into GDT
  if (selector.ti != 0) {
    BX_ERROR(("LLDT: selector.ti != 0"));
    exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
  }

  /* fetch descriptor; call handles out of limits checks */
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // 16-byte descriptor in 64-bit mode; dword3 carries base bits 63:32
    fetch_raw_descriptor_64(&selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  }
  else
#endif
  {
    fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);
  }

  parse_descriptor(dword1, dword2, &descriptor);

  /* if selector doesn't point to an LDT descriptor #GP(selector) */
  if (descriptor.valid == 0 || descriptor.segment ||
         descriptor.type != BX_SYS_SEGMENT_LDT)
  {
    BX_ERROR(("LLDT: doesn't point to an LDT descriptor!"));
    exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
  }

  /* #NP(selector) if LDT descriptor is not present */
  if (! IS_PRESENT(descriptor)) {
    BX_ERROR(("LLDT: LDT descriptor not present!"));
    exception(BX_NP_EXCEPTION, raw_selector & 0xfffc);
  }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    descriptor.u.segment.base |= ((Bit64u)(dword3) << 32);
    BX_DEBUG(("64 bit LDT base = 0x%08x%08x",
       GET32H(descriptor.u.segment.base), GET32L(descriptor.u.segment.base)));
    if (!IsCanonical(descriptor.u.segment.base)) {
      BX_ERROR(("LLDT: non-canonical LDT descriptor base!"));
      exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
    }
  }
#endif

  // all checks passed: commit the new LDTR
  BX_CPU_THIS_PTR ldtr.selector = selector;
  BX_CPU_THIS_PTR ldtr.cache = descriptor;
  BX_CPU_THIS_PTR ldtr.cache.valid = 1;

  BX_NEXT_INSTR(i);
}
459
 
460
// LTR r/m16 -- Load Task Register.
// Loads TR from the selector operand after validating that the selector
// points into the GDT at a present, available (non-busy) TSS descriptor,
// then marks the descriptor busy both in the cached copy and in the GDT
// in memory.  #UD outside protected mode, #GP(0) if CPL != 0 or on a null
// selector, #GP(selector) / #NP(selector) on validation failures.  May
// trap to a VMX or SVM host when the corresponding intercepts are enabled.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LTR_Ew(bxInstruction_c *i)
{
  bx_descriptor_t descriptor;
  bx_selector_t selector;
  Bit16u raw_selector;
  Bit32u dword1, dword2;
#if BX_SUPPORT_X86_64
  Bit32u dword3 = 0;
#endif

  if (! protected_mode()) {
    BX_ERROR(("LTR: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (CPL != 0) {
    // fixed spelling of the error message ("priveledge" -> "privilege")
    BX_ERROR(("LTR: The current privilege level is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_LDTR_TR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_TR_WRITE)) Svm_Vmexit(SVM_VMEXIT_TR_WRITE);
  }
#endif

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* unlike LLDT, a NULL selector is not allowed: #GP(0) */
  if ((raw_selector & BX_SELECTOR_RPL_MASK) == 0) {
    BX_ERROR(("LTR: loading with NULL selector!"));
    exception(BX_GP_EXCEPTION, 0);
  }

  /* parse fields in selector, then check for null selector */
  parse_selector(raw_selector, &selector);

  // #GP(selector) if the selector does not point into GDT
  if (selector.ti) {
    BX_ERROR(("LTR: selector.ti != 0"));
    exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
  }

  /* fetch descriptor; call handles out of limits checks */
#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    // 16-byte descriptor in 64-bit mode; dword3 carries base bits 63:32
    fetch_raw_descriptor_64(&selector, &dword1, &dword2, &dword3, BX_GP_EXCEPTION);
  }
  else
#endif
  {
    fetch_raw_descriptor(&selector, &dword1, &dword2, BX_GP_EXCEPTION);
  }

  parse_descriptor(dword1, dword2, &descriptor);

  /* #GP(selector) if object is not a TSS or is already busy */
  if (descriptor.valid==0 || descriptor.segment ||
         (descriptor.type!=BX_SYS_SEGMENT_AVAIL_286_TSS &&
          descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS))
  {
    BX_ERROR(("LTR: doesn't point to an available TSS descriptor!"));
    exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
  }

#if BX_SUPPORT_X86_64
  // long mode only accepts 32/64-bit TSS descriptors
  if (long_mode() && descriptor.type!=BX_SYS_SEGMENT_AVAIL_386_TSS) {
    BX_ERROR(("LTR: doesn't point to an available TSS386 descriptor in long mode!"));
    exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
  }
#endif

  /* #NP(selector) if TSS descriptor is not present */
  if (! IS_PRESENT(descriptor)) {
    BX_ERROR(("LTR: TSS descriptor not present!"));
    exception(BX_NP_EXCEPTION, raw_selector & 0xfffc);
  }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    descriptor.u.segment.base |= ((Bit64u)(dword3) << 32);
    BX_DEBUG(("64 bit TSS base = 0x%08x%08x",
       GET32H(descriptor.u.segment.base), GET32L(descriptor.u.segment.base)));
    if (!IsCanonical(descriptor.u.segment.base)) {
      BX_ERROR(("LTR: non-canonical TSS descriptor base!"));
      exception(BX_GP_EXCEPTION, raw_selector & 0xfffc);
    }
  }
#endif

  BX_CPU_THIS_PTR tr.selector = selector;
  BX_CPU_THIS_PTR tr.cache    = descriptor;
  BX_CPU_THIS_PTR tr.cache.valid = 1;
  // tr.cache.type should not have busy bit, or it would not get
  // through the conditions above.
  BX_ASSERT((BX_CPU_THIS_PTR tr.cache.type & 2) == 0);
  BX_CPU_THIS_PTR tr.cache.type |= 2; // mark as busy

  /* mark as busy in the in-memory GDT entry as well */
  if (!(dword2 & 0x0200)) {
    dword2 |= 0x0200; /* set busy bit */
    access_write_linear(BX_CPU_THIS_PTR gdtr.base + selector.index*8 + 4, 4, 0, &dword2);
  }

  BX_NEXT_INSTR(i);
}
578
 
579
// VERR r/m16 -- Verify segment for Reading.
// Sets ZF if the segment named by the selector operand is readable at the
// current CPL and the selector's RPL; clears ZF otherwise.  Never faults
// on the verification itself.  #UD outside protected mode.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VERR_Ew(bxInstruction_c *i)
{
  /* for 16 bit operand size mode */
  Bit16u raw_selector;
  bx_descriptor_t descriptor;
  bx_selector_t selector;
  Bit32u dword1, dword2;

  if (! protected_mode()) {
    BX_ERROR(("VERR: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* if selector null, clear ZF and done */
  if ((raw_selector & 0xfffc) == 0) {
    BX_DEBUG(("VERR: null selector"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  /* if source selector is visible at CPL & RPL,
   * within the descriptor table, and of type accepted by VERR instruction,
   * then load register with segment limit and set ZF */
  parse_selector(raw_selector, &selector);

  if (!fetch_raw_descriptor2(&selector, &dword1, &dword2)) {
    /* not within descriptor table */
    BX_DEBUG(("VERR: not within descriptor table"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  parse_descriptor(dword1, dword2, &descriptor);

  if (descriptor.segment==0) { /* system or gate descriptor */
    BX_DEBUG(("VERR: system descriptor"));
    clear_ZF();  /* inaccessible */
    BX_NEXT_INSTR(i);
  }

  if (descriptor.valid==0) {
    BX_DEBUG(("VERR: valid bit cleared"));
    clear_ZF();  /* inaccessible */
    BX_NEXT_INSTR(i);
  }

  /* normal data/code segment */
  if (IS_CODE_SEGMENT(descriptor.type)) { /* code segment */
    /* ignore DPL for readable conforming segments */
    if (IS_CODE_SEGMENT_CONFORMING(descriptor.type) &&
        IS_CODE_SEGMENT_READABLE(descriptor.type))
    {
      BX_DEBUG(("VERR: conforming code, OK"));
      assert_ZF(); /* accessible */
      BX_NEXT_INSTR(i);
    }
    if (!IS_CODE_SEGMENT_READABLE(descriptor.type)) {
      BX_DEBUG(("VERR: code not readable"));
      clear_ZF();  /* inaccessible */
      BX_NEXT_INSTR(i);
    }
    /* readable, non-conforming code segment: DPL must be >= max(CPL, RPL) */
    if ((descriptor.dpl<CPL) || (descriptor.dpl<selector.rpl)) {
      // fixed spelling of the log message ("withing" -> "within")
      BX_DEBUG(("VERR: non-conforming code not within priv level"));
      clear_ZF();  /* inaccessible */
    }
    else {
      assert_ZF(); /* accessible */
    }
  }
  else { /* data segment */
    if ((descriptor.dpl<CPL) || (descriptor.dpl<selector.rpl)) {
      // fixed spelling of the log message ("withing" -> "within")
      BX_DEBUG(("VERR: data seg not within priv level"));
      clear_ZF(); /* not accessible */
    }
    else {
      assert_ZF(); /* accessible */
    }
  }

  BX_NEXT_INSTR(i);
}
670
 
671
// VERW r/m16 -- Verify segment for Writing.
// Sets ZF if the segment named by the selector operand is a writable data
// segment accessible at the current CPL and the selector's RPL; clears ZF
// otherwise.  Never faults on the verification itself.  #UD outside
// protected mode.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VERW_Ew(bxInstruction_c *i)
{
  /* for 16 bit operand size mode */
  Bit16u raw_selector;
  bx_descriptor_t descriptor;
  bx_selector_t selector;
  Bit32u dword1, dword2;

  if (! protected_mode()) {
    BX_ERROR(("VERW: not recognized in real or virtual-8086 mode"));
    exception(BX_UD_EXCEPTION, 0);
  }

  if (i->modC0()) {
    raw_selector = BX_READ_16BIT_REG(i->src());
  }
  else {
    bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    raw_selector = read_virtual_word(i->seg(), eaddr);
  }

  /* if selector null, clear ZF and done */
  if ((raw_selector & 0xfffc) == 0) {
    BX_DEBUG(("VERW: null selector"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  /* if source selector is visible at CPL & RPL,
   * within the descriptor table, and of type accepted by VERW instruction,
   * then load register with segment limit and set ZF */
  parse_selector(raw_selector, &selector);

  if (!fetch_raw_descriptor2(&selector, &dword1, &dword2)) {
    /* not within descriptor table */
    BX_DEBUG(("VERW: not within descriptor table"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  parse_descriptor(dword1, dword2, &descriptor);

  /* rule out system segments & code segments: code is never writable */
  if (descriptor.segment==0 || IS_CODE_SEGMENT(descriptor.type)) {
    BX_DEBUG(("VERW: system seg or code"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  if (descriptor.valid==0) {
    BX_DEBUG(("VERW: valid bit cleared"));
    clear_ZF();
    BX_NEXT_INSTR(i);
  }

  /* data segment */
  if (IS_DATA_SEGMENT_WRITEABLE(descriptor.type)) { /* writable */
    // DPL must be >= max(CPL, RPL)
    if ((descriptor.dpl<CPL) || (descriptor.dpl<selector.rpl)) {
      BX_DEBUG(("VERW: writable data seg not within priv level"));
      clear_ZF(); /* not accessible */
    }
    else {
      assert_ZF();  /* accessible */
    }
  }
  else {
    BX_DEBUG(("VERW: data seg not writable"));
    clear_ZF(); /* not accessible */
  }

  BX_NEXT_INSTR(i);
}
744
 
745
// SGDT m -- Store Global Descriptor Table register (legacy/compat modes).
// Writes the 16-bit GDT limit followed by the 32-bit GDT base to memory.
// The 64-bit form is handled separately by SGDT64_Ms.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SGDT_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_GDTR_READ)) Svm_Vmexit(SVM_VMEXIT_GDTR_READ);
  }
#endif

  Bit16u limit_16 = BX_CPU_THIS_PTR gdtr.limit;
  Bit32u base_32  = (Bit32u) BX_CPU_THIS_PTR gdtr.base;

  Bit32u eaddr = (Bit32u) BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base dword at eaddr+2 (wrapped by address-size mask)
  write_virtual_word_32(i->seg(), eaddr, limit_16);
  write_virtual_dword_32(i->seg(), (eaddr+2) & i->asize_mask(), base_32);

  BX_NEXT_INSTR(i);
}
771
 
772
// SIDT m -- Store Interrupt Descriptor Table register (legacy/compat modes).
// Writes the 16-bit IDT limit followed by the 32-bit IDT base to memory.
// The 64-bit form is handled separately by SIDT64_Ms.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SIDT_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IDTR_READ)) Svm_Vmexit(SVM_VMEXIT_IDTR_READ);
  }
#endif

  Bit16u limit_16 = BX_CPU_THIS_PTR idtr.limit;
  Bit32u base_32  = (Bit32u) BX_CPU_THIS_PTR idtr.base;

  Bit32u eaddr = (Bit32u) BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base dword at eaddr+2 (wrapped by address-size mask)
  write_virtual_word_32(i->seg(), eaddr, limit_16);
  write_virtual_dword_32(i->seg(), (eaddr+2) & i->asize_mask(), base_32);

  BX_NEXT_INSTR(i);
}
798
 
799
// LGDT m -- Load Global Descriptor Table register (legacy/compat modes).
// Reads a 16-bit limit and 32-bit base from memory into GDTR.  With a
// 16-bit operand size only 24 bits of the base are used.  #GP(0) if
// CPL != 0 in protected mode.  The 64-bit form is handled by LGDT64_Ms.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LGDT_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // CPL is always 0 in real mode, so no explicit real_mode() test is needed
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("LGDT: CPL != 0 causes #GP"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_GDTR_WRITE)) Svm_Vmexit(SVM_VMEXIT_GDTR_WRITE);
  }
#endif

  Bit32u eaddr = (Bit32u) BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base dword at eaddr+2 (wrapped by address-size mask)
  Bit16u limit_16 = read_virtual_word_32(i->seg(), eaddr);
  Bit32u base_32 = read_virtual_dword_32(i->seg(), (eaddr + 2) & i->asize_mask());

  if (i->os32L() == 0) base_32 &= 0x00ffffff; /* ignore upper 8 bits */

  BX_CPU_THIS_PTR gdtr.limit = limit_16;
  BX_CPU_THIS_PTR gdtr.base = base_32;

  BX_NEXT_INSTR(i);
}
833
 
834
// LIDT m -- Load Interrupt Descriptor Table register (legacy/compat modes).
// Reads a 16-bit limit and 32-bit base from memory into IDTR.  With a
// 16-bit operand size only 24 bits of the base are used.  #GP(0) if
// CPL != 0 in protected mode.  The 64-bit form is handled by LIDT64_Ms.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LIDT_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // CPL is always 0 in real mode, so no explicit real_mode() test is needed
  if (/* !real_mode() && */ CPL!=0) {
    BX_ERROR(("LIDT: CPL != 0 causes #GP"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IDTR_WRITE)) Svm_Vmexit(SVM_VMEXIT_IDTR_WRITE);
  }
#endif

  Bit32u eaddr = (Bit32u) BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base dword at eaddr+2 (wrapped by address-size mask)
  Bit16u limit_16 = read_virtual_word_32(i->seg(), eaddr);
  Bit32u base_32 = read_virtual_dword_32(i->seg(), (eaddr + 2) & i->asize_mask());

  if (i->os32L() == 0) base_32 &= 0x00ffffff; /* ignore upper 8 bits */

  BX_CPU_THIS_PTR idtr.limit = limit_16;
  BX_CPU_THIS_PTR idtr.base = base_32;

  BX_NEXT_INSTR(i);
}
868
 
869
#if BX_SUPPORT_X86_64
870
 
871
// SGDT m -- Store Global Descriptor Table register, 64-bit mode form.
// Writes the 16-bit GDT limit followed by the full 64-bit GDT base.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SGDT64_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_GDTR_READ)) Svm_Vmexit(SVM_VMEXIT_GDTR_READ);
  }
#endif

  Bit16u limit_16 = BX_CPU_THIS_PTR gdtr.limit;
  Bit64u base_64  = BX_CPU_THIS_PTR gdtr.base;

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base qword at eaddr+2 (wrapped by address-size mask)
  write_virtual_word_64(i->seg(), eaddr, limit_16);
  write_virtual_qword_64(i->seg(), (eaddr+2) & i->asize_mask(), base_64);

  BX_NEXT_INSTR(i);
}
897
 
898
// SIDT m -- Store Interrupt Descriptor Table register, 64-bit mode form.
// Writes the 16-bit IDT limit followed by the full 64-bit IDT base.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::SIDT64_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_WRITE);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IDTR_READ)) Svm_Vmexit(SVM_VMEXIT_IDTR_READ);
  }
#endif

  Bit16u limit_16 = BX_CPU_THIS_PTR idtr.limit;
  Bit64u base_64  = BX_CPU_THIS_PTR idtr.base;

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // limit word at eaddr, base qword at eaddr+2 (wrapped by address-size mask)
  write_virtual_word_64(i->seg(), eaddr, limit_16);
  write_virtual_qword_64(i->seg(), (eaddr+2) & i->asize_mask(), base_64);

  BX_NEXT_INSTR(i);
}
924
 
925
// LGDT m -- Load Global Descriptor Table register, 64-bit mode form.
// Reads a 16-bit limit and a 64-bit base from memory into GDTR.  #GP(0)
// if CPL != 0 or if the base address is not canonical.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LGDT64_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  if (CPL!=0) {
    BX_ERROR(("LGDT64_Ms: CPL != 0 in long mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_GDTR_WRITE)) Svm_Vmexit(SVM_VMEXIT_GDTR_WRITE);
  }
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // base qword lives at eaddr+2; read and validate it before the limit
  // so a non-canonical base faults without partial state updates
  Bit64u base_64 = read_virtual_qword_64(i->seg(), (eaddr + 2) & i->asize_mask());
  if (! IsCanonical(base_64)) {
    BX_ERROR(("LGDT64_Ms: loaded base64 address is not in canonical form!"));
    exception(BX_GP_EXCEPTION, 0);
  }
  Bit16u limit_16 = read_virtual_word_64(i->seg(), eaddr);

  BX_CPU_THIS_PTR gdtr.limit = limit_16;
  BX_CPU_THIS_PTR gdtr.base = base_64;

  BX_NEXT_INSTR(i);
}
960
 
961
// LIDT m -- Load Interrupt Descriptor Table register, 64-bit mode form.
// Reads a 16-bit limit and a 64-bit base from memory into IDTR.  #GP(0)
// if CPL != 0 or if the base address is not canonical.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LIDT64_Ms(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);

  if (CPL != 0) {
    BX_ERROR(("LIDT64_Ms: CPL != 0 in long mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX >= 2
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT))
      VMexit_Instruction(i, VMX_VMEXIT_GDTR_IDTR_ACCESS, BX_READ);
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IDTR_WRITE)) Svm_Vmexit(SVM_VMEXIT_IDTR_WRITE);
  }
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // base qword lives at eaddr+2; read and validate it before the limit
  // so a non-canonical base faults without partial state updates
  Bit64u base_64 = read_virtual_qword_64(i->seg(), (eaddr + 2) & i->asize_mask());
  if (! IsCanonical(base_64)) {
    BX_ERROR(("LIDT64_Ms: loaded base64 address is not in canonical form!"));
    exception(BX_GP_EXCEPTION, 0);
  }
  Bit16u limit_16 = read_virtual_word_64(i->seg(), eaddr);

  BX_CPU_THIS_PTR idtr.limit = limit_16;
  BX_CPU_THIS_PTR idtr.base = base_64;

  BX_NEXT_INSTR(i);
}
996
 
997
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.