OpenCores Subversion repository: ao486
URL: https://opencores.org/ocsvn/ao486/ao486/trunk
File: ao486/trunk/bochs486/cpu/access32.cc (rev 2)
/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc 11580 2013-01-19 20:45:03Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008-2013 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
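
// Every write_virtual_*_32()/read_virtual_*_32() accessor below follows
// the same pattern: validate the offset against the scaled segment
// limit, probe the TLB for a direct host pointer (the fast path), and
// otherwise fall back to access_write_linear()/access_read_linear(),
// which walk the page tables and raise any paging faults.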

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 1, CPL, BX_WRITE, (Bit8u*) &data);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 1);
          *hostAddr = data;
          return;
        }
      }
      access_write_linear(laddr, 1, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0);
  goto accessOK;
}
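
// In the TLB probes above and below, accessBits is a small permission
// bitmap: bit 0x01 (shifted by USER_PL) is tested for read access and
// bit 0x04 for write access at the current user/supervisor privilege,
// so a single AND decides whether the cached host pointer may be used.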

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
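      // With alignment checking armed (CR0.AM, EFLAGS.AC, CPL == 3),
      // AlignedAccessLPFOf() folds the low address bits into the value
      // compared against tlbEntry->lpf, so a misaligned access misses
      // the fast path and reaches the explicit #AC test further down.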
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, CPL, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 2);
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("write_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0);
  goto accessOK;
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, CPL, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 4);
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("write_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0);
  goto accessOK;
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, CPL, BX_WRITE, (Bit8u*) &data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 8);
          WriteHostQWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("write_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 8, CPL, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_xmmword_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 16);
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }

      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_xmmword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0);
  goto accessOK;
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_xmmword_aligned_32(unsigned s, Bit32u offset, const BxPackedXmmRegister *data)
{
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u laddr = get_laddr32(s, offset);
  // must check alignment here because #GP on misaligned access is higher
  // priority than other segment related faults
  if (laddr & 15) {
    BX_ERROR(("write_virtual_xmmword_aligned_32(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 16, CPL, BX_WRITE, (Bit8u*) data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 16);
          WriteHostQWordToLittleEndian(hostAddr,   data->xmm64u(0));
          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
          return;
        }
      }
      access_write_linear(laddr, 16, CPL, (void *) data);
      return;
    }
    else {
      BX_ERROR(("write_virtual_xmmword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0);
  goto accessOK;
}
339
 
340
#if BX_SUPPORT_AVX
341
 
342
void BX_CPU_C::write_virtual_ymmword_32(unsigned s, Bit32u offset, const BxPackedAvxRegister *data)
343
{
344
  Bit32u laddr;
345
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
346
 
347
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
348
 
349
  if (seg->cache.valid & SegAccessWOK) {
350
    if (offset <= (seg->cache.u.segment.limit_scaled-31)) {
351
accessOK:
352
      laddr = get_laddr32(s, offset);
353
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 31);
354
      Bit32u lpf = LPFOf(laddr);
355
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
356
      if (tlbEntry->lpf == lpf) {
357
        // See if the TLB entry privilege level allows us write access
358
        // from this CPL.
359
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
360
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
361
          Bit32u pageOffset = PAGE_OFFSET(laddr);
362
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
363
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 32, CPL, BX_WRITE, (Bit8u*) data);
364
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
365
          pageWriteStampTable.decWriteStamp(pAddr, 32);
366
          for (unsigned n=0; n < 4; n++) {
367
            WriteHostQWordToLittleEndian(hostAddr+n, data->avx64u(n));
368
          }
369
          return;
370
        }
371
      }
372
 
373
      access_write_linear(laddr, 32, CPL, (void *) data);
374
      return;
375
    }
376
    else {
377
      BX_ERROR(("write_virtual_ymmword_32(): segment limit violation"));
378
      exception(int_number(s), 0);
379
    }
380
  }
381
 
382
  if (!write_virtual_checks(seg, offset, 32))
383
    exception(int_number(s), 0);
384
  goto accessOK;
385
}
386
 
387
void BX_CPU_C::write_virtual_ymmword_aligned_32(unsigned s, Bit32u offset, const BxPackedAvxRegister *data)
388
{
389
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
390
 
391
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
392
 
393
  Bit32u laddr = get_laddr32(s, offset);
394
  // must check alignment here because #GP on misaligned access is higher
395
  // priority than other segment related faults
396
  if (laddr & 31) {
397
    BX_ERROR(("write_virtual_ymmword_aligned_32(): #GP misaligned access"));
398
    exception(BX_GP_EXCEPTION, 0);
399
  }
400
 
401
  if (seg->cache.valid & SegAccessWOK) {
402
    if (offset <= (seg->cache.u.segment.limit_scaled-31)) {
403
accessOK:
404
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
405
      Bit32u lpf = LPFOf(laddr);
406
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
407
      if (tlbEntry->lpf == lpf) {
408
        // See if the TLB entry privilege level allows us write access
409
        // from this CPL.
410
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
411
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
412
          Bit32u pageOffset = PAGE_OFFSET(laddr);
413
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
414
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 32, CPL, BX_WRITE, (Bit8u*) data);
415
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
416
          pageWriteStampTable.decWriteStamp(pAddr, 32);
417
          for (unsigned n=0; n < 4; n++) {
418
            WriteHostQWordToLittleEndian(hostAddr+n, data->avx64u(n));
419
          }
420
          return;
421
        }
422
      }
423
      access_write_linear(laddr, 32, CPL, (void *) data);
424
      return;
425
    }
426
    else {
427
      BX_ERROR(("write_virtual_ymmword_aligned_32(): segment limit violation"));
428
      exception(int_number(s), 0);
429
    }
430
  }
431
 
432
  if (!write_virtual_checks(seg, offset, 32))
433
    exception(int_number(s), 0);
434
  goto accessOK;
435
}
436
 
437
#endif // BX_SUPPORT_AVX
438
 
439
#endif

  Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
          data = *hostAddr;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 1, CPL, BX_READ, (Bit8u*) &data);

          return data;
        }
      }
      access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_byte_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 1))
    exception(int_number(s), 0);
  goto accessOK;
}

  Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0);
  goto accessOK;
}

  Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0);
  goto accessOK;
}

  Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0);
  goto accessOK;
}

#if BX_CPU_LEVEL >= 6

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_xmmword_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_xmmword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0);
  goto accessOK;
}

  void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_xmmword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmRegister *data)
{
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u laddr = get_laddr32(s, offset);
  // must check alignment here because #GP on misaligned access is higher
  // priority than other segment related faults
  if (laddr & 15) {
    BX_ERROR(("read_virtual_xmmword_aligned_32(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          ReadHostQWordFromLittleEndian(hostAddr,   data->xmm64u(0));
          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 16, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_xmmword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 16))
    exception(int_number(s), 0);
  goto accessOK;
}

#if BX_SUPPORT_AVX

void BX_CPU_C::read_virtual_ymmword_32(unsigned s, Bit32u offset, BxPackedAvxRegister *data)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-31)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 31);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          for (unsigned n=0; n < 4; n++) {
            ReadHostQWordFromLittleEndian(hostAddr+n, data->avx64u(n));
          }
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 32, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 32, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_ymmword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 32))
    exception(int_number(s), 0);
  goto accessOK;
}

void BX_CPU_C::read_virtual_ymmword_aligned_32(unsigned s, Bit32u offset, BxPackedAvxRegister *data)
{
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u laddr = get_laddr32(s, offset);
  // must check alignment here because #GP on misaligned access is higher
  // priority than other segment related faults
  if (laddr & 31) {
    BX_ERROR(("read_virtual_ymmword_aligned_32(): #GP misaligned access"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (seg->cache.valid & SegAccessROK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-31)) {
accessOK:
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us read access
        // from this CPL.
        if (tlbEntry->accessBits & (0x01 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          for (unsigned n=0; n < 4; n++) {
            ReadHostQWordFromLittleEndian(hostAddr+n, data->avx64u(n));
          }
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (tlbEntry->ppf | pageOffset), 32, CPL, BX_READ, (Bit8u*) data);
          return;
        }
      }
      access_read_linear(laddr, 32, CPL, BX_READ, (void *) data);
      return;
    }
    else {
      BX_ERROR(("read_virtual_ymmword_aligned_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!read_virtual_checks(seg, offset, 32))
    exception(int_number(s), 0);
  goto accessOK;
}

#endif

#endif

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations                      //
// address translation info is kept across read/write calls  //
//////////////////////////////////////////////////////////////
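// The read_RMW_* accessor translates the address, performs the read
// half, and records the translation in BX_CPU_THIS_PTR address_xlation;
// the matching write_RMW_* call then completes the write without
// repeating translation or permission checks. Typical pairing in a
// read-modify-write instruction (illustrative sketch, not code from
// this file):
//
//   Bit32u op = read_RMW_virtual_dword_32(s, offset); // translate + read
//   op += src;                                        // modify
//   write_RMW_virtual_dword(op);                      // reuse translation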

  Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit8u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
      Bit32u lpf = LPFOf(laddr);
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
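      // The ao486 project's "AO change" below replaces the usual
      // tlbEntry->lpf == lpf test with if (0), disabling the host-pointer
      // fast path so every RMW read goes through access_read_linear()
      // (the same edit appears in the word and dword variants below).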
861
//AO change next line
862
      if (0) { //tlbEntry->lpf == lpf) {
863
        // See if the TLB entry privilege level allows us write access
864
        // from this CPL.
865
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
866
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
867
          Bit32u pageOffset = PAGE_OFFSET(laddr);
868
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
869
          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
870
          pageWriteStampTable.decWriteStamp(pAddr, 1);
871
          data = *hostAddr;
872
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
873
          BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
874
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 1, CPL, BX_READ, (Bit8u*) &data);
875
          return data;
876
        }
877
      }
878
      access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
879
      return data;
880
    }
881
    else {
882
      BX_ERROR(("read_RMW_virtual_byte_32(): segment limit violation"));
883
      exception(int_number(s), 0);
884
    }
885
  }
886
 
887
  if (!write_virtual_checks(seg, offset, 1))
888
    exception(int_number(s), 0);
889
  goto accessOK;
890
}

  Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit16u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
//AO change next line
      if (0) { //tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 2);
          ReadHostWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 1) {
          BX_ERROR(("read_RMW_virtual_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_word_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 2))
    exception(int_number(s), 0);
  goto accessOK;
}

  Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit32u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
//AO change next line
      if (0) { //tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 4);
          ReadHostDWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 3) {
          BX_ERROR(("read_RMW_virtual_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_dword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 4))
    exception(int_number(s), 0);
  goto accessOK;
}

  Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
{
  Bit32u laddr;
  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
  Bit64u data;

  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = get_laddr32(s, offset);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << USER_PL)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 8);
          ReadHostQWordFromLittleEndian(hostAddr, data);
          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
          BX_CPU_THIS_PTR address_xlation.paddress1 = pAddr;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, CPL, BX_READ, (Bit8u*) &data);
          return data;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check()) {
        if (laddr & 7) {
          BX_ERROR(("read_RMW_virtual_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
      return data;
    }
    else {
      BX_ERROR(("read_RMW_virtual_qword_32(): segment limit violation"));
      exception(int_number(s), 0);
    }
  }

  if (!write_virtual_checks(seg, offset, 8))
    exception(int_number(s), 0);
  goto accessOK;
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_byte(Bit8u val8)
{
  BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
    BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, 0, (Bit8u*) &val8);

  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit8u *hostAddr = (Bit8u *) BX_CPU_THIS_PTR address_xlation.pages;
    *hostAddr = val8;
  }
  else {
    // address_xlation.pages must be 1
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val8);
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_word(Bit16u val16)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit16u *hostAddr = (Bit16u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostWordToLittleEndian(hostAddr, val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, 0, (Bit8u*) &val16);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 2, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, 0, (Bit8u*) &val16);
  }
  else {
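    // pages == 2: the word straddles a page boundary, so its two bytes
    // go to two separately translated physical pages; the #ifdef picks
    // the byte of val16 that corresponds to the lower guest address on
    // this host's endianness.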
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, 0, (Bit8u*) &val16);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, 0, ((Bit8u*) &val16)+1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 1, ((Bit8u *) &val16) + 1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, 0, ((Bit8u*) &val16)+1);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2, 1, &val16);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, 0, (Bit8u*) &val16);
#endif
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_dword(Bit32u val32)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit32u *hostAddr = (Bit32u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostDWordToLittleEndian(hostAddr, val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, 0, (Bit8u*) &val32);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 4, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, 0, (Bit8u*) &val32);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, 0, (Bit8u*) &val32);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, 0,
        ((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, 0,
        ((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, &val32);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, 0, (Bit8u*) &val32);
#endif
  }
}

  void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
{
  if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
    // Pages > 2 means it stores a host address for direct access.
    Bit64u *hostAddr = (Bit64u *) BX_CPU_THIS_PTR address_xlation.pages;
    WriteHostQWordToLittleEndian(hostAddr, val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, 0, (Bit8u*) &val64);
  }
  else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1, 8, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, 0, (Bit8u*) &val64);
  }
  else {
#ifdef BX_LITTLE_ENDIAN
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, 0, (Bit8u*) &val64);
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, 0,
        ((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
#else
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress1,
        BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, 0,
        ((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
    access_write_physical(BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, &val64);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
        BX_CPU_THIS_PTR address_xlation.paddress2,
        BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, 0, (Bit8u*) &val64);
#endif
  }
}

//
// Write data to a new stack; these methods are required for emulation
// correctness but not performance critical.
//
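// Unlike the accessors above, these take the target stack segment's
// descriptor pointer and the current privilege level explicitly rather
// than an sregs[] index; a limit violation raises #SS, with the new
// stack selector's error code when its RPL differs from CPL.
//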

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << user)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 2);
          WriteHostWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 1) {
          BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 2, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_word_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 2))
    exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << user)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 4);
          WriteHostDWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 3) {
          BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 4, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_dword_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 4))
    exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  goto accessOK;
}

// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_qword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit64u data)
{
  Bit32u laddr;

  if (seg->cache.valid & SegAccessWOK) {
    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
      bx_bool user = (curr_pl == 3);
      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
      Bit32u lpf = LPFOf(laddr);
#endif
      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
      if (tlbEntry->lpf == lpf) {
        // See if the TLB entry privilege level allows us write access
        // from this CPL.
        if (tlbEntry->accessBits & (0x04 << user)) {
          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
          Bit32u pageOffset = PAGE_OFFSET(laddr);
          bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
          BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
          pageWriteStampTable.decWriteStamp(pAddr, 8);
          WriteHostQWordToLittleEndian(hostAddr, data);
          return;
        }
      }

#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
      if (BX_CPU_THIS_PTR alignment_check() && user) {
        if (laddr & 7) {
          BX_ERROR(("write_new_stack_qword_32(): #AC misaligned access"));
          exception(BX_AC_EXCEPTION, 0);
        }
      }
#endif

      access_write_linear(laddr, 8, curr_pl, (void *) &data);
      return;
    }
    else {
      BX_ERROR(("write_new_stack_qword_32(): segment limit violation"));
      exception(BX_SS_EXCEPTION,
         seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
    }
  }

  // add error code when segment violation occurs when pushing into new stack
  if (!write_virtual_checks(seg, offset, 8))
    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0);
  goto accessOK;
}
