OpenCores
URL https://opencores.org/ocsvn/ao486/ao486/trunk

Subversion Repositories ao486

[/] [ao486/] [trunk/] [bochs486/] [cpu/] [access.cc] - Blame information for rev 2

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 alfik
/////////////////////////////////////////////////////////////////////////
2
// $Id: access.cc 11574 2013-01-16 17:28:20Z sshwarts $
3
/////////////////////////////////////////////////////////////////////////
4
//
5
//  Copyright (C) 2005-2010  The Bochs Project
6
//
7
//  This library is free software; you can redistribute it and/or
8
//  modify it under the terms of the GNU Lesser General Public
9
//  License as published by the Free Software Foundation; either
10
//  version 2 of the License, or (at your option) any later version.
11
//
12
//  This library is distributed in the hope that it will be useful,
13
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
14
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
//  Lesser General Public License for more details.
16
//
17
//  You should have received a copy of the GNU Lesser General Public
18
//  License along with this library; if not, write to the Free Software
19
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20
//
21
/////////////////////////////////////////////////////////////////////////
22
 
23
#define NEED_CPU_REG_SHORTCUTS 1
24
#include "bochs.h"
25
#include "cpu.h"
26
#define LOG_THIS BX_CPU_THIS_PTR
27
 
28
// Effective-address truncation masks, indexed by the 2-bit 'asize'
// encoding of the current instruction: 16-bit, 32-bit and (when x86-64
// support is compiled in) 64-bit address sizes.
bx_address bx_asize_mask[] = {
  0xffff,                         // as16 (asize = '00)
  0xffffffff,                     // as32 (asize = '01)
#if BX_SUPPORT_X86_64
  BX_CONST64(0xffffffffffffffff), // as64 (asize = '10)
  BX_CONST64(0xffffffffffffffff)  // as64 (asize = '11)
#endif
};
36
 
37
  bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  // Verify that a write of 'length' bytes at 'offset' is permitted by the
  // cached descriptor of 'seg'.  Returns 1 when the access is legal,
  // 0 when it must fault.
  if (seg->cache.valid == 0) {
    BX_DEBUG(("write_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { // segment not present
    BX_ERROR(("write_virtual_checks(): segment not present"));
    return 0;
  }

  length--;  // from here on, 'length' is the offset of the last byte touched

  switch (seg->cache.type) {
    case 2: case 3: // read/write (expand-up)
      // Both tests are required: the subtraction alone could wrap when
      // 'length' exceeds the scaled limit.
      if (length > seg->cache.u.segment.limit_scaled
          || offset > (seg->cache.u.segment.limit_scaled - length))
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w"));
        return 0;
      }
      if (seg->cache.u.segment.limit_scaled >= 31) {
        // Mark the cache as OK for the accelerated read/write path.  The
        // (simpler) limit check is still done by the accessors.  The limit
        // must accommodate at least a dword, because other functions
        // subtract from it and the value must not roll over.  Only normal
        // (expand-up) segments are handled this way.
        seg->cache.valid |= SegAccessROK | SegAccessWOK;
      }
      break;

    case 6: case 7: { // read/write, expand down
      // Valid offsets of an expand-down segment lie in (limit_scaled, top],
      // where 'top' depends on the default-size (d_b) bit.
      Bit32u upper_limit = seg->cache.u.segment.d_b ? 0xffffffff : 0x0000ffff;
      if (offset <= seg->cache.u.segment.limit_scaled ||
          offset > upper_limit || (upper_limit - offset) < length)
      {
        BX_ERROR(("write_virtual_checks(): write beyond limit, r/w ED"));
        return 0;
      }
      break;
    }

    case 0: case 1:   // read only
    case 4: case 5:   // read only, expand down
    case 8: case 9:   // execute only
    case 10: case 11: // execute/read
    case 12: case 13: // execute only, conforming
    case 14: case 15: // execute/read-only, conforming
      // None of these descriptor types is writable.
      BX_ERROR(("write_virtual_checks(): no write access to seg"));
      return 0;

    default:
      BX_PANIC(("write_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
102
 
103
  bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  // Verify that reading 'length' bytes at 'offset' is permitted by the
  // cached descriptor of 'seg'.  Returns 1 when legal, 0 when the access
  // must fault.
  if (seg->cache.valid == 0) {
    BX_DEBUG(("read_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { // segment not present
    BX_ERROR(("read_virtual_checks(): segment not present"));
    return 0;
  }

  length--;  // from here on, 'length' is the offset of the last byte touched

  switch (seg->cache.type) {
    case 0: case 1:   // read only
    case 2: case 3:   // read/write
    case 10: case 11: // execute/read
    case 14: case 15: // execute/read-only, conforming
      // Expand-up: the whole range must fit below the scaled limit; the
      // first test guards against wrap-around in the subtraction.
      if (length > seg->cache.u.segment.limit_scaled
          || offset > (seg->cache.u.segment.limit_scaled - length))
      {
        BX_ERROR(("read_virtual_checks(): read beyond limit"));
        return 0;
      }
      if (seg->cache.u.segment.limit_scaled >= 31) {
        // Enable the accelerated read path; see write_virtual_checks()
        // for the rationale behind the >= 31 requirement.
        seg->cache.valid |= SegAccessROK;
      }
      break;

    case 4: case 5:   // read only, expand down
    case 6: case 7: { // read/write, expand down
      // Valid offsets of an expand-down segment lie in (limit_scaled, top].
      Bit32u upper_limit = seg->cache.u.segment.d_b ? 0xffffffff : 0x0000ffff;
      if (offset <= seg->cache.u.segment.limit_scaled ||
          offset > upper_limit || (upper_limit - offset) < length)
      {
        BX_ERROR(("read_virtual_checks(): read beyond limit ED"));
        return 0;
      }
      break;
    }

    case 8: case 9:   // execute only
    case 12: case 13: // execute only, conforming
      // An execute-only segment can never be read (or written).
      BX_ERROR(("read_virtual_checks(): execute only"));
      return 0;

    default:
      BX_PANIC(("read_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
164
 
165
  bx_bool BX_CPP_AttrRegparmN(3)
BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned length)
{
  // Verify that fetching 'length' bytes at 'offset' is permitted by the
  // cached descriptor of 'seg'.  Unlike read_virtual_checks(), execute-only
  // segment types are acceptable here.  Returns 1 when legal, 0 otherwise.
  if (seg->cache.valid == 0) {
    BX_DEBUG(("execute_virtual_checks(): segment descriptor not valid"));
    return 0;
  }

  if (seg->cache.p == 0) { // segment not present
    BX_ERROR(("execute_virtual_checks(): segment not present"));
    return 0;
  }

  length--;  // from here on, 'length' is the offset of the last byte touched

  switch (seg->cache.type) {
    case 0: case 1:   // read only
    case 2: case 3:   // read/write
    case 10: case 11: // execute/read
    case 14: case 15: // execute/read-only, conforming
      // Expand-up: the whole range must fit below the scaled limit; the
      // first test guards against wrap-around in the subtraction.
      if (length > seg->cache.u.segment.limit_scaled
          || offset > (seg->cache.u.segment.limit_scaled - length))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit"));
        return 0;
      }
      if (seg->cache.u.segment.limit_scaled >= 31) {
        // Enable the accelerated read path; see write_virtual_checks()
        // for the rationale behind the >= 31 requirement.
        seg->cache.valid |= SegAccessROK;
      }
      break;

    case 8: case 9:   // execute only
    case 12: case 13: // execute only, conforming
      // Execute-only: same expand-up limit check, but no read-OK caching.
      if (length > seg->cache.u.segment.limit_scaled
          || offset > (seg->cache.u.segment.limit_scaled - length))
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit execute only"));
        return 0;
      }
      break;

    case 4: case 5:   // read only, expand down
    case 6: case 7: { // read/write, expand down
      // Valid offsets of an expand-down segment lie in (limit_scaled, top].
      Bit32u upper_limit = seg->cache.u.segment.d_b ? 0xffffffff : 0x0000ffff;
      if (offset <= seg->cache.u.segment.limit_scaled ||
          offset > upper_limit || (upper_limit - offset) < length)
      {
        BX_ERROR(("execute_virtual_checks(): read beyond limit ED"));
        return 0;
      }
      break;
    }

    default:
      BX_PANIC(("execute_virtual_checks(): unknown descriptor type=%d", seg->cache.type));
  }

  return 1;
}
230
 
231
const char *BX_CPU_C::strseg(bx_segment_reg_t *seg)
{
  // Map a pointer to one of the six segment registers back to its printable
  // name, for use in log messages.  Panics on a pointer that is not one of
  // the CPU's segment registers.
  static const unsigned sreg_index[6] = {
    BX_SEG_REG_ES, BX_SEG_REG_CS, BX_SEG_REG_SS,
    BX_SEG_REG_DS, BX_SEG_REG_FS, BX_SEG_REG_GS
  };
  static const char *sreg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

  for (unsigned n = 0; n < 6; n++) {
    if (seg == &BX_CPU_THIS_PTR sregs[sreg_index[n]])
      return(sreg_name[n]);
  }

  BX_PANIC(("undefined segment passed to strseg()!"));
  return("??");
}
244
 
245
int BX_CPU_C::int_number(unsigned s)
246
{
247
  if (s == BX_SEG_REG_SS)
248
    return BX_SS_EXCEPTION;
249
  else
250
    return BX_GP_EXCEPTION;
251
}
252
 
253
  Bit8u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_byte(bx_address laddr)
{
  // Read one byte at linear address 'laddr' with system (CPL 0) rights.
  Bit8u data;

  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 0)];
  // Fast path: TLB hit and the entry's access bits allow a system read.
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & 0x01)) {
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    Bit8u *hostAddr = (Bit8u*) (entry->hostPageAddr | pageOffset);
    data = *hostAddr;
    BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (entry->ppf | pageOffset), 1, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

#if BX_SUPPORT_X86_64
  if (! IsCanonical(laddr)) {
    BX_ERROR(("system_read_byte(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_read_linear(laddr, 1, 0, BX_READ, (void *) &data);
  return data;
}
284
 
285
  Bit16u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_word(bx_address laddr)
{
  // Read a 16-bit word at linear address 'laddr' with system (CPL 0) rights.
  Bit16u data;

  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 1)];
  // Fast path: TLB hit and the entry's access bits allow a system read.
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & 0x01)) {
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    Bit16u *hostAddr = (Bit16u*) (entry->hostPageAddr | pageOffset);
    ReadHostWordFromLittleEndian(hostAddr, data);
    BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (entry->ppf | pageOffset), 2, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

#if BX_SUPPORT_X86_64
  // Both the first and the last byte of the access must be canonical.
  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("system_read_word(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_read_linear(laddr, 2, 0, BX_READ, (void *) &data);
  return data;
}
316
 
317
  Bit32u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_dword(bx_address laddr)
{
  // Read a 32-bit dword at linear address 'laddr' with system (CPL 0) rights.
  Bit32u data;

  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 3)];
  // Fast path: TLB hit and the entry's access bits allow a system read.
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & 0x01)) {
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    Bit32u *hostAddr = (Bit32u*) (entry->hostPageAddr | pageOffset);
    ReadHostDWordFromLittleEndian(hostAddr, data);
    BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (entry->ppf | pageOffset), 4, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

#if BX_SUPPORT_X86_64
  // Both the first and the last byte of the access must be canonical.
  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("system_read_dword(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_read_linear(laddr, 4, 0, BX_READ, (void *) &data);
  return data;
}
348
 
349
  Bit64u BX_CPP_AttrRegparmN(1)
BX_CPU_C::system_read_qword(bx_address laddr)
{
  // Read a 64-bit qword at linear address 'laddr' with system (CPL 0) rights.
  Bit64u data;

  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 7)];
  // Fast path: TLB hit and the entry's access bits allow a system read.
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & 0x01)) {
    Bit32u pageOffset = PAGE_OFFSET(laddr);
    Bit64u *hostAddr = (Bit64u*) (entry->hostPageAddr | pageOffset);
    ReadHostQWordFromLittleEndian(hostAddr, data);
    BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, (entry->ppf | pageOffset), 8, 0, BX_READ, (Bit8u*) &data);
    return data;
  }

#if BX_SUPPORT_X86_64
  // Both the first and the last byte of the access must be canonical.
  if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
    BX_ERROR(("system_read_qword(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_read_linear(laddr, 8, 0, BX_READ, (void *) &data);
  return data;
}
380
 
381
  void BX_CPP_AttrRegparmN(2)
BX_CPU_C::system_write_byte(bx_address laddr, Bit8u data)
{
  // Write one byte at linear address 'laddr' with system (CPL 0) rights.
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
  // BUGFIX: the linear page frame must be held in a bx_address, not a
  // Bit32u — with BX_SUPPORT_X86_64 a Bit32u truncates LPFOf() and the TLB
  // tag compare below.  This matches the system_read_* helpers above.
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & 0x04) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 1, 0, BX_WRITE, (Bit8u*) &data);
      Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
      // Record the write against this physical page before modifying it.
      pageWriteStampTable.decWriteStamp(pAddr, 1);
      *hostAddr = data;
      return;
    }
  }

#if BX_SUPPORT_X86_64
  if (! IsCanonical(laddr)) {
    BX_ERROR(("system_write_byte(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_write_linear(laddr, 1, 0, (void *) &data);
}
411
 
412
  void BX_CPP_AttrRegparmN(2)
BX_CPU_C::system_write_word(bx_address laddr, Bit16u data)
{
  // Write a 16-bit word at linear address 'laddr' with system (CPL 0) rights.
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
  // BUGFIX: the linear page frame must be held in a bx_address, not a
  // Bit32u — with BX_SUPPORT_X86_64 a Bit32u truncates LPFOf() and the TLB
  // tag compare below.  This matches the system_read_* helpers above.
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & 0x04) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 2, 0, BX_WRITE, (Bit8u*) &data);
      Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
      // Record the write against this physical page before modifying it.
      pageWriteStampTable.decWriteStamp(pAddr, 2);
      WriteHostWordToLittleEndian(hostAddr, data);
      return;
    }
  }

#if BX_SUPPORT_X86_64
  // Both the first and the last byte of the access must be canonical.
  if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
    BX_ERROR(("system_write_word(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_write_linear(laddr, 2, 0, (void *) &data);
}
442
 
443
  void BX_CPP_AttrRegparmN(2)
BX_CPU_C::system_write_dword(bx_address laddr, Bit32u data)
{
  // Write a 32-bit dword at linear address 'laddr' with system (CPL 0) rights.
  unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
  // BUGFIX: the linear page frame must be held in a bx_address, not a
  // Bit32u — with BX_SUPPORT_X86_64 a Bit32u truncates LPFOf() and the TLB
  // tag compare below.  This matches the system_read_* helpers above.
  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
  if (tlbEntry->lpf == lpf) {
    // See if the TLB entry privilege level allows us write access
    // from this CPL.
    if (tlbEntry->accessBits & 0x04) {
      bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
      Bit32u pageOffset = PAGE_OFFSET(laddr);
      bx_phy_address pAddr = tlbEntry->ppf | pageOffset;
      BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 4, 0, BX_WRITE, (Bit8u*) &data);
      Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
      // Record the write against this physical page before modifying it.
      pageWriteStampTable.decWriteStamp(pAddr, 4);
      WriteHostDWordToLittleEndian(hostAddr, data);
      return;
    }
  }

#if BX_SUPPORT_X86_64
  // Both the first and the last byte of the access must be canonical.
  if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
    BX_ERROR(("system_write_dword(): canonical failure"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

  // Slow path: full linear-address walk (may raise a page fault).
  access_write_linear(laddr, 4, 0, (void *) &data);
}
473
 
474
  Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_byte(bx_address laddr, bx_bool user)
{
  // Translate linear address 'laddr' to a host pointer suitable for a
  // one-byte read, checking the access bits for the requested privilege
  // ('user' selects the user-mode bit).  Returns 0 on TLB miss or when
  // the entry does not grant read access; the caller must then fall back
  // to the slow path.
  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 0)];
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & (0x01 << user))) {
    return (Bit8u*) (entry->hostPageAddr | PAGE_OFFSET(laddr));
  }

  return 0;
}
493
 
494
  Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_byte(bx_address laddr, bx_bool user)
{
  // Translate linear address 'laddr' to a host pointer suitable for a
  // one-byte write, checking the access bits for the requested privilege
  // ('user' selects the user-mode bit).  Returns 0 on TLB miss or when
  // the entry does not grant write access.
  bx_TLB_entry *entry = &BX_CPU_THIS_PTR TLB.entry[BX_TLB_INDEX_OF(laddr, 0)];
  if (entry->lpf == LPFOf(laddr) && (entry->accessBits & (0x04 << user))) {
    // Record the impending write against this physical page.
    pageWriteStampTable.decWriteStamp(entry->ppf);
    return (Bit8u*) (entry->hostPageAddr | PAGE_OFFSET(laddr));
  }

  return 0;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.