OpenCores
URL https://opencores.org/ocsvn/ao486/ao486/trunk

Subversion Repositories ao486

[/] [ao486/] [trunk/] [bochs486/] [cpu/] [ctrl_xfer32.cc] - Blame information for rev 8

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 alfik
/////////////////////////////////////////////////////////////////////////
2
// $Id: ctrl_xfer32.cc 11648 2013-03-06 21:11:23Z sshwarts $
3
/////////////////////////////////////////////////////////////////////////
4
//
5
//  Copyright (C) 2001-2012  The Bochs Project
6
//
7
//  This library is free software; you can redistribute it and/or
8
//  modify it under the terms of the GNU Lesser General Public
9
//  License as published by the Free Software Foundation; either
10
//  version 2 of the License, or (at your option) any later version.
11
//
12
//  This library is distributed in the hope that it will be useful,
13
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
14
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
//  Lesser General Public License for more details.
16
//
17
//  You should have received a copy of the GNU Lesser General Public
18
//  License along with this library; if not, write to the Free Software
19
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20
/////////////////////////////////////////////////////////////////////////
21
 
22
#define NEED_CPU_REG_SHORTCUTS 1
23
#include "bochs.h"
24
#include "cpu.h"
25
#define LOG_THIS BX_CPU_THIS_PTR
26
 
27
#if BX_CPU_LEVEL >= 3
28
 
29
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // The CS limit check applies in every mode, not only in protected mode.
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("branch_near32: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = new_EIP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // raise the magic async_event so that execution of the current trace stops
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
47
 
48
// RET near with immediate: pop EIP, then release imm16 extra stack bytes.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  RSP_SPECULATIVE;

  Bit16u stack_adjust = i->Iw();
  Bit32u return_EIP = pop_32();

  // the popped EIP must lie inside the CS segment limit
  if (return_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETnear32_Iw: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0);
  }
  EIP = return_EIP;

  // drop the immediate byte count from the stack; the width of the stack
  // pointer update depends on the SS descriptor B bit
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += stack_adjust;
  else
    SP += stack_adjust;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
78
 
79
// RET near: pop the return EIP from the stack.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear32(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  RSP_SPECULATIVE;

  Bit32u new_EIP = pop_32();

  // the popped EIP must lie inside the CS segment limit
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETnear32: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0);
  }
  EIP = new_EIP;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
103
 
104
// RET far with immediate: pop EIP and CS, then release imm16 stack bytes.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar32_Iw(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit16u imm16 = i->Iw();
  Bit16u cs_raw;
  Bit32u new_EIP;

  // the protected-mode far return (with all its checks) is handled separately
  if (protected_mode()) {
    return_protected(i, imm16);
    goto done;
  }

  RSP_SPECULATIVE;

  new_EIP =          pop_32();
  cs_raw  = (Bit16u) pop_32(); /* 32bit pop, MSW discarded */

  // CS.LIMIT can't change when in real/v8086 mode
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETfar32_Iw: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = new_EIP;

  // release imm16 bytes of parameters from the caller's stack
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += imm16;
  else
    SP += imm16;

  RSP_COMMIT;

done:

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
149
 
150
// Near relative call: push return EIP, branch to EIP + signed displacement.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jd(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // Cast the displacement to signed, matching JMP_Jd and the Jcc handlers;
  // the 32-bit wraparound result is identical, the cast documents intent.
  Bit32u new_EIP = EIP + (Bit32s) i->Id();

  RSP_SPECULATIVE;

  /* push 32 bit EA of next instruction */
  push_32(EIP);

  branch_near32(new_EIP);

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
171
 
172
// Far direct call: CALL ptr16:32.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ap(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit16u cs_raw;
  Bit32u disp32;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  cs_raw = i->Iw2();
  disp32 = i->Id();

  RSP_SPECULATIVE;

  // the protected-mode far call (with all its checks) is handled separately
  if (protected_mode()) {
    call_protected(i, cs_raw, disp32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL32_Ap: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // save the return address: CS first, then EIP
  push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_32(EIP);

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = disp32;

done:
  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
215
 
216
// Near indirect call through a 32-bit register operand.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EdR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit32u target = BX_READ_32BIT_REG(i->dst());

  RSP_SPECULATIVE;

  /* push 32 bit EA of next instruction */
  push_32(EIP);

  branch_near32(target);

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
237
 
238
// Far indirect call through a memory operand (offset + selector pair).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL32_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit32u new_EIP;

  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  new_EIP = read_virtual_dword(i->seg(), eaddr);
  cs_raw  = read_virtual_word (i->seg(), (eaddr+4) & i->asize_mask());

  RSP_SPECULATIVE;

  // the protected-mode far call (with all its checks) is handled separately
  if (protected_mode()) {
    call_protected(i, cs_raw, new_EIP);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL32_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // save the return address: CS first, then EIP
  push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_32(EIP);

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = new_EIP;

done:
  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
282
 
283
// Unconditional near relative jump.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jd(bxInstruction_c *i)
{
  Bit32u target = EIP + (Bit32s) i->Id();
  branch_near32(target);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, PREV_RIP, target);

  BX_LINK_TRACE(i);
}
291
 
292
// Jump if overflow (OF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jd(bxInstruction_c *i)
{
  if (get_OF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
304
 
305
// Jump if not overflow (OF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jd(bxInstruction_c *i)
{
  if (! get_OF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
317
 
318
// Jump if below / carry (CF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jd(bxInstruction_c *i)
{
  if (get_CF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
330
 
331
// Jump if not below / no carry (CF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jd(bxInstruction_c *i)
{
  if (! get_CF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
343
 
344
// Jump if zero / equal (ZF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jd(bxInstruction_c *i)
{
  if (get_ZF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
356
 
357
// Jump if not zero / not equal (ZF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jd(bxInstruction_c *i)
{
  if (! get_ZF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
369
 
370
// Jump if below or equal (CF=1 or ZF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jd(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
382
 
383
// Jump if above (CF=0 and ZF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jd(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
395
 
396
// Jump if sign (SF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jd(bxInstruction_c *i)
{
  if (get_SF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
408
 
409
// Jump if not sign (SF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jd(bxInstruction_c *i)
{
  if (! get_SF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
421
 
422
// Jump if parity even (PF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jd(bxInstruction_c *i)
{
  if (get_PF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
434
 
435
// Jump if parity odd (PF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jd(bxInstruction_c *i)
{
  if (! get_PF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
447
 
448
// Jump if less (SF != OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jd(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
460
 
461
// Jump if greater or equal (SF == OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jd(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
473
 
474
// Jump if less or equal (ZF=1 or SF != OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jd(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
486
 
487
// Jump if greater (ZF=0 and SF == OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jd(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // execution may continue within the trace when not taken
}
499
 
500
// Far direct jump: JMP ptr16:16 / ptr16:32.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Ap(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  Bit32u disp32;
  Bit16u cs_raw;

  invalidate_prefetch_q();

  // the offset width depends on the operand size of the instruction
  disp32 = i->os32L() ? i->Id() : (Bit32u) i->Iw();
  cs_raw = i->Iw2();

  // jump_protected doesn't affect ESP so it is ESP safe
  if (protected_mode()) {
    jump_protected(i, cs_raw, disp32);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (disp32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("JMP_Ap: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = disp32;

done:
  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
538
 
539
// Near indirect jump through a 32-bit register operand.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EdR(bxInstruction_c *i)
{
  Bit32u target = BX_READ_32BIT_REG(i->dst());
  branch_near32(target);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT, PREV_RIP, target);

  BX_NEXT_TRACE(i);
}
547
 
548
/* Far indirect jump */
549
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP32_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit32u new_EIP;

  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  new_EIP = read_virtual_dword(i->seg(), eaddr);
  cs_raw  = read_virtual_word (i->seg(), (eaddr+4) & i->asize_mask());

  // jump_protected doesn't affect RSP so it is RSP safe
  if (protected_mode()) {
    jump_protected(i, cs_raw, new_EIP);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("JMP32_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = new_EIP;

done:
  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
583
 
584
// IRET (32-bit): return from interrupt; also handles the SVM/VMX NMI
// unblocking side effects that IRET triggers.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET32(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  invalidate_prefetch_q();

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IRET)) Svm_Vmexit(SVM_VMEXIT_IRET);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (is_masked_event(PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) ? BX_EVENT_VMX_VIRTUAL_NMI : BX_EVENT_NMI))
      BX_CPU_THIS_PTR nmi_unblocking_iret = 1;

  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) unmask_event(BX_EVENT_VMX_VIRTUAL_NMI);
  }
  else
#endif
    unmask_event(BX_EVENT_NMI);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  // the protected-mode IRET (with all its checks) is handled separately
  if (protected_mode()) {
    iret_protected(i);
    goto done;
  }

  RSP_SPECULATIVE;

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    iret32_stack_return_from_v86(i);
  }
  else {
    Bit32u new_EIP =          pop_32();
    Bit16u new_CS  = (Bit16u) pop_32(); // #SS has higher priority
    Bit32u flags32 =          pop_32();

    // CS.LIMIT can't change when in real/v8086 mode
    if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("IRET32: instruction pointer not within code segment limits"));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], new_CS);
    EIP = new_EIP;
    writeEFlags(flags32, 0x00257fd5); // VIF, VIP, VM unchanged
  }

  RSP_COMMIT;

done:

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
652
 
653
// Jump if ECX (or CX, depending on address size) is zero.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JECXZ_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  // address size selects whether the full ECX or only CX is tested
  Bit32u counter = i->as32L() ? ECX : (Bit32u) CX;

  if (counter == 0) {
    Bit32u target = EIP + (Bit32s) i->Id();
    branch_near32(target);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_TRACE(i);
}
675
 
676
//
677
// There is some weirdness in LOOP instructions definition. If an exception
678
// was generated during the instruction execution (for example #GP fault
679
// because EIP was beyond CS segment limits) CPU state should restore the
680
// state prior to instruction execution.
681
//
682
// The final point that we are not allowed to decrement ECX register before
683
// it is known that no exceptions can happen.
684
//
685
 
686
// LOOPNE: decrement the counter and branch while it is non-zero and ZF=0.
// The counter register is written back only after the branch can no longer
// fault, so a #GP restores the pre-instruction state.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u n = ECX;

    n--;
    if (n != 0 && (get_ZF()==0)) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = n;
  }
  else {
    Bit16u n = CX;

    n--;
    if (n != 0 && (get_ZF()==0)) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = n;
  }

  BX_NEXT_TRACE(i);
}
728
 
729
// LOOPE: decrement the counter and branch while it is non-zero and ZF=1.
// The counter register is written back only after the branch can no longer
// fault, so a #GP restores the pre-instruction state.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u n = ECX;

    n--;
    if (n != 0 && get_ZF()) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = n;
  }
  else {
    Bit16u n = CX;

    n--;
    if (n != 0 && get_ZF()) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = n;
  }

  BX_NEXT_TRACE(i);
}
771
 
772
// LOOP: decrement the counter and branch while it is non-zero.
// The counter register is written back only after the branch can no longer
// fault, so a #GP restores the pre-instruction state.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP32_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u n = ECX;

    n--;
    if (n != 0) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = n;
  }
  else {
    Bit16u n = CX;

    n--;
    if (n != 0) {
      Bit32u target = EIP + (Bit32s) i->Id();
      branch_near32(target);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, target);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = n;
  }

  BX_NEXT_TRACE(i);
}
814
 
815
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.