/////////////////////////////////////////////////////////////////////////
// $Id: icache.cc 11402 2012-09-04 15:45:05Z sshwarts $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2007-2011 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////
24 |
|
|
#define NEED_CPU_REG_SHORTCUTS 1
|
25 |
|
|
#include "bochs.h"
|
26 |
|
|
#include "cpu.h"
|
27 |
|
|
#define LOG_THIS BX_CPU_THIS_PTR
|
28 |
|
|
|
29 |
|
|
#include "param_names.h"
|
30 |
|
|
|
31 |
|
|
bxPageWriteStampTable pageWriteStampTable;
|
32 |
|
|
|
33 |
|
|
void flushICaches(void)
|
34 |
|
|
{
|
35 |
|
|
for (unsigned i=0; i<BX_SMP_PROCESSORS; i++) {
|
36 |
|
|
BX_CPU(i)->iCache.flushICacheEntries();
|
37 |
|
|
BX_CPU(i)->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
|
38 |
|
|
}
|
39 |
|
|
|
40 |
|
|
pageWriteStampTable.resetWriteStamps();
|
41 |
|
|
}
|
42 |
|
|
|
43 |
|
|
void handleSMC(bx_phy_address pAddr, Bit32u mask)
|
44 |
|
|
{
|
45 |
|
|
for (unsigned i=0; i<BX_SMP_PROCESSORS; i++) {
|
46 |
|
|
BX_CPU(i)->async_event |= BX_ASYNC_EVENT_STOP_TRACE;
|
47 |
|
|
BX_CPU(i)->iCache.handleSMC(pAddr, mask);
|
48 |
|
|
}
|
49 |
|
|
}
|
50 |
|
|
|
51 |
|
|
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
|
52 |
|
|
|
53 |
|
|
BX_INSF_TYPE BX_CPU_C::BxEndTrace(bxInstruction_c *i)
|
54 |
|
|
{
|
55 |
|
|
// do nothing, return to main cpu_loop
|
56 |
|
|
}
|
57 |
|
|
|
58 |
|
|
void genDummyICacheEntry(bxInstruction_c *i)
|
59 |
|
|
{
|
60 |
|
|
i->setILen(0);
|
61 |
|
|
i->setIaOpcode(BX_INSERTED_OPCODE);
|
62 |
|
|
i->execute1 = &BX_CPU_C::BxEndTrace;
|
63 |
|
|
}
|
64 |
|
|
|
65 |
|
|
#endif
|
66 |
|
|
|
67 |
|
|
bxICacheEntry_c* BX_CPU_C::serveICacheMiss(bxICacheEntry_c *entry, Bit32u eipBiased, bx_phy_address pAddr)
|
68 |
|
|
{
|
69 |
|
|
entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);
|
70 |
|
|
|
71 |
|
|
BX_CPU_THIS_PTR iCache.victim_entry(entry, BX_CPU_THIS_PTR fetchModeMask);
|
72 |
|
|
|
73 |
|
|
BX_CPU_THIS_PTR iCache.alloc_trace(entry);
|
74 |
|
|
|
75 |
|
|
// Cache miss. We weren't so lucky, but let's be optimistic - try to build
|
76 |
|
|
// trace from incoming instruction bytes stream !
|
77 |
|
|
entry->pAddr = pAddr;
|
78 |
|
|
entry->traceMask = 0;
|
79 |
|
|
|
80 |
|
|
unsigned remainingInPage = BX_CPU_THIS_PTR eipPageWindowSize - eipBiased;
|
81 |
|
|
const Bit8u *fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;
|
82 |
|
|
int ret;
|
83 |
|
|
|
84 |
|
|
bxInstruction_c *i = entry->i;
|
85 |
|
|
|
86 |
|
|
Bit32u pageOffset = PAGE_OFFSET((Bit32u) pAddr);
|
87 |
|
|
Bit32u traceMask = 0;
|
88 |
|
|
|
89 |
|
|
// Don't allow traces longer than cpu_loop can execute
|
90 |
|
|
static unsigned quantum =
|
91 |
|
|
#if BX_SUPPORT_SMP
|
92 |
|
|
(BX_SMP_PROCESSORS > 1) ? SIM->get_param_num(BXPN_SMP_QUANTUM)->get() :
|
93 |
|
|
#endif
|
94 |
|
|
BX_MAX_TRACE_LENGTH;
|
95 |
|
|
|
96 |
|
|
for (unsigned n=0;n < quantum;n++)
|
97 |
|
|
{
|
98 |
|
|
#if BX_SUPPORT_X86_64
|
99 |
|
|
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
|
100 |
|
|
ret = fetchDecode64(fetchPtr, i, remainingInPage);
|
101 |
|
|
else
|
102 |
|
|
#endif
|
103 |
|
|
ret = fetchDecode32(fetchPtr, i, remainingInPage);
|
104 |
|
|
|
105 |
|
|
if (ret < 0) {
|
106 |
|
|
// Fetching instruction on segment/page boundary
|
107 |
|
|
if (n > 0) {
|
108 |
|
|
// The trace is already valid, it has several instructions inside,
|
109 |
|
|
// in this case just drop the boundary instruction and stop
|
110 |
|
|
// tracing.
|
111 |
|
|
break;
|
112 |
|
|
}
|
113 |
|
|
// First instruction is boundary fetch, leave the trace cache entry
|
114 |
|
|
// invalid for now because boundaryFetch() can fault
|
115 |
|
|
entry->pAddr = ~entry->pAddr;
|
116 |
|
|
entry->tlen = 1;
|
117 |
|
|
boundaryFetch(fetchPtr, remainingInPage, i);
|
118 |
|
|
|
119 |
|
|
// Add the instruction to trace cache
|
120 |
|
|
entry->pAddr = ~entry->pAddr;
|
121 |
|
|
entry->traceMask = 0x80000000; /* last line in page */
|
122 |
|
|
pageWriteStampTable.markICacheMask(entry->pAddr, entry->traceMask);
|
123 |
|
|
pageWriteStampTable.markICacheMask(BX_CPU_THIS_PTR pAddrFetchPage, 0x1);
|
124 |
|
|
|
125 |
|
|
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
|
126 |
|
|
entry->tlen++; /* Add the inserted end of trace opcode */
|
127 |
|
|
genDummyICacheEntry(++i);
|
128 |
|
|
#endif
|
129 |
|
|
|
130 |
|
|
BX_CPU_THIS_PTR iCache.commit_page_split_trace(BX_CPU_THIS_PTR pAddrFetchPage, entry);
|
131 |
|
|
return entry;
|
132 |
|
|
}
|
133 |
|
|
|
134 |
|
|
// add instruction to the trace
|
135 |
|
|
unsigned iLen = i->ilen();
|
136 |
|
|
entry->tlen++;
|
137 |
|
|
|
138 |
|
|
#ifdef BX_INSTR_STORE_OPCODE_BYTES
|
139 |
|
|
i->set_opcode_bytes(fetchPtr);
|
140 |
|
|
#endif
|
141 |
|
|
BX_INSTR_OPCODE(BX_CPU_ID, i, fetchPtr, iLen,
|
142 |
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, long64_mode());
|
143 |
|
|
|
144 |
|
|
i++;
|
145 |
|
|
|
146 |
|
|
traceMask |= 1 << (pageOffset >> 7);
|
147 |
|
|
traceMask |= 1 << ((pageOffset + iLen - 1) >> 7);
|
148 |
|
|
|
149 |
|
|
// continue to the next instruction
|
150 |
|
|
remainingInPage -= iLen;
|
151 |
|
|
if (ret != 0 /* stop trace indication */ || remainingInPage == 0) break;
|
152 |
|
|
pAddr += iLen;
|
153 |
|
|
pageOffset += iLen;
|
154 |
|
|
fetchPtr += iLen;
|
155 |
|
|
|
156 |
|
|
// try to find a trace starting from current pAddr and merge
|
157 |
|
|
if (remainingInPage >= 15) { // avoid merging with page split trace
|
158 |
|
|
if (mergeTraces(entry, i, pAddr)) {
|
159 |
|
|
entry->traceMask |= traceMask;
|
160 |
|
|
pageWriteStampTable.markICacheMask(pAddr, entry->traceMask);
|
161 |
|
|
BX_CPU_THIS_PTR iCache.commit_trace(entry->tlen);
|
162 |
|
|
return entry;
|
163 |
|
|
}
|
164 |
|
|
}
|
165 |
|
|
}
|
166 |
|
|
|
167 |
|
|
//BX_INFO(("commit trace %08x len=%d mask %08x", (Bit32u) entry->pAddr, entry->tlen, pageWriteStampTable.getFineGranularityMapping(entry->pAddr)));
|
168 |
|
|
|
169 |
|
|
entry->traceMask |= traceMask;
|
170 |
|
|
|
171 |
|
|
pageWriteStampTable.markICacheMask(pAddr, entry->traceMask);
|
172 |
|
|
|
173 |
|
|
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
|
174 |
|
|
entry->tlen++; /* Add the inserted end of trace opcode */
|
175 |
|
|
genDummyICacheEntry(i);
|
176 |
|
|
#endif
|
177 |
|
|
|
178 |
|
|
BX_CPU_THIS_PTR iCache.commit_trace(entry->tlen);
|
179 |
|
|
|
180 |
|
|
return entry;
|
181 |
|
|
}
|
182 |
|
|
|
183 |
|
|
bx_bool BX_CPU_C::mergeTraces(bxICacheEntry_c *entry, bxInstruction_c *i, bx_phy_address pAddr)
|
184 |
|
|
{
|
185 |
|
|
bxICacheEntry_c *e = BX_CPU_THIS_PTR iCache.find_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);
|
186 |
|
|
|
187 |
|
|
if (e != NULL)
|
188 |
|
|
{
|
189 |
|
|
// determine max amount of instruction to take from another entry
|
190 |
|
|
unsigned max_length = e->tlen;
|
191 |
|
|
|
192 |
|
|
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
|
193 |
|
|
if (max_length + entry->tlen > BX_MAX_TRACE_LENGTH)
|
194 |
|
|
return 0;
|
195 |
|
|
#else
|
196 |
|
|
if (max_length + entry->tlen > BX_MAX_TRACE_LENGTH)
|
197 |
|
|
max_length = BX_MAX_TRACE_LENGTH - entry->tlen;
|
198 |
|
|
if(max_length == 0) return 0;
|
199 |
|
|
#endif
|
200 |
|
|
|
201 |
|
|
memcpy(i, e->i, sizeof(bxInstruction_c)*max_length);
|
202 |
|
|
entry->tlen += max_length;
|
203 |
|
|
BX_ASSERT(entry->tlen <= BX_MAX_TRACE_LENGTH);
|
204 |
|
|
|
205 |
|
|
entry->traceMask |= e->traceMask;
|
206 |
|
|
|
207 |
|
|
return 1;
|
208 |
|
|
}
|
209 |
|
|
|
210 |
|
|
return 0;
|
211 |
|
|
}
|
212 |
|
|
|
213 |
|
|
void BX_CPU_C::boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i)
|
214 |
|
|
{
|
215 |
|
|
unsigned j, k;
|
216 |
|
|
Bit8u fetchBuffer[32];
|
217 |
|
|
int ret;
|
218 |
|
|
|
219 |
|
|
if (remainingInPage >= 15) {
|
220 |
|
|
BX_ERROR(("boundaryFetch #GP(0): too many instruction prefixes"));
|
221 |
|
|
exception(BX_GP_EXCEPTION, 0);
|
222 |
|
|
}
|
223 |
|
|
|
224 |
|
|
// Read all leftover bytes in current page up to boundary.
|
225 |
|
|
for (j=0; j<remainingInPage; j++) {
|
226 |
|
|
fetchBuffer[j] = *fetchPtr++;
|
227 |
|
|
}
|
228 |
|
|
|
229 |
|
|
// The 2nd chunk of the instruction is on the next page.
|
230 |
|
|
// Set RIP to the 0th byte of the 2nd page, and force a
|
231 |
|
|
// prefetch so direct access of that physical page is possible, and
|
232 |
|
|
// all the associated info is updated.
|
233 |
|
|
RIP += remainingInPage;
|
234 |
|
|
prefetch();
|
235 |
|
|
|
236 |
|
|
unsigned fetchBufferLimit = 15;
|
237 |
|
|
if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
|
238 |
|
|
BX_DEBUG(("boundaryFetch: small window size after prefetch=%d bytes, remainingInPage=%d bytes", BX_CPU_THIS_PTR eipPageWindowSize, remainingInPage));
|
239 |
|
|
fetchBufferLimit = BX_CPU_THIS_PTR eipPageWindowSize;
|
240 |
|
|
}
|
241 |
|
|
|
242 |
|
|
// We can fetch straight from the 0th byte, which is eipFetchPtr;
|
243 |
|
|
fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;
|
244 |
|
|
|
245 |
|
|
// read leftover bytes in next page
|
246 |
|
|
for (k=0; k<fetchBufferLimit; k++, j++) {
|
247 |
|
|
fetchBuffer[j] = *fetchPtr++;
|
248 |
|
|
}
|
249 |
|
|
|
250 |
|
|
#if BX_SUPPORT_X86_64
|
251 |
|
|
if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64)
|
252 |
|
|
ret = fetchDecode64(fetchBuffer, i, remainingInPage+fetchBufferLimit);
|
253 |
|
|
else
|
254 |
|
|
#endif
|
255 |
|
|
ret = fetchDecode32(fetchBuffer, i, remainingInPage+fetchBufferLimit);
|
256 |
|
|
|
257 |
|
|
if (ret < 0) {
|
258 |
|
|
BX_INFO(("boundaryFetch #GP(0): failed to complete instruction decoding"));
|
259 |
|
|
exception(BX_GP_EXCEPTION, 0);
|
260 |
|
|
}
|
261 |
|
|
|
262 |
|
|
// Restore EIP since we fudged it to start at the 2nd page boundary.
|
263 |
|
|
RIP = BX_CPU_THIS_PTR prev_rip;
|
264 |
|
|
|
265 |
|
|
// Since we cross an instruction boundary, note that we need a prefetch()
|
266 |
|
|
// again on the next instruction. Perhaps we can optimize this to
|
267 |
|
|
// eliminate the extra prefetch() since we do it above, but have to
|
268 |
|
|
// think about repeated instructions, etc.
|
269 |
|
|
// invalidate_prefetch_q();
|
270 |
|
|
|
271 |
|
|
#ifdef BX_INSTR_STORE_OPCODE_BYTES
|
272 |
|
|
i->set_opcode_bytes(fetchBuffer);
|
273 |
|
|
#endif
|
274 |
|
|
|
275 |
|
|
BX_INSTR_OPCODE(BX_CPU_ID, i, fetchBuffer, i->ilen(),
|
276 |
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b, long64_mode());
|
277 |
|
|
}
|