1 |
2 |
alfik |
/////////////////////////////////////////////////////////////////////////
|
2 |
|
|
// $Id: icache.h 11471 2012-10-01 18:19:09Z sshwarts $
|
3 |
|
|
/////////////////////////////////////////////////////////////////////////
|
4 |
|
|
//
|
5 |
|
|
// Copyright (c) 2007-2011 Stanislav Shwartsman
|
6 |
|
|
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
|
7 |
|
|
//
|
8 |
|
|
// This library is free software; you can redistribute it and/or
|
9 |
|
|
// modify it under the terms of the GNU Lesser General Public
|
10 |
|
|
// License as published by the Free Software Foundation; either
|
11 |
|
|
// version 2 of the License, or (at your option) any later version.
|
12 |
|
|
//
|
13 |
|
|
// This library is distributed in the hope that it will be useful,
|
14 |
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
15 |
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
16 |
|
|
// Lesser General Public License for more details.
|
17 |
|
|
//
|
18 |
|
|
// You should have received a copy of the GNU Lesser General Public
|
19 |
|
|
// License along with this library; if not, write to the Free Software
|
20 |
|
|
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
21 |
|
|
//
|
22 |
|
|
/////////////////////////////////////////////////////////////////////////
|
23 |
|
|
|
24 |
|
|
#ifndef BX_ICACHE_H
|
25 |
|
|
#define BX_ICACHE_H
|
26 |
|
|
|
27 |
|
|
extern void handleSMC(bx_phy_address pAddr, Bit32u mask);
|
28 |
|
|
|
29 |
|
|
class bxPageWriteStampTable
|
30 |
|
|
{
|
31 |
|
|
#define PHY_MEM_PAGES (1024*1024)
|
32 |
|
|
Bit32u *fineGranularityMapping;
|
33 |
|
|
|
34 |
|
|
public:
|
35 |
|
|
bxPageWriteStampTable() {
|
36 |
|
|
fineGranularityMapping = new Bit32u[PHY_MEM_PAGES];
|
37 |
|
|
resetWriteStamps();
|
38 |
|
|
}
|
39 |
|
|
~bxPageWriteStampTable() { delete [] fineGranularityMapping; }
|
40 |
|
|
|
41 |
|
|
BX_CPP_INLINE static Bit32u hash(bx_phy_address pAddr) {
|
42 |
|
|
// can share writeStamps between multiple pages if >32 bit phy address
|
43 |
|
|
return ((Bit32u) pAddr) >> 12;
|
44 |
|
|
}
|
45 |
|
|
|
46 |
|
|
BX_CPP_INLINE Bit32u getFineGranularityMapping(bx_phy_address pAddr) const
|
47 |
|
|
{
|
48 |
|
|
return fineGranularityMapping[hash(pAddr)];
|
49 |
|
|
}
|
50 |
|
|
|
51 |
|
|
BX_CPP_INLINE void markICache(bx_phy_address pAddr, unsigned len)
|
52 |
|
|
{
|
53 |
|
|
Bit32u mask = 1 << (PAGE_OFFSET((Bit32u) pAddr) >> 7);
|
54 |
|
|
mask |= 1 << (PAGE_OFFSET((Bit32u) pAddr + len - 1) >> 7);
|
55 |
|
|
|
56 |
|
|
fineGranularityMapping[hash(pAddr)] |= mask;
|
57 |
|
|
}
|
58 |
|
|
|
59 |
|
|
BX_CPP_INLINE void markICacheMask(bx_phy_address pAddr, Bit32u mask)
|
60 |
|
|
{
|
61 |
|
|
fineGranularityMapping[hash(pAddr)] |= mask;
|
62 |
|
|
}
|
63 |
|
|
|
64 |
|
|
// whole page is being altered
|
65 |
|
|
BX_CPP_INLINE void decWriteStamp(bx_phy_address pAddr)
|
66 |
|
|
{
|
67 |
|
|
Bit32u index = hash(pAddr);
|
68 |
|
|
|
69 |
|
|
if (fineGranularityMapping[index]) {
|
70 |
|
|
handleSMC(pAddr, 0xffffffff); // one of the CPUs might be running trace from this page
|
71 |
|
|
fineGranularityMapping[index] = 0;
|
72 |
|
|
}
|
73 |
|
|
}
|
74 |
|
|
|
75 |
|
|
// assumption: write does not split 4K page
|
76 |
|
|
BX_CPP_INLINE void decWriteStamp(bx_phy_address pAddr, unsigned len)
|
77 |
|
|
{
|
78 |
|
|
Bit32u index = hash(pAddr);
|
79 |
|
|
|
80 |
|
|
if (fineGranularityMapping[index]) {
|
81 |
|
|
Bit32u mask = 1 << (PAGE_OFFSET((Bit32u) pAddr) >> 7);
|
82 |
|
|
mask |= 1 << (PAGE_OFFSET((Bit32u) pAddr + len - 1) >> 7);
|
83 |
|
|
|
84 |
|
|
if (fineGranularityMapping[index] & mask) {
|
85 |
|
|
// one of the CPUs might be running trace from this page
|
86 |
|
|
handleSMC(pAddr, mask);
|
87 |
|
|
fineGranularityMapping[index] &= ~mask;
|
88 |
|
|
}
|
89 |
|
|
}
|
90 |
|
|
}
|
91 |
|
|
|
92 |
|
|
BX_CPP_INLINE void resetWriteStamps(void);
|
93 |
|
|
};
|
94 |
|
|
|
95 |
|
|
// Clear every per-page code-line mask, i.e. record that no physical page
// currently contributes instructions to the trace cache.
BX_CPP_INLINE void bxPageWriteStampTable::resetWriteStamps(void)
{
  Bit32u page = 0;
  while (page < PHY_MEM_PAGES) {
    fineGranularityMapping[page] = 0;
    page++;
  }
}
|
101 |
|
|
|
102 |
|
|
extern bxPageWriteStampTable pageWriteStampTable;
|
103 |
|
|
|
104 |
|
|
#define BxICacheEntries (256 * 1024) // Must be a power of 2.
|
105 |
|
|
#define BxICacheMemPool (576 * 1024)
|
106 |
|
|
|
107 |
|
|
//AO modif from 32 to 1
|
108 |
|
|
#define BX_MAX_TRACE_LENGTH 1
|
109 |
|
|
|
110 |
|
|
// One trace-cache entry: a decoded instruction trace keyed by the physical
// address of its first instruction.
struct bxICacheEntry_c
{
  bx_phy_address pAddr; // Physical address of the instruction
  Bit32u traceMask;     // Bitmask of 128-byte page lines spanned by this
                        // trace; compared against the SMC write mask in
                        // bxICache_c::handleSMC to decide invalidation.

  Bit32u tlen;          // Trace length in instructions
  bxInstruction_c *i;   // Decoded instructions of the trace; points into
                        // the bxICache_c::mpool array (see alloc_trace).
};
|
118 |
|
|
|
119 |
|
|
#define BX_ICACHE_INVALID_PHY_ADDRESS (bx_phy_address(-1))
|
120 |
|
|
|
121 |
|
|
// Invalidate a single trace-cache entry hit by self-modifying code.
BX_CPP_INLINE void flushSMC(bxICacheEntry_c *e)
{
  if (e->pAddr == BX_ICACHE_INVALID_PHY_ADDRESS)
    return; // already invalid - nothing to flush

  e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
  // Overwrite every instruction of the dead trace with a dummy end-of-trace
  // opcode so that traces chained into it cannot keep executing stale code.
  extern void genDummyICacheEntry(bxInstruction_c *i);
  for (unsigned n = 0; n < e->tlen; n++)
    genDummyICacheEntry(&e->i[n]);
#endif
}
|
132 |
|
|
|
133 |
|
|
// Instruction/trace cache: a direct-mapped array of trace entries backed by
// a shared memory pool of decoded instructions, plus two small auxiliary
// structures (page-split registry and victim cache).
class BOCHSAPI bxICache_c {
public:
  bxICacheEntry_c entry[BxICacheEntries];  // direct-mapped trace entries
  bxInstruction_c mpool[BxICacheMemPool];  // bump-allocated instruction pool
  unsigned mpindex;                        // next free slot in mpool

#define BX_ICACHE_PAGE_SPLIT_ENTRIES 8 /* must be power of two */
  // Registry of traces that cross a 4K page boundary, so a write to the
  // second page can also invalidate them (see handleSMC).
  struct pageSplitEntryIndex {
    bx_phy_address ppf; // Physical address of 2nd page of the trace
    bxICacheEntry_c *e; // Pointer to icache entry
  } pageSplitIndex[BX_ICACHE_PAGE_SPLIT_ENTRIES];
  int nextPageSplitIndex; // round-robin replacement cursor

#define BX_ICACHE_VICTIM_ENTRIES 8 /* must be power of two */
  // Small fully-associative cache of recently evicted entries.
  struct bxVictimCacheEntry {
    Bit32u fetchModeMask;
    bxICacheEntry_c vc_entry;
  } victimCache[BX_ICACHE_VICTIM_ENTRIES];
  int nextVictimCacheIndex; // round-robin replacement cursor

public:
  bxICache_c() { flushICacheEntries(); }

  // Map (physical address, fetch mode) to an index in entry[].
  BX_CPP_INLINE static unsigned hash(bx_phy_address pAddr, unsigned fetchModeMask)
  {
//  return ((pAddr + (pAddr << 2) + (pAddr>>6)) & (BxICacheEntries-1)) ^ fetchModeMask;
    return ((pAddr) & (BxICacheEntries-1)) ^ fetchModeMask;
  }

  // Reserve space in mpool for a new trace and attach it to entry 'e'.
  // Flushes the whole cache first if the pool cannot fit a maximum-length
  // trace (which invalidates all existing entries).
  BX_CPP_INLINE void alloc_trace(bxICacheEntry_c *e)
  {
    // +1 extra slot reserved for instruction chaining speedup (end-of-trace opcode)
    if ((mpindex + BX_MAX_TRACE_LENGTH + 1) > BxICacheMemPool) {
      flushICacheEntries();
    }
    e->i = &mpool[mpindex];
    e->tlen = 0;
  }

  // Commit a finished trace of 'len' instructions by advancing the pool cursor.
  BX_CPP_INLINE void commit_trace(unsigned len) { mpindex += len; }

  // Commit a trace that crosses into a second physical page ('paddr') and
  // record it in the page-split registry, evicting (invalidating) the entry
  // previously registered in the reused slot.
  BX_CPP_INLINE void commit_page_split_trace(bx_phy_address paddr, bxICacheEntry_c *entry)
  {
    mpindex += entry->tlen;

    // register page split entry
    if (pageSplitIndex[nextPageSplitIndex].ppf != BX_ICACHE_INVALID_PHY_ADDRESS)
      pageSplitIndex[nextPageSplitIndex].e->pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;

    pageSplitIndex[nextPageSplitIndex].ppf = paddr;
    pageSplitIndex[nextPageSplitIndex].e = entry;

    nextPageSplitIndex = (nextPageSplitIndex+1) & (BX_ICACHE_PAGE_SPLIT_ENTRIES-1);
  }

  // Linear search of the victim cache; returns the matching entry or NULL.
  BX_CPP_INLINE bxICacheEntry_c *lookup_victim_cache(bx_phy_address pAddr, Bit32u fetchModeMask)
  {
    for (int i=0; i < BX_ICACHE_VICTIM_ENTRIES;i++) {
      bxVictimCacheEntry *e = &victimCache[i];
      if (e->vc_entry.pAddr == pAddr && e->fetchModeMask == fetchModeMask) {
        return &e->vc_entry;
      }
    }

    return NULL;
  }

  // Copy a valid entry being evicted into the victim cache (round-robin slot).
  BX_CPP_INLINE void victim_entry(bxICacheEntry_c *entry, Bit32u fetchModeMask)
  {
    if (entry->pAddr != BX_ICACHE_INVALID_PHY_ADDRESS) {
      victimCache[nextVictimCacheIndex].fetchModeMask = fetchModeMask;
      victimCache[nextVictimCacheIndex].vc_entry = *entry;
      nextVictimCacheIndex = (nextVictimCacheIndex+1) & (BX_ICACHE_VICTIM_ENTRIES-1);
    }
  }

  // Invalidate traces affected by a write to physical page of pAddr;
  // 'mask' has one bit per 128-byte line touched (defined out-of-line below).
  BX_CPP_INLINE void handleSMC(bx_phy_address pAddr, Bit32u mask);

  // Reset the whole cache to the empty state (defined out-of-line below).
  BX_CPP_INLINE void flushICacheEntries(void);

  // Return the direct-mapped slot for (pAddr, fetchModeMask) without
  // checking whether it actually holds a matching trace.
  BX_CPP_INLINE bxICacheEntry_c* get_entry(bx_phy_address pAddr, unsigned fetchModeMask)
  {
    return &(entry[hash(pAddr, fetchModeMask)]);
  }

  // Trace-cache lookup.  NOTE: deliberately disabled by the "AO modif"
  // below - it always returns NULL (every fetch misses), and the original
  // lookup code after the return is intentionally unreachable dead code.
  BX_CPP_INLINE bxICacheEntry_c* find_entry(bx_phy_address pAddr, unsigned fetchModeMask)
  {
    //AO modif
    return NULL;
    //AO modif end
    bxICacheEntry_c* e = &entry[hash(pAddr, fetchModeMask)];
    if (e->pAddr != pAddr)
      e = lookup_victim_cache(pAddr, fetchModeMask);

    return e;
  }
};
|
230 |
|
|
|
231 |
|
|
// Drop every cached trace: invalidate the main entry array, the page-split
// registry and the victim cache, then rewind the instruction memory pool.
BX_CPP_INLINE void bxICache_c::flushICacheEntries(void)
{
  for (unsigned slot = 0; slot < BxICacheEntries; slot++) {
    entry[slot].pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;
    entry[slot].traceMask = 0;
  }

  nextPageSplitIndex = 0;
  for (unsigned slot = 0; slot < BX_ICACHE_PAGE_SPLIT_ENTRIES; slot++)
    pageSplitIndex[slot].ppf = BX_ICACHE_INVALID_PHY_ADDRESS;

  nextVictimCacheIndex = 0;
  for (unsigned slot = 0; slot < BX_ICACHE_VICTIM_ENTRIES; slot++)
    victimCache[slot].vc_entry.pAddr = BX_ICACHE_INVALID_PHY_ADDRESS;

  mpindex = 0;
}
|
251 |
|
|
|
252 |
|
|
// Self-modifying-code handler: invalidate every cached trace that may
// contain an instruction from the written region.  'mask' has one bit set
// per 128-byte line of the page that was touched by the store.
BX_CPP_INLINE void bxICache_c::handleSMC(bx_phy_address pAddr, Bit32u mask)
{
  Bit32u pAddrIndex = bxPageWriteStampTable::hash(pAddr);

  // Need to invalidate all traces in the trace cache that might include an
  // instruction that was modified.  But this is not enough, it is possible
  // that some another trace is linked into invalidated trace and it won't
  // be invalidated. In order to solve this issue replace all instructions
  // from the invalidated trace with dummy EndOfTrace opcodes.

  // Another corner case that has to be handled - pageWriteStampTable wrap.
  // Multiple physical addresses could be mapped into single pageWriteStampTable
  // entry and all of them have to be invalidated here now.

  if (mask & 0x1) {
    // the store touched 1st cache line in the page, check for
    // page split traces to invalidate (their tail lives at the start of
    // the second page, hence line 0).
    for (unsigned i=0;i<BX_ICACHE_PAGE_SPLIT_ENTRIES;i++) {
      if (pageSplitIndex[i].ppf != BX_ICACHE_INVALID_PHY_ADDRESS) {
        if (pAddrIndex == bxPageWriteStampTable::hash(pageSplitIndex[i].ppf)) {
          pageSplitIndex[i].ppf = BX_ICACHE_INVALID_PHY_ADDRESS;
          flushSMC(pageSplitIndex[i].e);
        }
      }
    }
  }

  // Scan the victim cache for traces from the same (hashed) page whose
  // line mask overlaps the written lines.
  for (unsigned i=0;i < BX_ICACHE_VICTIM_ENTRIES; i++) {
    bxICacheEntry_c *e = &victimCache[i].vc_entry;
    if (pAddrIndex == bxPageWriteStampTable::hash(e->pAddr) && (e->traceMask & mask) != 0) {
      flushSMC(e);
    }
  }

  // Start scanning main entries at the bucket of the page frame address
  // (fetchModeMask 0); with hash() = pAddr & (BxICacheEntries-1), traces
  // from this page land in the following consecutive buckets.
  bxICacheEntry_c *e = get_entry(LPFOf(pAddr), 0);

  // go over 32 "cache lines" of 128 byte each
  for (unsigned n=0; n < 32; n++) {
    Bit32u line_mask = (1 << n);
    // lines above the highest written line cannot overlap 'mask' - stop early
    if (line_mask > mask) break;
    for (unsigned index=0; index < 128; index++, e++) {
      if (pAddrIndex == bxPageWriteStampTable::hash(e->pAddr) && (e->traceMask & mask) != 0) {
        flushSMC(e);
      }
    }
  }
}
|
299 |
|
|
|
300 |
|
|
extern void flushICaches(void);
|
301 |
|
|
|
302 |
|
|
#endif
|