/*
 *  arch/ppc/kernel/hashtable.S
 *
 *  $Id: hashtable.S,v 1.1.1.1 2004-04-15 01:19:16 phoenix Exp $
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

/*
 * NOTE: the arguments of these #include directives were lost when the
 * page was captured (the angle-bracketed names were swallowed as
 * markup).  The list below is a reconstruction from the symbols this
 * file uses and may not match the original exactly.
 */
#include <linux/config.h>       /* CONFIG_SMP, CONFIG_PPC64BRIDGE, CONFIG_POWER4 */
#include <asm/processor.h>
#include <asm/page.h>           /* KERNELBASE */
#include <asm/pgtable.h>        /* _PAGE_* bit definitions */
#include <asm/cputable.h>
#include <asm/ppc_asm.h>        /* _GLOBAL, SYNC, TLBSYNC, tophys, ... */
#include "ppc_defs.h"           /* generated asm offsets: THREAD, PGDIR, GPRn, ... */

#ifdef CONFIG_SMP
        .comm   hash_table_lock,4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r23 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG3 contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r2 - r7, ctr, lr.
 */
        .text
        .globl  hash_page
hash_page:
#ifdef CONFIG_PPC64BRIDGE
        mfmsr   r0
        clrldi  r0,r0,1         /* make sure it's in 32-bit mode */
        MTMSRD(r0)
        isync
#endif
        tophys(r7,0)                    /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
        addis   r2,r7,hash_table_lock@h
        ori     r2,r2,hash_table_lock@l
        mfspr   r5,SPRG3
        lwz     r0,PROCESSOR-THREAD(r5)
        oris    r0,r0,0x0fff
        b       10f
11:     lwz     r6,0(r2)
        cmpwi   0,r6,0
        bne     11b
10:     lwarx   r6,0,r2
        cmpwi   0,r6,0
        bne-    11b
        stwcx.  r0,0,r2
        bne-    10b
        isync
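        /*
         * The lock word is the owner's CPU number with a tag in the
         * upper half (0x0fff here, 10 and 9 at the add_hash_page and
         * flush_hash_page lock sites), apparently so a stuck
         * hash_table_lock can be traced back to the path that took it.
         */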
#endif
        /* Get PTE (linux-style) and check access */
        lis     r0,KERNELBASE@h         /* check if kernel address */
        cmplw   0,r4,r0
        mfspr   r2,SPRG3                /* current task's THREAD (phys) */
        ori     r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
        lwz     r5,PGDIR(r2)            /* virt page-table root */
        blt+    112f                    /* assume user more likely */
        lis     r5,swapper_pg_dir@ha    /* if kernel address, use */
        addi    r5,r5,swapper_pg_dir@l  /* kernel page table */
        rlwimi  r3,r23,32-12,29,29      /* MSR_PR -> _PAGE_USER */
112:    add     r5,r5,r7                /* convert to phys addr */
        rlwimi  r5,r4,12,20,29          /* insert top 10 bits of address */
        lwz     r5,0(r5)                /* get pmd entry */
        rlwinm. r5,r5,0,0,19            /* extract address of pte page */
#ifdef CONFIG_SMP
        beq-    hash_page_out           /* return if no mapping */
#else
        /* XXX it seems like the 601 will give a machine fault on the
           rfi if its alignment is wrong (bottom 4 bits of address are
           8 or 0xc) and we have had a not-taken conditional branch
           to the address following the rfi. */
        beqlr-
#endif
        add     r2,r5,r7                /* convert to phys addr */
        rlwimi  r2,r4,22,20,29          /* insert next 10 bits of address */
        rlwinm  r0,r3,32-3,24,24        /* _PAGE_RW access -> _PAGE_DIRTY */
        ori     r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
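        /*
         * r2 now holds the physical address of the linux PTE: the two
         * rlwimi instructions above drop va bits 0-9 (pgdir index) and
         * then va bits 10-19 (pte index) into bit positions 20-29,
         * i.e. index * 4, since both levels use 4-byte entries in a
         * page-aligned table.
         */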

        /*
         * Update the linux PTE atomically.  We do the lwarx up-front
         * because almost always, there won't be a permission violation
         * and there won't already be an HPTE, and thus we will have
         * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
         */
retry:
        lwarx   r6,0,r2                 /* get linux-style pte */
        andc.   r5,r3,r6                /* check access & ~permission */
#ifdef CONFIG_SMP
        bne-    hash_page_out           /* return if access not permitted */
#else
        bnelr-
#endif
        or      r5,r0,r6                /* set accessed/dirty bits */
        stwcx.  r5,0,r2                 /* attempt to update PTE */
        bne-    retry                   /* retry if someone got there first */
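        /*
         * lwarx sets a reservation on the PTE's cache line; stwcx.
         * succeeds (cr0.eq set) only if nothing else touched the
         * reservation in between, so a concurrent update sends us back
         * around the loop instead of being silently lost.
         */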

        mfsrin  r3,r4                   /* get segment reg for segment */
        mr      r2,r8                   /* we have saved r2 but not r8 */
        bl      create_hpte             /* add the hash table entry */
        mr      r8,r2

/*
 * htab_reloads counts the number of times we have to fault an
 * HPTE into the hash table.  This should only happen after a
 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
 * Where a page is faulted into a process's address space,
 * update_mmu_cache gets called to put the HPTE into the hash table
 * and those are counted as preloads rather than reloads.
 */
        addis   r2,r7,htab_reloads@ha
        lwz     r3,htab_reloads@l(r2)
        addi    r3,r3,1
        stw     r3,htab_reloads@l(r2)

#ifdef CONFIG_SMP
        eieio
        addis   r2,r7,hash_table_lock@ha
        li      r0,0
        stw     r0,hash_table_lock@l(r2)
#endif

        /* Return from the exception */
        lwz     r3,_CCR(r21)
        lwz     r4,_LINK(r21)
        lwz     r5,_CTR(r21)
        mtcrf   0xff,r3
        mtlr    r4
        mtctr   r5
        lwz     r0,GPR0(r21)
        lwz     r1,GPR1(r21)
        lwz     r2,GPR2(r21)
        lwz     r3,GPR3(r21)
        lwz     r4,GPR4(r21)
        lwz     r5,GPR5(r21)
        lwz     r6,GPR6(r21)
        lwz     r7,GPR7(r21)
        /* we haven't used xer */
        mtspr   SRR1,r23
        mtspr   SRR0,r22
        lwz     r20,GPR20(r21)
        lwz     r22,GPR22(r21)
        lwz     r23,GPR23(r21)
        lwz     r21,GPR21(r21)
        RFI

#ifdef CONFIG_SMP
hash_page_out:
        eieio
        addis   r2,r7,hash_table_lock@ha
        li      r0,0
        stw     r0,hash_table_lock@l(r2)
        blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, pte_t pte)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
        mflr    r0
        stw     r0,4(r1)

        /* Convert context and va to VSID */
        mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note create_hpte trims to 24 bits */
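        /*
         * Worked example: context 5, va 0xd0003000.  The ESID is the
         * top 4 bits of the va, 0xd, so VSID = 5 * 0x3810 + 0xd * 0x111
         * = 0x11850 + 0xddd = 0x1262d (897*16 = 0x3810).  The two skew
         * multipliers just spread consecutive contexts and segments
         * across the hash table.
         */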

        /*
         * We disable interrupts here, even on UP, because we don't
         * want to race with hash_page, and because we want the
         * _PAGE_HASHPTE bit to be a reliable indication of whether
         * the HPTE exists (or at least whether one did once).  -- paulus
         */
        mfmsr   r10
        SYNC
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        mtmsr   r0
        SYNC
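        /*
         * The rlwinm mask runs from bit 17 around to bit 15, i.e. it
         * keeps every MSR bit except bit 16 (IBM numbering), which is
         * MSR_EE (0x8000) -- a branch-free way to disable external
         * interrupts.
         */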

#ifdef CONFIG_SMP
        lis     r9,hash_table_lock@h
        ori     r9,r9,hash_table_lock@l
        lwz     r8,PROCESSOR(r2)
        oris    r8,r8,10
10:     lwarx   r7,0,r9
        cmpi    0,r7,0
        bne-    11f
        stwcx.  r8,0,r9
        beq+    12f
11:     lwz     r7,0(r9)
        cmpi    0,r7,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
         * If _PAGE_HASHPTE was already set, we don't replace the existing
         * HPTE, so we just unlock and return.
         */
        mr      r7,r5
1:      lwarx   r6,0,r7
        andi.   r0,r6,_PAGE_HASHPTE
        bne     9f                      /* if HASHPTE already set, done */
        ori     r5,r6,_PAGE_ACCESSED|_PAGE_HASHPTE
        stwcx.  r5,0,r7
        bne-    1b

        li      r7,0                    /* no address offset needed */
        bl      create_hpte

        lis     r8,htab_preloads@ha
        lwz     r3,htab_preloads@l(r8)
        addi    r3,r3,1
        stw     r3,htab_preloads@l(r8)

9:
#ifdef CONFIG_SMP
        eieio
        li      r0,0
        stw     r0,0(r9)                /* clear hash_table_lock */
#endif

        lwz     r0,4(r1)
        mtlr    r0

        /* reenable interrupts */
        mtmsr   r10
        SYNC
        blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
 * -KERNELBASE if it is off).
 * On SMP, the caller should have the hash_table_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12                          /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
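/*
 * With these example values the table has 2^12 = 4096 PTE groups of
 * 64 bytes each, i.e. 256kB in all, and Hash_msk = 4095 * 64 = 0x3ffc0,
 * the PTEG-offset bits within the table.
 */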

#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE        8
#define PTEG_SIZE       64
#define LG_PTEG_SIZE    6
#define LDPTEu          lwzu
#define STPTE           stw
#define CMPPTE          cmpw
#define PTE_H           0x40
#define PTE_V           0x80000000
#define TST_V(r)        rlwinm. r,r,0,0,0
#define SET_V(r)        oris r,r,PTE_V@h
#define CLR_V(r,t)      rlwinm r,r,0,1,31

#else
/* defines for the PTE format for 64-bit PPCs */
#define PTE_SIZE        16
#define PTEG_SIZE       128
#define LG_PTEG_SIZE    7
#define LDPTEu          ldu
#define STPTE           std
#define CMPPTE          cmpd
#define PTE_H           2
#define PTE_V           1
#define TST_V(r)        andi. r,r,PTE_V
#define SET_V(r)        ori r,r,PTE_V
#define CLR_V(r,t)      li t,PTE_V; andc r,r,t
#endif /* CONFIG_PPC64BRIDGE */

#define HASH_LEFT       31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT      31-LG_PTEG_SIZE

_GLOBAL(create_hpte)
        /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
        rlwinm  r8,r5,32-10,31,31       /* _PAGE_RW -> PP lsb */
        rlwinm  r0,r5,32-7,31,31        /* _PAGE_DIRTY -> PP lsb */
        and     r8,r8,r0                /* writable if _RW & _DIRTY */
        rlwimi  r5,r5,32-1,30,30        /* _PAGE_USER -> PP msb */
        rlwimi  r5,r5,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r8,r8,0xe14             /* clear out reserved bits and M */
        andc    r8,r5,r8                /* PP = user? (rw&dirty? 2: 3): 0 */
#ifdef CONFIG_SMP
        ori     r8,r8,_PAGE_COHERENT    /* set M (coherence required) */
#endif
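        /*
         * Resulting PP (page protection) values: 0 = kernel
         * read/write, no user access; 2 = read/write; 3 = read-only.
         * A user page that is not both _PAGE_RW and _PAGE_DIRTY gets
         * PP = 3, so the first write faults back into hash_page,
         * which then sets the dirty bit and installs a writable HPTE.
         */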

        /* Construct the high word of the PPC-style PTE (r5) */
#ifndef CONFIG_PPC64BRIDGE
        rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
        rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
#else /* CONFIG_PPC64BRIDGE */
        clrlwi  r3,r3,8                 /* reduce vsid to 24 bits */
        sldi    r5,r3,12                /* shift vsid into position */
        rlwimi  r5,r4,16,20,24          /* put in API (abbrev page index) */
#endif /* CONFIG_PPC64BRIDGE */
        SET_V(r5)                       /* set V (valid) bit */
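        /*
         * 32-bit HPTE word 0 layout: V | VSID[1:24] | H | API, where
         * the API is the top 6 bits of the 16-bit page index.
         * Together with which PTEG the entry sits in, that is enough
         * to identify the full virtual address on a search.
         */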

        /* Get the address of the primary PTE group in the hash table (r3) */
        .globl  hash_page_patch_A
hash_page_patch_A:
        addis   r0,r7,Hash_base@h       /* base address of hash table */
        rlwimi  r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
        rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
        xor     r3,r3,r0                /* make primary hash */
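        /*
         * The primary hash is (VSID low bits) XOR (page index), scaled
         * by the 64-byte PTEG size and masked to Hash_bits bits, then
         * offset from Hash_base.  The instructions under the
         * hash_page_patch_* labels are the ones patched at boot once
         * the real table size and address are known.
         */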
        li      r0,8                    /* PTEs/group */

        /*
         * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
         * if it is clear, meaning that the HPTE isn't there already...
         */
        andi.   r6,r6,_PAGE_HASHPTE
        beq+    10f                     /* no PTE: go look for an empty slot */
        tlbie   r4

        addis   r4,r7,htab_hash_searches@ha
        lwz     r6,htab_hash_searches@l(r4)
        addi    r6,r6,1                 /* count how many searches we do */
        stw     r6,htab_hash_searches@l(r4)

        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
        mtctr   r0
        addi    r4,r3,-PTE_SIZE
1:      LDPTEu  r6,PTE_SIZE(r4)         /* get next PTE */
        CMPPTE  0,r6,r5
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
        beq+    found_slot

        /* Search the secondary PTEG for a matching PTE */
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
        .globl  hash_page_patch_B
hash_page_patch_B:
        xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
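        /*
         * Together the xoris/xori pair XORs the primary hash with
         * Hash_msk (0x30000 then 0xffc0 for the example values above),
         * flipping every PTEG-index bit: the secondary PTEG is the
         * one's complement of the primary, as the PowerPC hashed page
         * table architecture defines it.
         */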
381
        addi    r4,r4,-PTE_SIZE
382
        mtctr   r0
383
2:      LDPTEu  r6,PTE_SIZE(r4)
384
        CMPPTE  0,r6,r5
385
        bdnzf   2,2b
386
        beq+    found_slot
387
        xori    r5,r5,PTE_H             /* clear H bit again */
388
 
389
        /* Search the primary PTEG for an empty slot */
390
10:     mtctr   r0
391
        addi    r4,r3,-PTE_SIZE         /* search primary PTEG */
392
1:      LDPTEu  r6,PTE_SIZE(r4)         /* get next PTE */
393
        TST_V(r6)                       /* test valid bit */
394
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
395
        beq+    found_empty
396
 
397
        /* update counter of times that the primary PTEG is full */
398
        addis   r4,r7,primary_pteg_full@ha
399
        lwz     r6,primary_pteg_full@l(r4)
400
        addi    r6,r6,1
401
        stw     r6,primary_pteg_full@l(r4)
402
 
403
        /* Search the secondary PTEG for an empty slot */
404
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
405
        .globl  hash_page_patch_C
406
hash_page_patch_C:
407
        xoris   r4,r3,Hash_msk>>16      /* compute secondary hash */
408
        xori    r4,r4,(-PTEG_SIZE & 0xffff)
409
        addi    r4,r4,-PTE_SIZE
410
        mtctr   r0
411
2:      LDPTEu  r6,PTE_SIZE(r4)
412
        TST_V(r6)
413
        bdnzf   2,2b
414
        beq+    found_empty
415
        xori    r5,r5,PTE_H             /* clear H bit again */
416
 
417
        /*
418
         * Choose an arbitrary slot in the primary PTEG to overwrite.
419
         * Since both the primary and secondary PTEGs are full, and we
420
         * have no information that the PTEs in the primary PTEG are
421
         * more important or useful than those in the secondary PTEG,
422
         * and we know there is a definite (although small) speed
423
         * advantage to putting the PTE in the primary PTEG, we always
424
         * put the PTE in the primary PTEG.
425
         */
426
        addis   r4,r7,next_slot@ha
427
        lwz     r6,next_slot@l(r4)
428
        addi    r6,r6,PTE_SIZE
429
        andi.   r6,r6,7*PTE_SIZE
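        /*
         * next_slot advances by one PTE and the andi. wraps it at
         * 7*PTE_SIZE (0x38 for 8-byte PTEs), so evictions cycle
         * round-robin through all 8 slots of the PTEG rather than
         * always clobbering the same one.
         */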
#ifdef CONFIG_POWER4
        /*
         * Since we don't have BATs on POWER4, we rely on always having
         * PTEs in the hash table to map the hash table and the code
         * that manipulates it in virtual mode, namely flush_hash_page and
         * flush_hash_segments.  Otherwise we can get a DSI inside those
         * routines which leads to a deadlock on the hash_table_lock on
         * SMP machines.  We avoid this by never overwriting the first
         * PTE of each PTEG if it is already valid.
         *      -- paulus.
         */
        bne     102f
        li      r6,PTE_SIZE
102:
#endif /* CONFIG_POWER4 */
        stw     r6,next_slot@l(r4)
        add     r4,r3,r6

        /* update counter of evicted pages */
        addis   r6,r7,htab_evicts@ha
        lwz     r3,htab_evicts@l(r6)
        addi    r3,r3,1
        stw     r3,htab_evicts@l(r6)

#ifndef CONFIG_SMP
        /* Store PTE in PTEG */
found_empty:
        STPTE   r5,0(r4)
found_slot:
        STPTE   r8,PTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
        CLR_V(r5,r0)            /* clear V (valid) bit in PTE */
        STPTE   r5,0(r4)
        sync
        TLBSYNC
        STPTE   r8,PTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
        sync
        SET_V(r5)
        STPTE   r5,0(r4)        /* finally set V bit in PTE */
#endif /* CONFIG_SMP */

        sync            /* make sure pte updates get to memory */
        blr

        .comm   next_slot,4
        .comm   primary_pteg_full,4
        .comm   htab_hash_searches,4

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_page(unsigned context, unsigned long va, pte_t *ptep)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
_GLOBAL(flush_hash_page)
        /* Convert context and va to VSID */
        mulli   r3,r3,897*16            /* multiply context by context skew */
        rlwinm  r0,r4,4,28,31           /* get ESID (top 4 bits of va) */
        mulli   r0,r0,0x111             /* multiply by ESID skew */
        add     r3,r3,r0                /* note code below trims to 24 bits */

        /*
         * We disable interrupts here, even on UP, because we want
         * the _PAGE_HASHPTE bit to be a reliable indication of
         * whether the HPTE exists.  -- paulus
         */
        mfmsr   r10
        rlwinm  r0,r10,0,17,15          /* clear bit 16 (MSR_EE) */
        SYNC
        mtmsr   r0
        SYNC

#ifdef CONFIG_SMP
        lis     r9,hash_table_lock@h
        ori     r9,r9,hash_table_lock@l
        lwz     r8,PROCESSOR(r2)
        oris    r8,r8,9
10:     lwarx   r7,0,r9
        cmpi    0,r7,0
        bne-    11f
        stwcx.  r8,0,r9
        beq+    12f
11:     lwz     r7,0(r9)
        cmpi    0,r7,0
        beq     10b
        b       11b
12:     isync
#endif

        /*
         * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
         * already clear, we're done.  If not, clear it (atomically)
         * and proceed.  -- paulus.
         */
1:      lwarx   r6,0,r5                 /* fetch the pte */
        andi.   r0,r6,_PAGE_HASHPTE
        beq     9f                      /* done if HASHPTE is already clear */
        rlwinm  r6,r6,0,31,29           /* clear HASHPTE bit */
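        /*
         * The wrap-around mask 31..29 keeps every bit except bit 30,
         * i.e. it clears the 0x002 _PAGE_HASHPTE bit while leaving the
         * rest of the PTE untouched.
         */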
549
        stwcx.  r6,0,r5                 /* update the pte */
550
        bne-    1b
551
 
552
        /* Construct the high word of the PPC-style PTE (r5) */
553
#ifndef CONFIG_PPC64BRIDGE
554
        rlwinm  r5,r3,7,1,24            /* put VSID in 0x7fffff80 bits */
555
        rlwimi  r5,r4,10,26,31          /* put in API (abbrev page index) */
556
#else /* CONFIG_PPC64BRIDGE */
557
        clrlwi  r3,r3,8                 /* reduce vsid to 24 bits */
558
        sldi    r5,r3,12                /* shift vsid into position */
559
        rlwimi  r5,r4,16,20,24          /* put in API (abbrev page index) */
560
#endif /* CONFIG_PPC64BRIDGE */
561
        SET_V(r5)                       /* set V (valid) bit */
562
 
563
        /* Get the address of the primary PTE group in the hash table (r3) */
564
        .globl  flush_hash_patch_A
565
flush_hash_patch_A:
566
        lis     r8,Hash_base@h          /* base address of hash table */
567
        rlwimi  r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
568
        rlwinm  r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
569
        xor     r3,r3,r8                /* make primary hash */
570
        li      r8,8                    /* PTEs/group */
571
 
572
        /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
573
        mtctr   r8
574
        addi    r7,r3,-PTE_SIZE
575
1:      LDPTEu  r0,PTE_SIZE(r7)         /* get next PTE */
576
        CMPPTE  0,r0,r5
577
        bdnzf   2,1b                    /* loop while ctr != 0 && !cr0.eq */
578
        beq+    3f
579
 
580
        /* Search the secondary PTEG for a matching PTE */
581
        ori     r5,r5,PTE_H             /* set H (secondary hash) bit */
582
        .globl  flush_hash_patch_B
583
flush_hash_patch_B:
584
        xoris   r7,r3,Hash_msk>>16      /* compute secondary hash */
585
        xori    r7,r7,(-PTEG_SIZE & 0xffff)
586
        addi    r7,r7,-PTE_SIZE
587
        mtctr   r8
588
2:      LDPTEu  r0,PTE_SIZE(r7)
589
        CMPPTE  0,r0,r5
590
        bdnzf   2,2b
591
        bne-    4f                      /* should never fail to find it */
592
 
593
3:      li      r0,0
594
        STPTE   r0,0(r7)                /* invalidate entry */
595
4:      sync
596
        tlbie   r4                      /* in hw tlb too */
597
        sync
598
 
599
#ifdef CONFIG_SMP
600
        TLBSYNC
601
9:      li      r0,0
602
        stw     r0,0(r9)                /* clear hash_table_lock */
603
#endif
604
 
605
9:      mtmsr   r10
606
        SYNC
607
        blr
