author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2015-11-30 22:36:43 -0500
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-12-13 23:19:09 -0500
commit	91f1da99792a1d133df94c4753510305353064a1 (patch)
tree	a9b23f30ac3f681efaf8dedf79046586bd74efdd
parent	17ed9e3192b2b29ad24ffe711fa4b71716ef3ff3 (diff)

powerpc/mm: Convert 4k hash insert to C

Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--	arch/powerpc/mm/Makefile	3
-rw-r--r--	arch/powerpc/mm/hash64_64k.c	202
-rw-r--r--	arch/powerpc/mm/hash_low_64.S	380
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	4
4 files changed, 208 insertions, 381 deletions
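The conversion replaces the ldarx/stdcx. PTE-locking sequence in hash_low_64.S with a cmpxchg loop in C. The following standalone sketch (plain C11 with illustrative bit names and a hypothetical helper, not kernel code) shows the pattern the new __hash_page_4K() in hash64_64k.c below relies on: read the PTE, bail out if it is already busy or the access is not permitted, otherwise try to install BUSY | ACCESSED (and DIRTY for a write) with a compare-and-swap, retrying on contention.

#include <stdatomic.h>

#define DEMO_BUSY     0x1UL
#define DEMO_ACCESSED 0x2UL
#define DEMO_DIRTY    0x4UL
#define DEMO_RW       0x8UL

/* Returns: 0 = caller should just retry the access, 1 = take a fault, 2 = locked. */
static int demo_lock_pte(_Atomic unsigned long *pte, unsigned long access)
{
	unsigned long old, new;

	do {
		old = atomic_load(pte);
		if (old & DEMO_BUSY)
			return 0;	/* someone else is updating this PTE */
		if (access & ~old)
			return 1;	/* requested access is not permitted */
		new = old | DEMO_BUSY | DEMO_ACCESSED;
		if (access & DEMO_RW)
			new |= DEMO_DIRTY;
	} while (!atomic_compare_exchange_weak(pte, &old, new));

	return 2;	/* PTE is now marked busy by us */
}

int main(void)
{
	_Atomic unsigned long pte = DEMO_RW;	/* writable, not yet accessed */

	return demo_lock_pte(&pte, DEMO_RW) == 2 ? 0 : 1;
}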
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 3eb73a38220d..f80ad1a76cc8 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
 obj-$(CONFIG_PPC_STD_MMU)	+= hash_low_$(CONFIG_WORD_SIZE).o \
				   tlb_hash$(CONFIG_WORD_SIZE).o \
				   mmu_context_hash$(CONFIG_WORD_SIZE).o
+ifeq ($(CONFIG_PPC_STD_MMU_64),y)
+obj-$(CONFIG_PPC_64K_PAGES)	+= hash64_64k.o
+endif
 obj-$(CONFIG_PPC_ICSWX)		+= icswx.o
 obj-$(CONFIG_PPC_ICSWX_PID)	+= icswx_pid.o
 obj-$(CONFIG_40x)		+= 40x_mmu.o
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
new file mode 100644
index 000000000000..9ffeae2cbb57
--- /dev/null
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+		   pte_t *ptep, unsigned long trap, unsigned long flags,
+		   int ssize, int subpg_prot)
+{
+	real_pte_t rpte;
+	unsigned long *hidxp;
+	unsigned long hpte_group;
+	unsigned int subpg_index;
+	unsigned long rflags, pa, hidx;
+	unsigned long old_pte, new_pte, subpg_pte;
+	unsigned long vpn, hash, slot;
+	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+	/*
+	 * Atomically mark the linux large page PTE busy and dirty
+	 */
+	do {
+		pte_t pte = READ_ONCE(*ptep);
+
+		old_pte = pte_val(pte);
+		/* If PTE busy, retry the access */
+		if (unlikely(old_pte & _PAGE_BUSY))
+			return 0;
+		/* If PTE permissions don't match, take page fault */
+		if (unlikely(access & ~old_pte))
+			return 1;
+		/*
+		 * Try to lock the PTE, add ACCESSED and DIRTY if it was
+		 * a write access. Since this is a 4K insert of a 64K page
+		 * we also add _PAGE_COMBO
+		 */
+		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
+		if (access & _PAGE_RW)
+			new_pte |= _PAGE_DIRTY;
+	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+					  old_pte, new_pte));
+	/*
+	 * Handle the subpage protection bits
+	 */
+	subpg_pte = new_pte & ~subpg_prot;
+	/*
+	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
+	 * need to add in 0x1 if it's a read-only user page
+	 */
+	rflags = subpg_pte & _PAGE_USER;
+	if ((subpg_pte & _PAGE_USER) && !((subpg_pte & _PAGE_RW) &&
+					  (subpg_pte & _PAGE_DIRTY)))
+		rflags |= 0x1;
+	/*
+	 * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
+	 */
+	rflags |= ((subpg_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	/*
+	 * Always add C and Memory coherence bit
+	 */
+	rflags |= HPTE_R_C | HPTE_R_M;
+	/*
+	 * Add in WIMG bits
+	 */
+	rflags |= (subpg_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
+				_PAGE_COHERENT | _PAGE_GUARDED));
+
+	if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+
+		/*
+		 * No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case
+		 */
+		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+	}
+
+	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
+	vpn = hpt_vpn(ea, vsid, ssize);
+	rpte = __real_pte(__pte(old_pte), ptep);
+	/*
+	 * None of the sub 4K pages is hashed
+	 */
+	if (!(old_pte & _PAGE_HASHPTE))
+		goto htab_insert_hpte;
+	/*
+	 * Check if the pte was already inserted into the hash table
+	 * as a 64k HW page, and invalidate the 64k HPTE if so.
+	 */
+	if (!(old_pte & _PAGE_COMBO)) {
+		flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
+		old_pte &= ~_PAGE_HPTE_SUB;
+		goto htab_insert_hpte;
+	}
+	/*
+	 * Check for sub page valid and update
+	 */
+	if (__rpte_sub_valid(rpte, subpg_index)) {
+		int ret;
+
+		hash = hpt_hash(vpn, shift, ssize);
+		hidx = __rpte_to_hidx(rpte, subpg_index);
+		if (hidx & _PTEIDX_SECONDARY)
+			hash = ~hash;
+		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+		slot += hidx & _PTEIDX_GROUP_IX;
+
+		ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
+					   MMU_PAGE_4K, MMU_PAGE_4K,
+					   ssize, flags);
+		/*
+		 * If we failed (typically because the HPTE wasn't really
+		 * here), we try an insertion.
+		 */
+		if (ret == -1)
+			goto htab_insert_hpte;
+
+		*ptep = __pte(new_pte & ~_PAGE_BUSY);
+		return 0;
+	}
+
+htab_insert_hpte:
+	/*
+	 * Handle the _PAGE_4K_PFN case
+	 */
+	if (old_pte & _PAGE_4K_PFN) {
+		/*
+		 * All the sub 4K pages have the same
+		 * physical address.
+		 */
+		pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
+	} else {
+		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+		pa += (subpg_index << shift);
+	}
+	hash = hpt_hash(vpn, shift, ssize);
+repeat:
+	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+	/* Insert into the hash table, primary slot */
+	slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+				  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+	/*
+	 * Primary is full, try the secondary
+	 */
+	if (unlikely(slot == -1)) {
+		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+		slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+					  rflags, HPTE_V_SECONDARY,
+					  MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+		if (slot == -1) {
+			if (mftb() & 0x1)
+				hpte_group = ((hash & htab_hash_mask) *
+					      HPTES_PER_GROUP) & ~0x7UL;
+			ppc_md.hpte_remove(hpte_group);
+			/*
+			 * FIXME!! Should we try the group from which we removed?
+			 */
+			goto repeat;
+		}
+	}
+	/*
+	 * Hypervisor failure. Restore the old pte and return -1
+	 * similar to __hash_page_*
+	 */
+	if (unlikely(slot == -2)) {
+		*ptep = __pte(old_pte);
+		hash_failure_debug(ea, access, vsid, trap, ssize,
+				   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+		return -1;
+	}
+	/*
+	 * Insert slot number & secondary bit in PTE second half,
+	 * clear _PAGE_BUSY and set appropriate HPTE slot bit.
+	 * Since we have _PAGE_BUSY set on ptep, we can be sure
+	 * nobody is updating hidx.
+	 */
+	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	/* __real_pte uses pte_val(); any idea why? FIXME!! */
+	rpte.hidx &= ~(0xfUL << (subpg_index << 2));
+	*hidxp = rpte.hidx | (slot << (subpg_index << 2));
+	new_pte |= (_PAGE_HPTE_SUB0 >> subpg_index);
+	/*
+	 * check __real_pte for details on matching smp_rmb()
+	 */
+	smp_wmb();
+	*ptep = __pte(new_pte & ~_PAGE_BUSY);
+	return 0;
+}
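The tail of __hash_page_4K() above records, for each 4K subpage of the 64K Linux page, which of the eight HPTE group slots (plus the secondary-hash bit) holds its mapping: a 4-bit field per subpage in the second half of the PTE. A standalone illustration of that packing (user-space C, hypothetical helper name, not kernel API):

#include <stdio.h>

static unsigned long pack_hidx(unsigned long hidx_word,
			       unsigned int subpg_index,
			       unsigned long slot)
{
	unsigned int shift = subpg_index << 2;	/* 4 bits per subpage */

	hidx_word &= ~(0xfUL << shift);		/* clear the old slot info */
	return hidx_word | ((slot & 0xfUL) << shift);
}

int main(void)
{
	unsigned long hidx = 0;

	/* subpage 3 went into slot 5 of the secondary group (0x8 | 5) */
	hidx = pack_hidx(hidx, 3, 0x8 | 5);
	printf("%#lx\n", hidx);		/* prints 0xd000 */
	return 0;
}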
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 3b49e3295901..6b4d4c1d0628 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -328,381 +328,8 @@ htab_pte_insert_failure:
 	li	r3,-1
 	b	htab_bail
 
-
 #else /* CONFIG_PPC_64K_PAGES */
 
-
-/*****************************************************************************
- *                                                                           *
- *           64K SW & 4K or 64K HW in a 4K segment pages implementation      *
- *                                                                           *
- *****************************************************************************/
-
-/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, unsigned local flags,
- *		 int ssize, int subpg_prot)
- */
-
-/*
- * For now, we do NOT implement Admixed pages
- */
-_GLOBAL(__hash_page_4K)
-	mflr	r0
-	std	r0,16(r1)
-	stdu	r1,-STACKFRAMESIZE(r1)
-	/* Save all params that we need after a function call */
-	std	r6,STK_PARAM(R6)(r1)
-	std	r8,STK_PARAM(R8)(r1)
-	std	r9,STK_PARAM(R9)(r1)
-
-	/* Save non-volatile registers.
-	 * r31 will hold "old PTE"
-	 * r30 is "new PTE"
-	 * r29 is vpn
-	 * r28 is a hash value
-	 * r27 is hashtab mask (maybe dynamic patched instead ?)
-	 * r26 is the hidx mask
-	 * r25 is the index in combo page
-	 */
-	std	r25,STK_REG(R25)(r1)
-	std	r26,STK_REG(R26)(r1)
-	std	r27,STK_REG(R27)(r1)
-	std	r28,STK_REG(R28)(r1)
-	std	r29,STK_REG(R29)(r1)
-	std	r30,STK_REG(R30)(r1)
-	std	r31,STK_REG(R31)(r1)
-
-	/* Step 1:
-	 *
-	 * Check permissions, atomically mark the linux PTE busy
-	 * and hashed.
-	 */
-1:
-	ldarx	r31,0,r6
-	/* Check access rights (access & ~(pte_val(*ptep))) */
-	andc.	r0,r4,r31
-	bne-	htab_wrong_access
-	/* Check if PTE is busy */
-	andi.	r0,r31,_PAGE_BUSY
-	/* If so, just bail out and refault if needed. Someone else
-	 * is changing this PTE anyway and might hash it.
-	 */
-	bne-	htab_bail_ok
-	/* Prepare new PTE value (turn access RW into DIRTY, then
-	 * add BUSY and ACCESSED)
-	 */
-	rlwinm	r30,r4,32-9+7,31-7,31-7	/* _PAGE_RW -> _PAGE_DIRTY */
-	or	r30,r30,r31
-	ori	r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
-	oris	r30,r30,_PAGE_COMBO@h
-	/* Write the linux PTE atomically (setting busy) */
-	stdcx.	r30,0,r6
-	bne-	1b
-	isync
-
-	/* Step 2:
-	 *
-	 * Insert/Update the HPTE in the hash table. At this point,
-	 * r4 (access) is re-useable, we use it for the new HPTE flags
-	 */
-
-	/* Load the hidx index */
-	rldicl	r25,r3,64-12,60
-
-BEGIN_FTR_SECTION
-	cmpdi	r9,0			/* check segment size */
-	bne	3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	/* Calc vpn and put it in r29 */
-	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
-	/*
-	 * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
-	 * srdi	r28,r3,VPN_SHIFT
-	 */
-	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-	or	r29,r28,r29
-	/*
-	 * Calculate hash value for primary slot and store it in r28
-	 * r3 = va, r5 = vsid
-	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
-	 */
-	rldicl	r0,r3,64-12,48
-	xor	r28,r5,r0		/* hash */
-	b	4f
-
-3:	/* Calc vpn and put it in r29 */
-	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
-	/*
-	 * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
-	 * srdi	r28,r3,VPN_SHIFT
-	 */
-	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-	or	r29,r28,r29
-
-	/*
-	 * Calculate hash value for primary slot and
-	 * store it in r28 for 1T segment
-	 * r3 = va, r5 = vsid
-	 */
-	sldi	r28,r5,25		/* vsid << 25 */
-	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
-	rldicl	r0,r3,64-12,36
-	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
-	xor	r28,r28,r0		/* hash */
-
-	/* Convert linux PTE bits into HW equivalents */
-4:
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-	andc	r10,r30,r10
-	andi.	r3,r10,0x1fe		/* Get basic set of flags */
-	rlwinm	r0,r10,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
-#else
-	andi.	r3,r30,0x1fe		/* Get basic set of flags */
-	rlwinm	r0,r30,32-9+1,30,30	/* _PAGE_RW -> _PAGE_USER (r0) */
-#endif
-	xori	r3,r3,HPTE_R_N		/* _PAGE_EXEC -> NOEXEC */
-	rlwinm	r4,r30,32-7+1,30,30	/* _PAGE_DIRTY -> _PAGE_USER (r4) */
-	and	r0,r0,r4		/* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-	andc	r0,r3,r0		/* r0 = pte & ~r0 */
-	rlwimi	r3,r0,32-1,31,31	/* Insert result into PP lsb */
-	/*
-	 * Always add "C" bit for perf. Memory coherence is always enabled
-	 */
-	ori	r3,r3,HPTE_R_C | HPTE_R_M
-
-	/* We eventually do the icache sync here (maybe inline that
-	 * code rather than call a C function...)
-	 */
-BEGIN_FTR_SECTION
-	mr	r4,r30
-	mr	r5,r7
-	bl	hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-	/* At this point, r3 contains new PP bits, save them in
-	 * place of "access" in the param area (sic)
-	 */
-	std	r3,STK_PARAM(R4)(r1)
-
-	/* Get htab_hash_mask */
-	ld	r4,htab_hash_mask@got(2)
-	ld	r27,0(r4)	/* htab_hash_mask -> r27 */
-
-	/* Check if we may already be in the hashtable, in this case, we
-	 * go to out-of-line code to try to modify the HPTE. We look for
-	 * the bit at (1 >> (index + 32))
-	 */
-	rldicl.	r0,r31,64-12,48
-	li	r26,0			/* Default hidx */
-	beq	htab_insert_pte
-
-	/*
-	 * Check if the pte was already inserted into the hash table
-	 * as a 64k HW page, and invalidate the 64k HPTE if so.
-	 */
-	andis.	r0,r31,_PAGE_COMBO@h
-	beq	htab_inval_old_hpte
-
-	ld	r6,STK_PARAM(R6)(r1)
-	ori	r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */
-	ld	r26,0(r26)
-	addi	r5,r25,36		/* Check actual HPTE_SUB bit, this */
-	rldcr.	r0,r31,r5,0		/* must match pgtable.h definition */
-	bne	htab_modify_pte
-
-htab_insert_pte:
-	/* real page number in r5, PTE RPN value + index */
-	andis.	r0,r31,_PAGE_4K_PFN@h
-	srdi	r5,r31,PTE_RPN_SHIFT
-	bne-	htab_special_pfn
-	sldi	r5,r5,PAGE_FACTOR
-	add	r5,r5,r25
-htab_special_pfn:
-	sldi	r5,r5,HW_PAGE_SHIFT
-
-	/* Calculate primary group hash */
-	and	r0,r28,r27
-	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
-
-	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
-	mr	r4,r29			/* Retrieve vpn */
-	li	r7,0			/* !bolted, !secondary */
-	li	r8,MMU_PAGE_4K		/* page size */
-	li	r9,MMU_PAGE_4K		/* actual page size */
-	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-.globl htab_call_hpte_insert1
-htab_call_hpte_insert1:
-	bl	.			/* patched by htab_finish_init() */
-	cmpdi	0,r3,0
-	bge	htab_pte_insert_ok	/* Insertion successful */
-	cmpdi	0,r3,-2			/* Critical failure */
-	beq-	htab_pte_insert_failure
-
-	/* Now try secondary slot */
-
-	/* real page number in r5, PTE RPN value + index */
-	andis.	r0,r31,_PAGE_4K_PFN@h
-	srdi	r5,r31,PTE_RPN_SHIFT
-	bne-	3f
-	sldi	r5,r5,PAGE_FACTOR
-	add	r5,r5,r25
-3:	sldi	r5,r5,HW_PAGE_SHIFT
-
-	/* Calculate secondary group hash */
-	andc	r0,r27,r28
-	rldicr	r3,r0,3,63-3		/* r0 = (~hash & mask) << 3 */
-
-	/* Call ppc_md.hpte_insert */
-	ld	r6,STK_PARAM(R4)(r1)	/* Retrieve new pp bits */
-	mr	r4,r29			/* Retrieve vpn */
-	li	r7,HPTE_V_SECONDARY	/* !bolted, secondary */
-	li	r8,MMU_PAGE_4K		/* page size */
-	li	r9,MMU_PAGE_4K		/* actual page size */
-	ld	r10,STK_PARAM(R9)(r1)	/* segment size */
-.globl htab_call_hpte_insert2
-htab_call_hpte_insert2:
-	bl	.			/* patched by htab_finish_init() */
-	cmpdi	0,r3,0
-	bge+	htab_pte_insert_ok	/* Insertion successful */
-	cmpdi	0,r3,-2			/* Critical failure */
-	beq-	htab_pte_insert_failure
-
-	/* Both are full, we need to evict something */
-	mftb	r0
-	/* Pick a random group based on TB */
-	andi.	r0,r0,1
-	mr	r5,r28
-	bne	2f
-	not	r5,r5
-2:	and	r0,r5,r27
-	rldicr	r3,r0,3,63-3		/* r0 = (hash & mask) << 3 */
-	/* Call ppc_md.hpte_remove */
-.globl htab_call_hpte_remove
-htab_call_hpte_remove:
-	bl	.			/* patched by htab_finish_init() */
-
-	/* Try all again */
-	b	htab_insert_pte
-
-	/*
-	 * Call out to C code to invalidate an 64k HW HPTE that is
-	 * useless now that the segment has been switched to 4k pages.
-	 */
-htab_inval_old_hpte:
-	mr	r3,r29			/* vpn */
-	mr	r4,r31			/* PTE.pte */
-	li	r5,0			/* PTE.hidx */
-	li	r6,MMU_PAGE_64K		/* psize */
-	ld	r7,STK_PARAM(R9)(r1)	/* ssize */
-	ld	r8,STK_PARAM(R8)(r1)	/* flags */
-	bl	flush_hash_page
-	/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
-	lis	r0,_PAGE_HPTE_SUB@h
-	ori	r0,r0,_PAGE_HPTE_SUB@l
-	andc	r30,r30,r0
-	b	htab_insert_pte
-
-htab_bail_ok:
-	li	r3,0
-	b	htab_bail
-
-htab_pte_insert_ok:
-	/* Insert slot number & secondary bit in PTE second half,
-	 * clear _PAGE_BUSY and set approriate HPTE slot bit
-	 */
-	ld	r6,STK_PARAM(R6)(r1)
-	li	r0,_PAGE_BUSY
-	andc	r30,r30,r0
-	/* HPTE SUB bit */
-	li	r0,1
-	subfic	r5,r25,27		/* Must match bit position in */
-	sld	r0,r0,r5		/* pgtable.h */
-	or	r30,r30,r0
-	/* hindx */
-	sldi	r5,r25,2
-	sld	r3,r3,r5
-	li	r4,0xf
-	sld	r4,r4,r5
-	andc	r26,r26,r4
-	or	r26,r26,r3
-	ori	r5,r6,PTE_PAGE_HIDX_OFFSET
-	std	r26,0(r5)
-	lwsync
-	std	r30,0(r6)
-	li	r3, 0
-htab_bail:
-	ld	r25,STK_REG(R25)(r1)
-	ld	r26,STK_REG(R26)(r1)
-	ld	r27,STK_REG(R27)(r1)
-	ld	r28,STK_REG(R28)(r1)
-	ld	r29,STK_REG(R29)(r1)
-	ld	r30,STK_REG(R30)(r1)
-	ld	r31,STK_REG(R31)(r1)
-	addi	r1,r1,STACKFRAMESIZE
-	ld	r0,16(r1)
-	mtlr	r0
-	blr
-
-htab_modify_pte:
-	/* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-	mr	r4,r3
-	sldi	r5,r25,2
-	srd	r3,r26,r5
-
-	/* Secondary group ? if yes, get a inverted hash value */
-	mr	r5,r28
-	andi.	r0,r3,0x8 /* page secondary ? */
-	beq	1f
-	not	r5,r5
-1:	andi.	r3,r3,0x7 /* extract idx alone */
-
-	/* Calculate proper slot value for ppc_md.hpte_updatepp */
-	and	r0,r5,r27
-	rldicr	r0,r0,3,63-3		/* r0 = (hash & mask) << 3 */
-	add	r3,r0,r3	/* add slot idx */
-
-	/* Call ppc_md.hpte_updatepp */
-	mr	r5,r29			/* vpn */
-	li	r6,MMU_PAGE_4K		/* base page size */
-	li	r7,MMU_PAGE_4K		/* actual page size */
-	ld	r8,STK_PARAM(R9)(r1)	/* segment size */
-	ld	r9,STK_PARAM(R8)(r1)	/* get "flags" param */
-.globl htab_call_hpte_updatepp
-htab_call_hpte_updatepp:
-	bl	.			/* patched by htab_finish_init() */
-
-	/* if we failed because typically the HPTE wasn't really here
-	 * we try an insertion.
-	 */
-	cmpdi	0,r3,-1
-	beq-	htab_insert_pte
-
-	/* Clear the BUSY bit and Write out the PTE */
-	li	r0,_PAGE_BUSY
-	andc	r30,r30,r0
-	ld	r6,STK_PARAM(R6)(r1)
-	std	r30,0(r6)
-	li	r3,0
-	b	htab_bail
-
-htab_wrong_access:
-	/* Bail out clearing reservation */
-	stdcx.	r31,0,r6
-	li	r3,1
-	b	htab_bail
-
-htab_pte_insert_failure:
-	/* Bail out restoring old PTE */
-	ld	r6,STK_PARAM(R6)(r1)
-	std	r31,0(r6)
-	li	r3,-1
-	b	htab_bail
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#ifdef CONFIG_PPC_64K_PAGES
-
 /*****************************************************************************
  *                                                                           *
  *           64K SW & 64K HW in a 64K segment pages implementation           *
@@ -994,10 +621,3 @@ ht64_pte_insert_failure:
 
 
 #endif /* CONFIG_PPC_64K_PAGES */
-
-
-/*****************************************************************************
- *                                                                           *
- *    Huge pages implementation is in hugetlbpage.c                          *
- *                                                                           *
- *****************************************************************************/
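Both the removed assembly (the mftb / andi. r0,r0,1 sequence above) and the new C path fall back to evicting an entry when the primary and the secondary HPTE group are both full: pick one of the two groups pseudo-randomly from the low timebase bit, remove a random entry from it, and retry the insert. A standalone sketch of that group selection (user-space C, stand-in names, not kernel code):

#include <stdio.h>
#include <time.h>

#define DEMO_HPTES_PER_GROUP 8UL

/* Stand-in for the timebase register read (mftb() in the kernel). */
static unsigned long demo_mftb(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long)ts.tv_nsec;
}

static unsigned long pick_eviction_group(unsigned long hash,
					 unsigned long hash_mask)
{
	/* Low timebase bit chooses primary (hash) or secondary (~hash). */
	unsigned long h = (demo_mftb() & 0x1) ? hash : ~hash;

	return ((h & hash_mask) * DEMO_HPTES_PER_GROUP) & ~0x7UL;
}

int main(void)
{
	/* A made-up hash and mask, just to show the two possible groups. */
	printf("%#lx\n", pick_eviction_group(0x1234, 0xffff));
	return 0;
}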
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 7d4f254a2671..995809911f17 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -653,7 +653,7 @@ static void __init htab_finish_init(void)
 	patch_branch(ht64_call_hpte_updatepp,
 		     ppc_function_entry(ppc_md.hpte_updatepp),
 		     BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_64K_PAGES */
+#else /* !CONFIG_PPC_64K_PAGES */
 
 	patch_branch(htab_call_hpte_insert1,
 		     ppc_function_entry(ppc_md.hpte_insert),
@@ -667,6 +667,8 @@ static void __init htab_finish_init(void)
 	patch_branch(htab_call_hpte_updatepp,
 		     ppc_function_entry(ppc_md.hpte_updatepp),
 		     BRANCH_SET_LINK);
+#endif
+
 }
 
 static void __init htab_initialize(void)
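The hunks above turn the former "#endif" into an "#else", so the htab_* assembly branch sites are patched only when the 4K-base-page assembly path is still built. A compilable paraphrase of the resulting shape (stub function and only the site names visible in this patch; the real htab_finish_init() patches additional sites via patch_branch()/ppc_function_entry()):

#define CONFIG_PPC_64K_PAGES 1	/* assumption for the sketch */

static void demo_patch(const char *site)
{
	(void)site;	/* the kernel patches a branch instruction at this site */
}

static void demo_htab_finish_init(void)
{
#ifdef CONFIG_PPC_64K_PAGES
	/* 64K base pages: only the ht64_* assembly sites remain. */
	demo_patch("ht64_call_hpte_updatepp");
#else /* !CONFIG_PPC_64K_PAGES */
	/* 4K base pages: the htab_* assembly path is still built. */
	demo_patch("htab_call_hpte_insert1");
	demo_patch("htab_call_hpte_insert2");
	demo_patch("htab_call_hpte_remove");
	demo_patch("htab_call_hpte_updatepp");
#endif
}

int main(void)
{
	demo_htab_finish_init();
	return 0;
}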