path: root/arch/powerpc/mm
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
commit    5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree      9189bd6c81fe5f982a7ae45d2f3d900176658509 /arch/powerpc/mm
parent    283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent    d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

  - 64TB address space support for 64-bit processes by Aneesh Kumar

  - Gavin Shan did a major cleanup & re-organization of our EEH support
    code (IBM fancy PCI error handling & recovery infrastructure) which
    paves the way for supporting different platform backends, along with
    some rework of the PCIe code for the PowerNV platform in order to
    remove home made resource allocations and instead use the generic
    code (which is possible after some small improvements to it done by
    Gavin).

  - Uprobes support by Ananth N Mavinakayanahalli

  - A pile of embedded updates from Freescale folks, including new SoC
    and board supports, more KVM stuff including preparing for 64-bit
    BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c                 1
-rw-r--r--  arch/powerpc/mm/hash_low_64.S          97
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      192
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c        48
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c   15
-rw-r--r--  arch/powerpc/mm/init_64.c               1
-rw-r--r--  arch/powerpc/mm/mem.c                   5
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c   10
-rw-r--r--  arch/powerpc/mm/pgtable_64.c           13
-rw-r--r--  arch/powerpc/mm/slb_low.S              62
-rw-r--r--  arch/powerpc/mm/slice.c               112
-rw-r--r--  arch/powerpc/mm/stab.c                  3
-rw-r--r--  arch/powerpc/mm/subpage-prot.c          6
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c           11
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S          18
15 files changed, 357 insertions, 237 deletions
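
Most of the hunks below replace a full virtual address ("va") with a virtual page number ("vpn") throughout the hash MMU paths, as part of the 64TB address-space work. As a rough orientation, the C sketch below shows how such a vpn is built from a VSID and an effective address for a 256M segment; it is an illustrative sketch only, using the SID_SHIFT (28) and VPN_SHIFT (12) constants from this series, not a copy of the kernel's hpt_vpn() helper.

    /* Illustrative sketch, not kernel code; constants match this series. */
    #define SID_SHIFT 28   /* 256M segment */
    #define VPN_SHIFT 12   /* low bits dropped from the vpn */

    static unsigned long sketch_vpn_256m(unsigned long vsid, unsigned long ea)
    {
            /* offset of ea within its 256M segment */
            unsigned long seg_off = ea & ((1UL << SID_SHIFT) - 1);
            /* concatenate vsid and segment offset, minus the low VPN_SHIFT bits */
            return (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT);
    }

This is the same operation the "Calc vpn and put it in r29" sequences in hash_low_64.S perform with sldi/rldicl/or.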
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e5f028b5794..5495ebe983a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address)
133 up_read(&current->mm->mmap_sem); 133 up_read(&current->mm->mmap_sem);
134 134
135 if (user_mode(regs)) { 135 if (user_mode(regs)) {
136 current->thread.trap_nr = BUS_ADRERR;
136 info.si_signo = SIGBUS; 137 info.si_signo = SIGBUS;
137 info.si_errno = 0; 138 info.si_errno = 0;
138 info.si_code = BUS_ADRERR; 139 info.si_code = BUS_ADRERR;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d29..56585086413 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
63 /* Save non-volatile registers. 63 /* Save non-volatile registers.
64 * r31 will hold "old PTE" 64 * r31 will hold "old PTE"
65 * r30 is "new PTE" 65 * r30 is "new PTE"
66 * r29 is "va" 66 * r29 is vpn
67 * r28 is a hash value 67 * r28 is a hash value
68 * r27 is hashtab mask (maybe dynamic patched instead ?) 68 * r27 is hashtab mask (maybe dynamic patched instead ?)
69 */ 69 */
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
111 cmpdi r9,0 /* check segment size */ 111 cmpdi r9,0 /* check segment size */
112 bne 3f 112 bne 3f
113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 113END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
114 /* Calc va and put it in r29 */ 114 /* Calc vpn and put it in r29 */
115 rldicr r29,r5,28,63-28 115 sldi r29,r5,SID_SHIFT - VPN_SHIFT
116 rldicl r3,r3,0,36 116 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
117 or r29,r3,r29 117 or r29,r28,r29
118 118
119 /* Calculate hash value for primary slot and store it in r28 */ 119 /* Calculate hash value for primary slot and store it in r28 */
120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 120 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
122 xor r28,r5,r0 122 xor r28,r5,r0
123 b 4f 123 b 4f
124 124
1253: /* Calc VA and hash in r29 and r28 for 1T segment */ 1253: /* Calc vpn and put it in r29 */
126 sldi r29,r5,40 /* vsid << 40 */ 126 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
127 clrldi r3,r3,24 /* ea & 0xffffffffff */ 127 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
128 or r29,r28,r29
129
130 /*
131 * calculate hash value for primary slot and
132 * store it in r28 for 1T segment
133 */
128 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 134 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
129 clrldi r5,r5,40 /* vsid & 0xffffff */ 135 clrldi r5,r5,40 /* vsid & 0xffffff */
130 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 136 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
131 xor r28,r28,r5 137 xor r28,r28,r5
132 or r29,r3,r29 /* VA */
133 xor r28,r28,r0 /* hash */ 138 xor r28,r28,r0 /* hash */
134 139
135 /* Convert linux PTE bits into HW equivalents */ 140 /* Convert linux PTE bits into HW equivalents */
@@ -185,7 +190,7 @@ htab_insert_pte:
185 190
186 /* Call ppc_md.hpte_insert */ 191 /* Call ppc_md.hpte_insert */
187 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 192 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
188 mr r4,r29 /* Retrieve va */ 193 mr r4,r29 /* Retrieve vpn */
189 li r7,0 /* !bolted, !secondary */ 194 li r7,0 /* !bolted, !secondary */
190 li r8,MMU_PAGE_4K /* page size */ 195 li r8,MMU_PAGE_4K /* page size */
191 ld r9,STK_PARAM(R9)(r1) /* segment size */ 196 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
208 213
209 /* Call ppc_md.hpte_insert */ 214 /* Call ppc_md.hpte_insert */
210 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 215 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
211 mr r4,r29 /* Retrieve va */ 216 mr r4,r29 /* Retrieve vpn */
212 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 217 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
213 li r8,MMU_PAGE_4K /* page size */ 218 li r8,MMU_PAGE_4K /* page size */
214 ld r9,STK_PARAM(R9)(r1) /* segment size */ 219 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
278 add r3,r0,r3 /* add slot idx */ 283 add r3,r0,r3 /* add slot idx */
279 284
280 /* Call ppc_md.hpte_updatepp */ 285 /* Call ppc_md.hpte_updatepp */
281 mr r5,r29 /* va */ 286 mr r5,r29 /* vpn */
282 li r6,MMU_PAGE_4K /* page size */ 287 li r6,MMU_PAGE_4K /* page size */
283 ld r7,STK_PARAM(R9)(r1) /* segment size */ 288 ld r7,STK_PARAM(R9)(r1) /* segment size */
284 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 289 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
339 /* Save non-volatile registers. 344 /* Save non-volatile registers.
340 * r31 will hold "old PTE" 345 * r31 will hold "old PTE"
341 * r30 is "new PTE" 346 * r30 is "new PTE"
342 * r29 is "va" 347 * r29 is vpn
343 * r28 is a hash value 348 * r28 is a hash value
344 * r27 is hashtab mask (maybe dynamic patched instead ?) 349 * r27 is hashtab mask (maybe dynamic patched instead ?)
345 * r26 is the hidx mask 350 * r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
394 cmpdi r9,0 /* check segment size */ 399 cmpdi r9,0 /* check segment size */
395 bne 3f 400 bne 3f
396END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 401END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
397 /* Calc va and put it in r29 */ 402 /* Calc vpn and put it in r29 */
398 rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ 403 sldi r29,r5,SID_SHIFT - VPN_SHIFT
399 rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ 404 /*
400 or r29,r3,r29 /* r29 = va */ 405 * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
406 * srdi r28,r3,VPN_SHIFT
407 */
408 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
409 or r29,r28,r29
401 410
402 /* Calculate hash value for primary slot and store it in r28 */ 411 /* Calculate hash value for primary slot and store it in r28 */
403 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 412 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
405 xor r28,r5,r0 414 xor r28,r5,r0
406 b 4f 415 b 4f
407 416
4083: /* Calc VA and hash in r29 and r28 for 1T segment */ 4173: /* Calc vpn and put it in r29 */
409 sldi r29,r5,40 /* vsid << 40 */ 418 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
410 clrldi r3,r3,24 /* ea & 0xffffffffff */ 419 /*
420 * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
421 * srdi r28,r3,VPN_SHIFT
422 */
423 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
424 or r29,r28,r29
425
426 /*
427 * Calculate hash value for primary slot and
428 * store it in r28 for 1T segment
429 */
411 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 430 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
412 clrldi r5,r5,40 /* vsid & 0xffffff */ 431 clrldi r5,r5,40 /* vsid & 0xffffff */
413 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ 432 rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
414 xor r28,r28,r5 433 xor r28,r28,r5
415 or r29,r3,r29 /* VA */
416 xor r28,r28,r0 /* hash */ 434 xor r28,r28,r0 /* hash */
417 435
418 /* Convert linux PTE bits into HW equivalents */ 436 /* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
488 506
489 /* Call ppc_md.hpte_insert */ 507 /* Call ppc_md.hpte_insert */
490 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 508 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
491 mr r4,r29 /* Retrieve va */ 509 mr r4,r29 /* Retrieve vpn */
492 li r7,0 /* !bolted, !secondary */ 510 li r7,0 /* !bolted, !secondary */
493 li r8,MMU_PAGE_4K /* page size */ 511 li r8,MMU_PAGE_4K /* page size */
494 ld r9,STK_PARAM(R9)(r1) /* segment size */ 512 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
515 533
516 /* Call ppc_md.hpte_insert */ 534 /* Call ppc_md.hpte_insert */
517 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 535 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
518 mr r4,r29 /* Retrieve va */ 536 mr r4,r29 /* Retrieve vpn */
519 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 537 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
520 li r8,MMU_PAGE_4K /* page size */ 538 li r8,MMU_PAGE_4K /* page size */
521 ld r9,STK_PARAM(R9)(r1) /* segment size */ 539 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
547 * useless now that the segment has been switched to 4k pages. 565 * useless now that the segment has been switched to 4k pages.
548 */ 566 */
549htab_inval_old_hpte: 567htab_inval_old_hpte:
550 mr r3,r29 /* virtual addr */ 568 mr r3,r29 /* vpn */
551 mr r4,r31 /* PTE.pte */ 569 mr r4,r31 /* PTE.pte */
552 li r5,0 /* PTE.hidx */ 570 li r5,0 /* PTE.hidx */
553 li r6,MMU_PAGE_64K /* psize */ 571 li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
620 add r3,r0,r3 /* add slot idx */ 638 add r3,r0,r3 /* add slot idx */
621 639
622 /* Call ppc_md.hpte_updatepp */ 640 /* Call ppc_md.hpte_updatepp */
623 mr r5,r29 /* va */ 641 mr r5,r29 /* vpn */
624 li r6,MMU_PAGE_4K /* page size */ 642 li r6,MMU_PAGE_4K /* page size */
625 ld r7,STK_PARAM(R9)(r1) /* segment size */ 643 ld r7,STK_PARAM(R9)(r1) /* segment size */
626 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 644 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
676 /* Save non-volatile registers. 694 /* Save non-volatile registers.
677 * r31 will hold "old PTE" 695 * r31 will hold "old PTE"
678 * r30 is "new PTE" 696 * r30 is "new PTE"
679 * r29 is "va" 697 * r29 is vpn
680 * r28 is a hash value 698 * r28 is a hash value
681 * r27 is hashtab mask (maybe dynamic patched instead ?) 699 * r27 is hashtab mask (maybe dynamic patched instead ?)
682 */ 700 */
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
729 cmpdi r9,0 /* check segment size */ 747 cmpdi r9,0 /* check segment size */
730 bne 3f 748 bne 3f
731END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) 749END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
732 /* Calc va and put it in r29 */ 750 /* Calc vpn and put it in r29 */
733 rldicr r29,r5,28,63-28 751 sldi r29,r5,SID_SHIFT - VPN_SHIFT
734 rldicl r3,r3,0,36 752 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
735 or r29,r3,r29 753 or r29,r28,r29
736 754
737 /* Calculate hash value for primary slot and store it in r28 */ 755 /* Calculate hash value for primary slot and store it in r28 */
738 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ 756 rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
740 xor r28,r5,r0 758 xor r28,r5,r0
741 b 4f 759 b 4f
742 760
7433: /* Calc VA and hash in r29 and r28 for 1T segment */ 7613: /* Calc vpn and put it in r29 */
744 sldi r29,r5,40 /* vsid << 40 */ 762 sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
745 clrldi r3,r3,24 /* ea & 0xffffffffff */ 763 rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
764 or r29,r28,r29
765
766 /*
767 * calculate hash value for primary slot and
768 * store it in r28 for 1T segment
769 */
746 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ 770 rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
747 clrldi r5,r5,40 /* vsid & 0xffffff */ 771 clrldi r5,r5,40 /* vsid & 0xffffff */
748 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ 772 rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
749 xor r28,r28,r5 773 xor r28,r28,r5
750 or r29,r3,r29 /* VA */
751 xor r28,r28,r0 /* hash */ 774 xor r28,r28,r0 /* hash */
752 775
753 /* Convert linux PTE bits into HW equivalents */ 776 /* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
806 829
807 /* Call ppc_md.hpte_insert */ 830 /* Call ppc_md.hpte_insert */
808 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
809 mr r4,r29 /* Retrieve va */ 832 mr r4,r29 /* Retrieve vpn */
810 li r7,0 /* !bolted, !secondary */ 833 li r7,0 /* !bolted, !secondary */
811 li r8,MMU_PAGE_64K 834 li r8,MMU_PAGE_64K
812 ld r9,STK_PARAM(R9)(r1) /* segment size */ 835 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
829 852
830 /* Call ppc_md.hpte_insert */ 853 /* Call ppc_md.hpte_insert */
831 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */ 854 ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
832 mr r4,r29 /* Retrieve va */ 855 mr r4,r29 /* Retrieve vpn */
833 li r7,HPTE_V_SECONDARY /* !bolted, secondary */ 856 li r7,HPTE_V_SECONDARY /* !bolted, secondary */
834 li r8,MMU_PAGE_64K 857 li r8,MMU_PAGE_64K
835 ld r9,STK_PARAM(R9)(r1) /* segment size */ 858 ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
899 add r3,r0,r3 /* add slot idx */ 922 add r3,r0,r3 /* add slot idx */
900 923
901 /* Call ppc_md.hpte_updatepp */ 924 /* Call ppc_md.hpte_updatepp */
902 mr r5,r29 /* va */ 925 mr r5,r29 /* vpn */
903 li r6,MMU_PAGE_64K 926 li r6,MMU_PAGE_64K
904 ld r7,STK_PARAM(R9)(r1) /* segment size */ 927 ld r7,STK_PARAM(R9)(r1) /* segment size */
905 ld r8,STK_PARAM(R8)(r1) /* get "local" param */ 928 ld r8,STK_PARAM(R8)(r1) /* get "local" param */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 90039bc6411..ffc1e00f7a2 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -14,10 +14,10 @@
14 14
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/bitops.h> 16#include <linux/bitops.h>
17#include <linux/of.h>
17#include <linux/threads.h> 18#include <linux/threads.h>
18#include <linux/smp.h> 19#include <linux/smp.h>
19 20
20#include <asm/abs_addr.h>
21#include <asm/machdep.h> 21#include <asm/machdep.h>
22#include <asm/mmu.h> 22#include <asm/mmu.h>
23#include <asm/mmu_context.h> 23#include <asm/mmu_context.h>
@@ -39,22 +39,35 @@
39 39
40DEFINE_RAW_SPINLOCK(native_tlbie_lock); 40DEFINE_RAW_SPINLOCK(native_tlbie_lock);
41 41
42static inline void __tlbie(unsigned long va, int psize, int ssize) 42static inline void __tlbie(unsigned long vpn, int psize, int ssize)
43{ 43{
44 unsigned long va;
44 unsigned int penc; 45 unsigned int penc;
45 46
46 /* clear top 16 bits, non SLS segment */ 47 /*
 48 * We need 14 to 65 bits of va for a tlbie of 4K page
49 * With vpn we ignore the lower VPN_SHIFT bits already.
50 * And top two bits are already ignored because we can
 51 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
52 * of 12.
53 */
54 va = vpn << VPN_SHIFT;
55 /*
56 * clear top 16 bits of 64bit va, non SLS segment
 57 * Older versions of the architecture (2.02 and earlier) require the
58 * masking of the top 16 bits.
59 */
47 va &= ~(0xffffULL << 48); 60 va &= ~(0xffffULL << 48);
48 61
49 switch (psize) { 62 switch (psize) {
50 case MMU_PAGE_4K: 63 case MMU_PAGE_4K:
51 va &= ~0xffful;
52 va |= ssize << 8; 64 va |= ssize << 8;
53 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 65 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
54 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 66 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
55 : "memory"); 67 : "memory");
56 break; 68 break;
57 default: 69 default:
70 /* We need 14 to 14 + i bits of va */
58 penc = mmu_psize_defs[psize].penc; 71 penc = mmu_psize_defs[psize].penc;
59 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 72 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
60 va |= penc << 12; 73 va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
67 } 80 }
68} 81}
69 82
70static inline void __tlbiel(unsigned long va, int psize, int ssize) 83static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
71{ 84{
85 unsigned long va;
72 unsigned int penc; 86 unsigned int penc;
73 87
 74 /* clear top 16 bits, non SLS segment */ 88 /* VPN_SHIFT can be at most 12 */
89 va = vpn << VPN_SHIFT;
90 /*
91 * clear top 16 bits of 64 bit va, non SLS segment
 92 * Older versions of the architecture (2.02 and earlier) require the
93 * masking of the top 16 bits.
94 */
75 va &= ~(0xffffULL << 48); 95 va &= ~(0xffffULL << 48);
76 96
77 switch (psize) { 97 switch (psize) {
78 case MMU_PAGE_4K: 98 case MMU_PAGE_4K:
79 va &= ~0xffful;
80 va |= ssize << 8; 99 va |= ssize << 8;
81 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 100 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
82 : : "r"(va) : "memory"); 101 : : "r"(va) : "memory");
83 break; 102 break;
84 default: 103 default:
104 /* We need 14 to 14 + i bits of va */
85 penc = mmu_psize_defs[psize].penc; 105 penc = mmu_psize_defs[psize].penc;
86 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1); 106 va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
87 va |= penc << 12; 107 va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
94 114
95} 115}
96 116
97static inline void tlbie(unsigned long va, int psize, int ssize, int local) 117static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
98{ 118{
99 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); 119 unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
100 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 120 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
105 raw_spin_lock(&native_tlbie_lock); 125 raw_spin_lock(&native_tlbie_lock);
106 asm volatile("ptesync": : :"memory"); 126 asm volatile("ptesync": : :"memory");
107 if (use_local) { 127 if (use_local) {
108 __tlbiel(va, psize, ssize); 128 __tlbiel(vpn, psize, ssize);
109 asm volatile("ptesync": : :"memory"); 129 asm volatile("ptesync": : :"memory");
110 } else { 130 } else {
111 __tlbie(va, psize, ssize); 131 __tlbie(vpn, psize, ssize);
112 asm volatile("eieio; tlbsync; ptesync": : :"memory"); 132 asm volatile("eieio; tlbsync; ptesync": : :"memory");
113 } 133 }
114 if (lock_tlbie && !use_local) 134 if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
134 clear_bit_unlock(HPTE_LOCK_BIT, word); 154 clear_bit_unlock(HPTE_LOCK_BIT, word);
135} 155}
136 156
137static long native_hpte_insert(unsigned long hpte_group, unsigned long va, 157static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
138 unsigned long pa, unsigned long rflags, 158 unsigned long pa, unsigned long rflags,
139 unsigned long vflags, int psize, int ssize) 159 unsigned long vflags, int psize, int ssize)
140{ 160{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
143 int i; 163 int i;
144 164
145 if (!(vflags & HPTE_V_BOLTED)) { 165 if (!(vflags & HPTE_V_BOLTED)) {
146 DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx," 166 DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
147 " rflags=%lx, vflags=%lx, psize=%d)\n", 167 " rflags=%lx, vflags=%lx, psize=%d)\n",
148 hpte_group, va, pa, rflags, vflags, psize); 168 hpte_group, vpn, pa, rflags, vflags, psize);
149 } 169 }
150 170
151 for (i = 0; i < HPTES_PER_GROUP; i++) { 171 for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
163 if (i == HPTES_PER_GROUP) 183 if (i == HPTES_PER_GROUP)
164 return -1; 184 return -1;
165 185
166 hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID; 186 hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
167 hpte_r = hpte_encode_r(pa, psize) | rflags; 187 hpte_r = hpte_encode_r(pa, psize) | rflags;
168 188
169 if (!(vflags & HPTE_V_BOLTED)) { 189 if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
225} 245}
226 246
227static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 247static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
228 unsigned long va, int psize, int ssize, 248 unsigned long vpn, int psize, int ssize,
229 int local) 249 int local)
230{ 250{
231 struct hash_pte *hptep = htab_address + slot; 251 struct hash_pte *hptep = htab_address + slot;
232 unsigned long hpte_v, want_v; 252 unsigned long hpte_v, want_v;
233 int ret = 0; 253 int ret = 0;
234 254
235 want_v = hpte_encode_v(va, psize, ssize); 255 want_v = hpte_encode_v(vpn, psize, ssize);
236 256
237 DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)", 257 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
238 va, want_v & HPTE_V_AVPN, slot, newpp); 258 vpn, want_v & HPTE_V_AVPN, slot, newpp);
239 259
240 native_lock_hpte(hptep); 260 native_lock_hpte(hptep);
241 261
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
254 native_unlock_hpte(hptep); 274 native_unlock_hpte(hptep);
255 275
256 /* Ensure it is out of the tlb too. */ 276 /* Ensure it is out of the tlb too. */
257 tlbie(va, psize, ssize, local); 277 tlbie(vpn, psize, ssize, local);
258 278
259 return ret; 279 return ret;
260} 280}
261 281
262static long native_hpte_find(unsigned long va, int psize, int ssize) 282static long native_hpte_find(unsigned long vpn, int psize, int ssize)
263{ 283{
264 struct hash_pte *hptep; 284 struct hash_pte *hptep;
265 unsigned long hash; 285 unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
267 long slot; 287 long slot;
268 unsigned long want_v, hpte_v; 288 unsigned long want_v, hpte_v;
269 289
270 hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize); 290 hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
271 want_v = hpte_encode_v(va, psize, ssize); 291 want_v = hpte_encode_v(vpn, psize, ssize);
272 292
273 /* Bolted mappings are only ever in the primary group */ 293 /* Bolted mappings are only ever in the primary group */
274 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 294 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
295static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, 315static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
296 int psize, int ssize) 316 int psize, int ssize)
297{ 317{
298 unsigned long vsid, va; 318 unsigned long vpn;
319 unsigned long vsid;
299 long slot; 320 long slot;
300 struct hash_pte *hptep; 321 struct hash_pte *hptep;
301 322
302 vsid = get_kernel_vsid(ea, ssize); 323 vsid = get_kernel_vsid(ea, ssize);
303 va = hpt_va(ea, vsid, ssize); 324 vpn = hpt_vpn(ea, vsid, ssize);
304 325
305 slot = native_hpte_find(va, psize, ssize); 326 slot = native_hpte_find(vpn, psize, ssize);
306 if (slot == -1) 327 if (slot == -1)
307 panic("could not find page to bolt\n"); 328 panic("could not find page to bolt\n");
308 hptep = htab_address + slot; 329 hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
312 (newpp & (HPTE_R_PP | HPTE_R_N)); 333 (newpp & (HPTE_R_PP | HPTE_R_N));
313 334
314 /* Ensure it is out of the tlb too. */ 335 /* Ensure it is out of the tlb too. */
315 tlbie(va, psize, ssize, 0); 336 tlbie(vpn, psize, ssize, 0);
316} 337}
317 338
318static void native_hpte_invalidate(unsigned long slot, unsigned long va, 339static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
319 int psize, int ssize, int local) 340 int psize, int ssize, int local)
320{ 341{
321 struct hash_pte *hptep = htab_address + slot; 342 struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
325 346
326 local_irq_save(flags); 347 local_irq_save(flags);
327 348
328 DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot); 349 DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
329 350
330 want_v = hpte_encode_v(va, psize, ssize); 351 want_v = hpte_encode_v(vpn, psize, ssize);
331 native_lock_hpte(hptep); 352 native_lock_hpte(hptep);
332 hpte_v = hptep->v; 353 hpte_v = hptep->v;
333 354
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
339 hptep->v = 0; 360 hptep->v = 0;
340 361
341 /* Invalidate the TLB */ 362 /* Invalidate the TLB */
342 tlbie(va, psize, ssize, local); 363 tlbie(vpn, psize, ssize, local);
343 364
344 local_irq_restore(flags); 365 local_irq_restore(flags);
345} 366}
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
349#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT) 370#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
350 371
351static void hpte_decode(struct hash_pte *hpte, unsigned long slot, 372static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
352 int *psize, int *ssize, unsigned long *va) 373 int *psize, int *ssize, unsigned long *vpn)
353{ 374{
375 unsigned long avpn, pteg, vpi;
354 unsigned long hpte_r = hpte->r; 376 unsigned long hpte_r = hpte->r;
355 unsigned long hpte_v = hpte->v; 377 unsigned long hpte_v = hpte->v;
356 unsigned long avpn; 378 unsigned long vsid, seg_off;
357 int i, size, shift, penc; 379 int i, size, shift, penc;
358 380
359 if (!(hpte_v & HPTE_V_LARGE)) 381 if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
380 } 402 }
381 403
382 /* This works for all page sizes, and for 256M and 1T segments */ 404 /* This works for all page sizes, and for 256M and 1T segments */
405 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
383 shift = mmu_psize_defs[size].shift; 406 shift = mmu_psize_defs[size].shift;
384 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
385
386 if (shift < 23) {
387 unsigned long vpi, vsid, pteg;
388 407
389 pteg = slot / HPTES_PER_GROUP; 408 avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
390 if (hpte_v & HPTE_V_SECONDARY) 409 pteg = slot / HPTES_PER_GROUP;
391 pteg = ~pteg; 410 if (hpte_v & HPTE_V_SECONDARY)
392 switch (hpte_v >> HPTE_V_SSIZE_SHIFT) { 411 pteg = ~pteg;
393 case MMU_SEGSIZE_256M: 412
394 vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask; 413 switch (*ssize) {
395 break; 414 case MMU_SEGSIZE_256M:
396 case MMU_SEGSIZE_1T: 415 /* We only have 28 - 23 bits of seg_off in avpn */
397 vsid = avpn >> 40; 416 seg_off = (avpn & 0x1f) << 23;
417 vsid = avpn >> 5;
418 /* We can find more bits from the pteg value */
419 if (shift < 23) {
420 vpi = (vsid ^ pteg) & htab_hash_mask;
421 seg_off |= vpi << shift;
422 }
423 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
424 case MMU_SEGSIZE_1T:
425 /* We only have 40 - 23 bits of seg_off in avpn */
426 seg_off = (avpn & 0x1ffff) << 23;
427 vsid = avpn >> 17;
428 if (shift < 23) {
398 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; 429 vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
399 break; 430 seg_off |= vpi << shift;
400 default:
401 avpn = vpi = size = 0;
402 } 431 }
403 avpn |= (vpi << mmu_psize_defs[size].shift); 432 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
433 default:
434 *vpn = size = 0;
404 } 435 }
405
406 *va = avpn;
407 *psize = size; 436 *psize = size;
408 *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
409} 437}
410 438
411/* 439/*
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
418 */ 446 */
419static void native_hpte_clear(void) 447static void native_hpte_clear(void)
420{ 448{
449 unsigned long vpn = 0;
421 unsigned long slot, slots, flags; 450 unsigned long slot, slots, flags;
422 struct hash_pte *hptep = htab_address; 451 struct hash_pte *hptep = htab_address;
423 unsigned long hpte_v, va; 452 unsigned long hpte_v;
424 unsigned long pteg_count; 453 unsigned long pteg_count;
425 int psize, ssize; 454 int psize, ssize;
426 455
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
448 * already hold the native_tlbie_lock. 477 * already hold the native_tlbie_lock.
449 */ 478 */
450 if (hpte_v & HPTE_V_VALID) { 479 if (hpte_v & HPTE_V_VALID) {
451 hpte_decode(hptep, slot, &psize, &ssize, &va); 480 hpte_decode(hptep, slot, &psize, &ssize, &vpn);
452 hptep->v = 0; 481 hptep->v = 0;
453 __tlbie(va, psize, ssize); 482 __tlbie(vpn, psize, ssize);
454 } 483 }
455 } 484 }
456 485
@@ -465,7 +494,8 @@ static void native_hpte_clear(void)
465 */ 494 */
466static void native_flush_hash_range(unsigned long number, int local) 495static void native_flush_hash_range(unsigned long number, int local)
467{ 496{
468 unsigned long va, hash, index, hidx, shift, slot; 497 unsigned long vpn;
498 unsigned long hash, index, hidx, shift, slot;
469 struct hash_pte *hptep; 499 struct hash_pte *hptep;
470 unsigned long hpte_v; 500 unsigned long hpte_v;
471 unsigned long want_v; 501 unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
479 local_irq_save(flags); 509 local_irq_save(flags);
480 510
481 for (i = 0; i < number; i++) { 511 for (i = 0; i < number; i++) {
482 va = batch->vaddr[i]; 512 vpn = batch->vpn[i];
483 pte = batch->pte[i]; 513 pte = batch->pte[i];
484 514
485 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 515 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
486 hash = hpt_hash(va, shift, ssize); 516 hash = hpt_hash(vpn, shift, ssize);
487 hidx = __rpte_to_hidx(pte, index); 517 hidx = __rpte_to_hidx(pte, index);
488 if (hidx & _PTEIDX_SECONDARY) 518 if (hidx & _PTEIDX_SECONDARY)
489 hash = ~hash; 519 hash = ~hash;
490 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 520 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
491 slot += hidx & _PTEIDX_GROUP_IX; 521 slot += hidx & _PTEIDX_GROUP_IX;
492 hptep = htab_address + slot; 522 hptep = htab_address + slot;
493 want_v = hpte_encode_v(va, psize, ssize); 523 want_v = hpte_encode_v(vpn, psize, ssize);
494 native_lock_hpte(hptep); 524 native_lock_hpte(hptep);
495 hpte_v = hptep->v; 525 hpte_v = hptep->v;
496 if (!HPTE_V_COMPARE(hpte_v, want_v) || 526 if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
505 mmu_psize_defs[psize].tlbiel && local) { 535 mmu_psize_defs[psize].tlbiel && local) {
506 asm volatile("ptesync":::"memory"); 536 asm volatile("ptesync":::"memory");
507 for (i = 0; i < number; i++) { 537 for (i = 0; i < number; i++) {
508 va = batch->vaddr[i]; 538 vpn = batch->vpn[i];
509 pte = batch->pte[i]; 539 pte = batch->pte[i];
510 540
511 pte_iterate_hashed_subpages(pte, psize, va, index, 541 pte_iterate_hashed_subpages(pte, psize,
512 shift) { 542 vpn, index, shift) {
513 __tlbiel(va, psize, ssize); 543 __tlbiel(vpn, psize, ssize);
514 } pte_iterate_hashed_end(); 544 } pte_iterate_hashed_end();
515 } 545 }
516 asm volatile("ptesync":::"memory"); 546 asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
522 552
523 asm volatile("ptesync":::"memory"); 553 asm volatile("ptesync":::"memory");
524 for (i = 0; i < number; i++) { 554 for (i = 0; i < number; i++) {
525 va = batch->vaddr[i]; 555 vpn = batch->vpn[i];
526 pte = batch->pte[i]; 556 pte = batch->pte[i];
527 557
528 pte_iterate_hashed_subpages(pte, psize, va, index, 558 pte_iterate_hashed_subpages(pte, psize,
529 shift) { 559 vpn, index, shift) {
530 __tlbie(va, psize, ssize); 560 __tlbie(vpn, psize, ssize);
531 } pte_iterate_hashed_end(); 561 } pte_iterate_hashed_end();
532 } 562 }
533 asm volatile("eieio; tlbsync; ptesync":::"memory"); 563 asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -539,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
539 local_irq_restore(flags); 569 local_irq_restore(flags);
540} 570}
541 571
542#ifdef CONFIG_PPC_PSERIES
543/* Disable TLB batching on nighthawk */
544static inline int tlb_batching_enabled(void)
545{
546 struct device_node *root = of_find_node_by_path("/");
547 int enabled = 1;
548
549 if (root) {
550 const char *model = of_get_property(root, "model", NULL);
551 if (model && !strcmp(model, "IBM,9076-N81"))
552 enabled = 0;
553 of_node_put(root);
554 }
555
556 return enabled;
557}
558#else
559static inline int tlb_batching_enabled(void)
560{
561 return 1;
562}
563#endif
564
565void __init hpte_init_native(void) 572void __init hpte_init_native(void)
566{ 573{
567 ppc_md.hpte_invalidate = native_hpte_invalidate; 574 ppc_md.hpte_invalidate = native_hpte_invalidate;
@@ -570,6 +577,5 @@ void __init hpte_init_native(void)
570 ppc_md.hpte_insert = native_hpte_insert; 577 ppc_md.hpte_insert = native_hpte_insert;
571 ppc_md.hpte_remove = native_hpte_remove; 578 ppc_md.hpte_remove = native_hpte_remove;
572 ppc_md.hpte_clear_all = native_hpte_clear; 579 ppc_md.hpte_clear_all = native_hpte_clear;
573 if (tlb_batching_enabled()) 580 ppc_md.flush_hash_range = native_flush_hash_range;
574 ppc_md.flush_hash_range = native_flush_hash_range;
575} 581}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 377e5cbedbb..3a292be2e07 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -43,7 +43,6 @@
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44#include <asm/machdep.h> 44#include <asm/machdep.h>
45#include <asm/prom.h> 45#include <asm/prom.h>
46#include <asm/abs_addr.h>
47#include <asm/tlbflush.h> 46#include <asm/tlbflush.h>
48#include <asm/io.h> 47#include <asm/io.h>
49#include <asm/eeh.h> 48#include <asm/eeh.h>
@@ -192,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
192 vaddr += step, paddr += step) { 191 vaddr += step, paddr += step) {
193 unsigned long hash, hpteg; 192 unsigned long hash, hpteg;
194 unsigned long vsid = get_kernel_vsid(vaddr, ssize); 193 unsigned long vsid = get_kernel_vsid(vaddr, ssize);
195 unsigned long va = hpt_va(vaddr, vsid, ssize); 194 unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
196 unsigned long tprot = prot; 195 unsigned long tprot = prot;
197 196
198 /* Make kernel text executable */ 197 /* Make kernel text executable */
199 if (overlaps_kernel_text(vaddr, vaddr + step)) 198 if (overlaps_kernel_text(vaddr, vaddr + step))
200 tprot &= ~HPTE_R_N; 199 tprot &= ~HPTE_R_N;
201 200
202 hash = hpt_hash(va, shift, ssize); 201 hash = hpt_hash(vpn, shift, ssize);
203 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 202 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
204 203
205 BUG_ON(!ppc_md.hpte_insert); 204 BUG_ON(!ppc_md.hpte_insert);
206 ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot, 205 ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
207 HPTE_V_BOLTED, psize, ssize); 206 HPTE_V_BOLTED, psize, ssize);
208 207
209 if (ret < 0) 208 if (ret < 0)
@@ -651,7 +650,7 @@ static void __init htab_initialize(void)
651 DBG("Hash table allocated at %lx, size: %lx\n", table, 650 DBG("Hash table allocated at %lx, size: %lx\n", table,
652 htab_size_bytes); 651 htab_size_bytes);
653 652
654 htab_address = abs_to_virt(table); 653 htab_address = __va(table);
655 654
656 /* htab absolute addr + encoded htabsize */ 655 /* htab absolute addr + encoded htabsize */
657 _SDR1 = table + __ilog2(pteg_count) - 11; 656 _SDR1 = table + __ilog2(pteg_count) - 11;
@@ -804,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
804#ifdef CONFIG_PPC_MM_SLICES 803#ifdef CONFIG_PPC_MM_SLICES
805unsigned int get_paca_psize(unsigned long addr) 804unsigned int get_paca_psize(unsigned long addr)
806{ 805{
807 unsigned long index, slices; 806 u64 lpsizes;
807 unsigned char *hpsizes;
808 unsigned long index, mask_index;
808 809
809 if (addr < SLICE_LOW_TOP) { 810 if (addr < SLICE_LOW_TOP) {
810 slices = get_paca()->context.low_slices_psize; 811 lpsizes = get_paca()->context.low_slices_psize;
811 index = GET_LOW_SLICE_INDEX(addr); 812 index = GET_LOW_SLICE_INDEX(addr);
812 } else { 813 return (lpsizes >> (index * 4)) & 0xF;
813 slices = get_paca()->context.high_slices_psize;
814 index = GET_HIGH_SLICE_INDEX(addr);
815 } 814 }
816 return (slices >> (index * 4)) & 0xF; 815 hpsizes = get_paca()->context.high_slices_psize;
816 index = GET_HIGH_SLICE_INDEX(addr);
817 mask_index = index & 0x1;
818 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
817} 819}
818 820
819#else 821#else
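
As a worked example of the new high-slice lookup above (hypothetical address): if GET_HIGH_SLICE_INDEX(addr) were 5, the page-size nibble would live in hpsizes[2] (index >> 1) with mask_index = 1, so the result is (hpsizes[2] >> 4) & 0xF; each byte of high_slices_psize now packs the psize of two slices.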
@@ -1153,21 +1155,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1153/* WARNING: This is called from hash_low_64.S, if you change this prototype, 1155/* WARNING: This is called from hash_low_64.S, if you change this prototype,
1154 * do not forget to update the assembly call site ! 1156 * do not forget to update the assembly call site !
1155 */ 1157 */
1156void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, 1158void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1157 int local) 1159 int local)
1158{ 1160{
1159 unsigned long hash, index, shift, hidx, slot; 1161 unsigned long hash, index, shift, hidx, slot;
1160 1162
1161 DBG_LOW("flush_hash_page(va=%016lx)\n", va); 1163 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1162 pte_iterate_hashed_subpages(pte, psize, va, index, shift) { 1164 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
1163 hash = hpt_hash(va, shift, ssize); 1165 hash = hpt_hash(vpn, shift, ssize);
1164 hidx = __rpte_to_hidx(pte, index); 1166 hidx = __rpte_to_hidx(pte, index);
1165 if (hidx & _PTEIDX_SECONDARY) 1167 if (hidx & _PTEIDX_SECONDARY)
1166 hash = ~hash; 1168 hash = ~hash;
1167 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1169 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1168 slot += hidx & _PTEIDX_GROUP_IX; 1170 slot += hidx & _PTEIDX_GROUP_IX;
1169 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); 1171 DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
1170 ppc_md.hpte_invalidate(slot, va, psize, ssize, local); 1172 ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
1171 } pte_iterate_hashed_end(); 1173 } pte_iterate_hashed_end();
1172} 1174}
1173 1175
@@ -1181,7 +1183,7 @@ void flush_hash_range(unsigned long number, int local)
1181 &__get_cpu_var(ppc64_tlb_batch); 1183 &__get_cpu_var(ppc64_tlb_batch);
1182 1184
1183 for (i = 0; i < number; i++) 1185 for (i = 0; i < number; i++)
1184 flush_hash_page(batch->vaddr[i], batch->pte[i], 1186 flush_hash_page(batch->vpn[i], batch->pte[i],
1185 batch->psize, batch->ssize, local); 1187 batch->psize, batch->ssize, local);
1186 } 1188 }
1187} 1189}
@@ -1208,14 +1210,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
1208{ 1210{
1209 unsigned long hash, hpteg; 1211 unsigned long hash, hpteg;
1210 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1212 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1211 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1213 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1212 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL); 1214 unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
1213 int ret; 1215 int ret;
1214 1216
1215 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1217 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1216 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); 1218 hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
1217 1219
1218 ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr), 1220 ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
1219 mode, HPTE_V_BOLTED, 1221 mode, HPTE_V_BOLTED,
1220 mmu_linear_psize, mmu_kernel_ssize); 1222 mmu_linear_psize, mmu_kernel_ssize);
1221 BUG_ON (ret < 0); 1223 BUG_ON (ret < 0);
@@ -1229,9 +1231,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1229{ 1231{
1230 unsigned long hash, hidx, slot; 1232 unsigned long hash, hidx, slot;
1231 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize); 1233 unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
1232 unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize); 1234 unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
1233 1235
1234 hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize); 1236 hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
1235 spin_lock(&linear_map_hash_lock); 1237 spin_lock(&linear_map_hash_lock);
1236 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80)); 1238 BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
1237 hidx = linear_map_hash_slots[lmi] & 0x7f; 1239 hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1241,7 +1243,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
1241 hash = ~hash; 1243 hash = ~hash;
1242 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 1244 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1243 slot += hidx & _PTEIDX_GROUP_IX; 1245 slot += hidx & _PTEIDX_GROUP_IX;
1244 ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0); 1246 ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
1245} 1247}
1246 1248
1247void kernel_map_pages(struct page *page, int numpages, int enable) 1249void kernel_map_pages(struct page *page, int numpages, int enable)
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086c..cecad348f60 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
18 pte_t *ptep, unsigned long trap, int local, int ssize, 18 pte_t *ptep, unsigned long trap, int local, int ssize,
19 unsigned int shift, unsigned int mmu_psize) 19 unsigned int shift, unsigned int mmu_psize)
20{ 20{
21 unsigned long vpn;
21 unsigned long old_pte, new_pte; 22 unsigned long old_pte, new_pte;
22 unsigned long va, rflags, pa, sz; 23 unsigned long rflags, pa, sz;
23 long slot; 24 long slot;
24 25
25 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift); 26 BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
26 27
27 /* Search the Linux page table for a match with va */ 28 /* Search the Linux page table for a match with va */
28 va = hpt_va(ea, vsid, ssize); 29 vpn = hpt_vpn(ea, vsid, ssize);
29 30
30 /* At this point, we have a pte (old_pte) which can be used to build 31 /* At this point, we have a pte (old_pte) which can be used to build
31 * or update an HPTE. There are 2 cases: 32 * or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
69 /* There MIGHT be an HPTE for this pte */ 70 /* There MIGHT be an HPTE for this pte */
70 unsigned long hash, slot; 71 unsigned long hash, slot;
71 72
72 hash = hpt_hash(va, shift, ssize); 73 hash = hpt_hash(vpn, shift, ssize);
73 if (old_pte & _PAGE_F_SECOND) 74 if (old_pte & _PAGE_F_SECOND)
74 hash = ~hash; 75 hash = ~hash;
75 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; 76 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
76 slot += (old_pte & _PAGE_F_GIX) >> 12; 77 slot += (old_pte & _PAGE_F_GIX) >> 12;
77 78
78 if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize, 79 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
79 ssize, local) == -1) 80 ssize, local) == -1)
80 old_pte &= ~_PAGE_HPTEFLAGS; 81 old_pte &= ~_PAGE_HPTEFLAGS;
81 } 82 }
82 83
83 if (likely(!(old_pte & _PAGE_HASHPTE))) { 84 if (likely(!(old_pte & _PAGE_HASHPTE))) {
84 unsigned long hash = hpt_hash(va, shift, ssize); 85 unsigned long hash = hpt_hash(vpn, shift, ssize);
85 unsigned long hpte_group; 86 unsigned long hpte_group;
86 87
87 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT; 88 pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -101,14 +102,14 @@ repeat:
101 _PAGE_COHERENT | _PAGE_GUARDED)); 102 _PAGE_COHERENT | _PAGE_GUARDED));
102 103
103 /* Insert into the hash table, primary slot */ 104 /* Insert into the hash table, primary slot */
104 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0, 105 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
105 mmu_psize, ssize); 106 mmu_psize, ssize);
106 107
107 /* Primary is full, try the secondary */ 108 /* Primary is full, try the secondary */
108 if (unlikely(slot == -1)) { 109 if (unlikely(slot == -1)) {
109 hpte_group = ((~hash & htab_hash_mask) * 110 hpte_group = ((~hash & htab_hash_mask) *
110 HPTES_PER_GROUP) & ~0x7UL; 111 HPTES_PER_GROUP) & ~0x7UL;
111 slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 112 slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
112 HPTE_V_SECONDARY, 113 HPTE_V_SECONDARY,
113 mmu_psize, ssize); 114 mmu_psize, ssize);
114 if (slot == -1) { 115 if (slot == -1) {
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 620b7acd2fd..95a45293e5a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -62,7 +62,6 @@
62#include <asm/cputable.h> 62#include <asm/cputable.h>
63#include <asm/sections.h> 63#include <asm/sections.h>
64#include <asm/iommu.h> 64#include <asm/iommu.h>
65#include <asm/abs_addr.h>
66#include <asm/vdso.h> 65#include <asm/vdso.h>
67 66
68#include "mmu_decl.h" 67#include "mmu_decl.h"
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index fbdad0e3929..0dba5066c22 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -62,7 +62,7 @@
62 62
63int init_bootmem_done; 63int init_bootmem_done;
64int mem_init_done; 64int mem_init_done;
65phys_addr_t memory_limit; 65unsigned long long memory_limit;
66 66
67#ifdef CONFIG_HIGHMEM 67#ifdef CONFIG_HIGHMEM
68pte_t *kmap_pte; 68pte_t *kmap_pte;
@@ -300,8 +300,7 @@ void __init mem_init(void)
300 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize; 300 unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
301 301
302#ifdef CONFIG_SWIOTLB 302#ifdef CONFIG_SWIOTLB
303 if (ppc_swiotlb_enable) 303 swiotlb_init(0);
304 swiotlb_init(1);
305#endif 304#endif
306 305
307 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; 306 num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40677aa0190..40bc5b0ace5 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -30,11 +30,13 @@ static DEFINE_SPINLOCK(mmu_context_lock);
30static DEFINE_IDA(mmu_context_ida); 30static DEFINE_IDA(mmu_context_ida);
31 31
32/* 32/*
33 * The proto-VSID space has 2^35 - 1 segments available for user mappings. 33 * 256MB segment
 34 * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, 34 * The proto-VSID space has 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 segments
35 * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). 35 * available for user mappings. Each segment contains 2^28 bytes. Each
36 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
37 * (19 == 37 + 28 - 46).
36 */ 38 */
37#define MAX_CONTEXT ((1UL << 19) - 1) 39#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
38 40
39int __init_new_context(void) 41int __init_new_context(void)
40{ 42{
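
The arithmetic in the new comment works out as follows (with CONTEXT_BITS = 19 and, as the comment's "37" implies, USER_ESID_BITS = 18): the proto-VSID space covers 2^37 segments of 2^28 bytes each, while one context maps 2^46 bytes (64TB), so at most 2^(37 + 28 - 46) = 2^19 contexts fit, matching MAX_CONTEXT = (1UL << CONTEXT_BITS) - 1.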
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 249a0631c4d..e212a271c7a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -51,13 +51,22 @@
51#include <asm/processor.h> 51#include <asm/processor.h>
52#include <asm/cputable.h> 52#include <asm/cputable.h>
53#include <asm/sections.h> 53#include <asm/sections.h>
54#include <asm/abs_addr.h>
55#include <asm/firmware.h> 54#include <asm/firmware.h>
56 55
57#include "mmu_decl.h" 56#include "mmu_decl.h"
58 57
59unsigned long ioremap_bot = IOREMAP_BASE; 58/* Some sanity checking */
59#if TASK_SIZE_USER64 > PGTABLE_RANGE
60#error TASK_SIZE_USER64 exceeds pagetable range
61#endif
62
63#ifdef CONFIG_PPC_STD_MMU_64
64#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
65#error TASK_SIZE_USER64 exceeds user VSID range
66#endif
67#endif
60 68
69unsigned long ioremap_bot = IOREMAP_BASE;
61 70
62#ifdef CONFIG_PPC_MMU_NOHASH 71#ifdef CONFIG_PPC_MMU_NOHASH
63static void *early_alloc_pgtable(unsigned long size) 72static void *early_alloc_pgtable(unsigned long size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index b9ee79ce220..1a16ca22775 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode)
56 */ 56 */
57_GLOBAL(slb_miss_kernel_load_linear) 57_GLOBAL(slb_miss_kernel_load_linear)
58 li r11,0 58 li r11,0
59 li r9,0x1
60 /*
61 * for 1T we shift 12 bits more. slb_finish_load_1T will do
62 * the necessary adjustment
63 */
64 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
59BEGIN_FTR_SECTION 65BEGIN_FTR_SECTION
60 b slb_finish_load 66 b slb_finish_load
61END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 67END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
85 _GLOBAL(slb_miss_kernel_load_io) 91 _GLOBAL(slb_miss_kernel_load_io)
86 li r11,0 92 li r11,0
876: 936:
94 li r9,0x1
95 /*
96 * for 1T we shift 12 bits more. slb_finish_load_1T will do
97 * the necessary adjustment
98 */
99 rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
88BEGIN_FTR_SECTION 100BEGIN_FTR_SECTION
89 b slb_finish_load 101 b slb_finish_load
90END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) 102END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -108,17 +120,31 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
108 * between 4k and 64k standard page size 120 * between 4k and 64k standard page size
109 */ 121 */
110#ifdef CONFIG_PPC_MM_SLICES 122#ifdef CONFIG_PPC_MM_SLICES
123 /* r10 have esid */
111 cmpldi r10,16 124 cmpldi r10,16
112 125 /* below SLICE_LOW_TOP */
113 /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
114 ld r9,PACALOWSLICESPSIZE(r13)
115 sldi r11,r10,2
116 blt 5f 126 blt 5f
117 ld r9,PACAHIGHSLICEPSIZE(r13) 127 /*
118 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2) 128 * Handle hpsizes,
119 andi. r11,r11,0x3c 129 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
130 */
131 srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
132 addi r9,r11,PACAHIGHSLICEPSIZE
133 lbzx r9,r13,r9 /* r9 is hpsizes[r11] */
134 /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
135 rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
136 b 6f
120 137
1215: /* Extract the psize and multiply to get an array offset */ 1385:
139 /*
140 * Handle lpsizes
141 * r9 is get_paca()->context.low_slices_psize, r11 is index
142 */
143 ld r9,PACALOWSLICESPSIZE(r13)
144 mr r11,r10
1456:
146 sldi r11,r11,2 /* index * 4 */
147 /* Extract the psize and multiply to get an array offset */
122 srd r9,r9,r11 148 srd r9,r9,r11
123 andi. r9,r9,0xf 149 andi. r9,r9,0xf
124 mulli r9,r9,MMUPSIZEDEFSIZE 150 mulli r9,r9,MMUPSIZEDEFSIZE
@@ -209,7 +235,11 @@ _GLOBAL(slb_allocate_user)
209 */ 235 */
210slb_finish_load: 236slb_finish_load:
211 ASM_VSID_SCRAMBLE(r10,r9,256M) 237 ASM_VSID_SCRAMBLE(r10,r9,256M)
212 rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */ 238 /*
239 * bits above VSID_BITS_256M need to be ignored from r10
240 * also combine VSID and flags
241 */
242 rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
213 243
214 /* r3 = EA, r11 = VSID data */ 244 /* r3 = EA, r11 = VSID data */
215 /* 245 /*
@@ -252,10 +282,10 @@ _GLOBAL(slb_compare_rr_to_size)
252 bge 1f 282 bge 1f
253 283
254 /* still room in the slb cache */ 284 /* still room in the slb cache */
255 sldi r11,r3,1 /* r11 = offset * sizeof(u16) */ 285 sldi r11,r3,2 /* r11 = offset * sizeof(u32) */
256 rldicl r10,r10,36,28 /* get low 16 bits of the ESID */ 286 srdi r10,r10,28 /* get the 36 bits of the ESID */
257 add r11,r11,r13 /* r11 = (u16 *)paca + offset */ 287 add r11,r11,r13 /* r11 = (u32 *)paca + offset */
258 sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */ 288 stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
259 addi r3,r3,1 /* offset++ */ 289 addi r3,r3,1 /* offset++ */
260 b 2f 290 b 2f
2611: /* offset >= SLB_CACHE_ENTRIES */ 2911: /* offset >= SLB_CACHE_ENTRIES */
@@ -273,7 +303,11 @@ _GLOBAL(slb_compare_rr_to_size)
273slb_finish_load_1T: 303slb_finish_load_1T:
274 srdi r10,r10,40-28 /* get 1T ESID */ 304 srdi r10,r10,40-28 /* get 1T ESID */
275 ASM_VSID_SCRAMBLE(r10,r9,1T) 305 ASM_VSID_SCRAMBLE(r10,r9,1T)
276 rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */ 306 /*
307 * bits above VSID_BITS_1T need to be ignored from r10
308 * also combine VSID and flags
309 */
310 rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
277 li r10,MMU_SEGSIZE_1T 311 li r10,MMU_SEGSIZE_1T
278 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */ 312 rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
279 313
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 73709f7ce92..5829d2a950d 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
34#include <asm/mmu.h> 34#include <asm/mmu.h>
35#include <asm/spu.h> 35#include <asm/spu.h>
36 36
37/* some sanity checks */
38#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
39#error PGTABLE_RANGE exceeds slice_mask high_slices size
40#endif
41
37static DEFINE_SPINLOCK(slice_convert_lock); 42static DEFINE_SPINLOCK(slice_convert_lock);
38 43
39 44
@@ -42,7 +47,7 @@ int _slice_debug = 1;
42 47
43static void slice_print_mask(const char *label, struct slice_mask mask) 48static void slice_print_mask(const char *label, struct slice_mask mask)
44{ 49{
45 char *p, buf[16 + 3 + 16 + 1]; 50 char *p, buf[16 + 3 + 64 + 1];
46 int i; 51 int i;
47 52
48 if (!_slice_debug) 53 if (!_slice_debug)
@@ -54,7 +59,7 @@ static void slice_print_mask(const char *label, struct slice_mask mask)
54 *(p++) = '-'; 59 *(p++) = '-';
55 *(p++) = ' '; 60 *(p++) = ' ';
56 for (i = 0; i < SLICE_NUM_HIGH; i++) 61 for (i = 0; i < SLICE_NUM_HIGH; i++)
57 *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0'; 62 *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
58 *(p++) = 0; 63 *(p++) = 0;
59 64
60 printk(KERN_DEBUG "%s:%s\n", label, buf); 65 printk(KERN_DEBUG "%s:%s\n", label, buf);
@@ -84,8 +89,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start,
84 } 89 }
85 90
86 if ((start + len) > SLICE_LOW_TOP) 91 if ((start + len) > SLICE_LOW_TOP)
87 ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1)) 92 ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
88 - (1u << GET_HIGH_SLICE_INDEX(start)); 93 - (1ul << GET_HIGH_SLICE_INDEX(start));
89 94
90 return ret; 95 return ret;
91} 96}
@@ -135,26 +140,31 @@ static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
135 140
136 for (i = 0; i < SLICE_NUM_HIGH; i++) 141 for (i = 0; i < SLICE_NUM_HIGH; i++)
137 if (!slice_high_has_vma(mm, i)) 142 if (!slice_high_has_vma(mm, i))
138 ret.high_slices |= 1u << i; 143 ret.high_slices |= 1ul << i;
139 144
140 return ret; 145 return ret;
141} 146}
142 147
143static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize) 148static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
144{ 149{
150 unsigned char *hpsizes;
151 int index, mask_index;
145 struct slice_mask ret = { 0, 0 }; 152 struct slice_mask ret = { 0, 0 };
146 unsigned long i; 153 unsigned long i;
147 u64 psizes; 154 u64 lpsizes;
148 155
149 psizes = mm->context.low_slices_psize; 156 lpsizes = mm->context.low_slices_psize;
150 for (i = 0; i < SLICE_NUM_LOW; i++) 157 for (i = 0; i < SLICE_NUM_LOW; i++)
151 if (((psizes >> (i * 4)) & 0xf) == psize) 158 if (((lpsizes >> (i * 4)) & 0xf) == psize)
152 ret.low_slices |= 1u << i; 159 ret.low_slices |= 1u << i;
153 160
154 psizes = mm->context.high_slices_psize; 161 hpsizes = mm->context.high_slices_psize;
155 for (i = 0; i < SLICE_NUM_HIGH; i++) 162 for (i = 0; i < SLICE_NUM_HIGH; i++) {
156 if (((psizes >> (i * 4)) & 0xf) == psize) 163 mask_index = i & 0x1;
157 ret.high_slices |= 1u << i; 164 index = i >> 1;
165 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
166 ret.high_slices |= 1ul << i;
167 }
158 168
159 return ret; 169 return ret;
160} 170}
@@ -183,8 +193,10 @@ static void slice_flush_segments(void *parm)
183 193
184static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize) 194static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
185{ 195{
196 int index, mask_index;
186 /* Write the new slice psize bits */ 197 /* Write the new slice psize bits */
187 u64 lpsizes, hpsizes; 198 unsigned char *hpsizes;
199 u64 lpsizes;
188 unsigned long i, flags; 200 unsigned long i, flags;
189 201
190 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize); 202 slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
@@ -201,14 +213,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
201 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 213 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
202 (((unsigned long)psize) << (i * 4)); 214 (((unsigned long)psize) << (i * 4));
203 215
204 hpsizes = mm->context.high_slices_psize; 216 /* Assign the value back */
205 for (i = 0; i < SLICE_NUM_HIGH; i++)
206 if (mask.high_slices & (1u << i))
207 hpsizes = (hpsizes & ~(0xful << (i * 4))) |
208 (((unsigned long)psize) << (i * 4));
209
210 mm->context.low_slices_psize = lpsizes; 217 mm->context.low_slices_psize = lpsizes;
211 mm->context.high_slices_psize = hpsizes; 218
219 hpsizes = mm->context.high_slices_psize;
220 for (i = 0; i < SLICE_NUM_HIGH; i++) {
221 mask_index = i & 0x1;
222 index = i >> 1;
223 if (mask.high_slices & (1ul << i))
224 hpsizes[index] = (hpsizes[index] &
225 ~(0xf << (mask_index * 4))) |
226 (((unsigned long)psize) << (mask_index * 4));
227 }
212 228
213 slice_dbg(" lsps=%lx, hsps=%lx\n", 229 slice_dbg(" lsps=%lx, hsps=%lx\n",
214 mm->context.low_slices_psize, 230 mm->context.low_slices_psize,
@@ -587,18 +603,19 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
587 603
588unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr) 604unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
589{ 605{
590 u64 psizes; 606 unsigned char *hpsizes;
591 int index; 607 int index, mask_index;
592 608
593 if (addr < SLICE_LOW_TOP) { 609 if (addr < SLICE_LOW_TOP) {
594 psizes = mm->context.low_slices_psize; 610 u64 lpsizes;
611 lpsizes = mm->context.low_slices_psize;
595 index = GET_LOW_SLICE_INDEX(addr); 612 index = GET_LOW_SLICE_INDEX(addr);
596 } else { 613 return (lpsizes >> (index * 4)) & 0xf;
597 psizes = mm->context.high_slices_psize;
598 index = GET_HIGH_SLICE_INDEX(addr);
599 } 614 }
600 615 hpsizes = mm->context.high_slices_psize;
601 return (psizes >> (index * 4)) & 0xf; 616 index = GET_HIGH_SLICE_INDEX(addr);
617 mask_index = index & 0x1;
618 return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
602} 619}
603EXPORT_SYMBOL_GPL(get_slice_psize); 620EXPORT_SYMBOL_GPL(get_slice_psize);
604 621
@@ -618,7 +635,9 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
618 */ 635 */
619void slice_set_user_psize(struct mm_struct *mm, unsigned int psize) 636void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
620{ 637{
621 unsigned long flags, lpsizes, hpsizes; 638 int index, mask_index;
639 unsigned char *hpsizes;
640 unsigned long flags, lpsizes;
622 unsigned int old_psize; 641 unsigned int old_psize;
623 int i; 642 int i;
624 643
@@ -639,15 +658,21 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
639 if (((lpsizes >> (i * 4)) & 0xf) == old_psize) 658 if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
640 lpsizes = (lpsizes & ~(0xful << (i * 4))) | 659 lpsizes = (lpsizes & ~(0xful << (i * 4))) |
641 (((unsigned long)psize) << (i * 4)); 660 (((unsigned long)psize) << (i * 4));
661 /* Assign the value back */
662 mm->context.low_slices_psize = lpsizes;
642 663
643 hpsizes = mm->context.high_slices_psize; 664 hpsizes = mm->context.high_slices_psize;
644 for (i = 0; i < SLICE_NUM_HIGH; i++) 665 for (i = 0; i < SLICE_NUM_HIGH; i++) {
645 if (((hpsizes >> (i * 4)) & 0xf) == old_psize) 666 mask_index = i & 0x1;
646 hpsizes = (hpsizes & ~(0xful << (i * 4))) | 667 index = i >> 1;
647 (((unsigned long)psize) << (i * 4)); 668 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
669 hpsizes[index] = (hpsizes[index] &
670 ~(0xf << (mask_index * 4))) |
671 (((unsigned long)psize) << (mask_index * 4));
672 }
673
674
648 675
649 mm->context.low_slices_psize = lpsizes;
650 mm->context.high_slices_psize = hpsizes;
651 676
652 slice_dbg(" lsps=%lx, hsps=%lx\n", 677 slice_dbg(" lsps=%lx, hsps=%lx\n",
653 mm->context.low_slices_psize, 678 mm->context.low_slices_psize,
@@ -660,18 +685,27 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
660void slice_set_psize(struct mm_struct *mm, unsigned long address, 685void slice_set_psize(struct mm_struct *mm, unsigned long address,
661 unsigned int psize) 686 unsigned int psize)
662{ 687{
688 unsigned char *hpsizes;
663 unsigned long i, flags; 689 unsigned long i, flags;
664 u64 *p; 690 u64 *lpsizes;
665 691
666 spin_lock_irqsave(&slice_convert_lock, flags); 692 spin_lock_irqsave(&slice_convert_lock, flags);
667 if (address < SLICE_LOW_TOP) { 693 if (address < SLICE_LOW_TOP) {
668 i = GET_LOW_SLICE_INDEX(address); 694 i = GET_LOW_SLICE_INDEX(address);
669 p = &mm->context.low_slices_psize; 695 lpsizes = &mm->context.low_slices_psize;
696 *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
697 ((unsigned long) psize << (i * 4));
670 } else { 698 } else {
699 int index, mask_index;
671 i = GET_HIGH_SLICE_INDEX(address); 700 i = GET_HIGH_SLICE_INDEX(address);
672 p = &mm->context.high_slices_psize; 701 hpsizes = mm->context.high_slices_psize;
702 mask_index = i & 0x1;
703 index = i >> 1;
704 hpsizes[index] = (hpsizes[index] &
705 ~(0xf << (mask_index * 4))) |
706 (((unsigned long)psize) << (mask_index * 4));
673 } 707 }
674 *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4)); 708
675 spin_unlock_irqrestore(&slice_convert_lock, flags); 709 spin_unlock_irqrestore(&slice_convert_lock, flags);
676 710
677#ifdef CONFIG_SPU_BASE 711#ifdef CONFIG_SPU_BASE
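With the 64TB address space the high-slice page-size array no longer fits in a single u64 of 4-bit fields, so high_slices_psize becomes a byte array holding two 4-bit psize values per byte, indexed as byte i >> 1 and nibble i & 1, exactly as the slice.c hunks above do inline. A small stand-alone C sketch of that packing; the helper names are illustrative, not kernel functions:

/* Read the 4-bit page-size index for high slice i. */
static unsigned int high_slice_psize_get(const unsigned char *hpsizes,
					 unsigned long i)
{
	int index = i >> 1;		/* which byte */
	int mask_index = i & 0x1;	/* which nibble within that byte */

	return (hpsizes[index] >> (mask_index * 4)) & 0xf;
}

/* Write the 4-bit page-size index for high slice i. */
static void high_slice_psize_set(unsigned char *hpsizes, unsigned long i,
				 unsigned int psize)
{
	int index = i >> 1;
	int mask_index = i & 0x1;

	hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
			 ((psize & 0xf) << (mask_index * 4));
}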
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 9106ebb118f..3f8efa6f299 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,7 +20,6 @@
20#include <asm/paca.h> 20#include <asm/paca.h>
21#include <asm/cputable.h> 21#include <asm/cputable.h>
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/abs_addr.h>
24 23
25struct stab_entry { 24struct stab_entry {
26 unsigned long esid_data; 25 unsigned long esid_data;
@@ -257,7 +256,7 @@ void __init stabs_alloc(void)
257 memset((void *)newstab, 0, HW_PAGE_SIZE); 256 memset((void *)newstab, 0, HW_PAGE_SIZE);
258 257
259 paca[cpu].stab_addr = newstab; 258 paca[cpu].stab_addr = newstab;
260 paca[cpu].stab_real = virt_to_abs(newstab); 259 paca[cpu].stab_real = __pa(newstab);
261 printk(KERN_INFO "Segment table for CPU %d at 0x%llx " 260 printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
262 "virtual, 0x%llx absolute\n", 261 "virtual, 0x%llx absolute\n",
263 cpu, paca[cpu].stab_addr, paca[cpu].stab_real); 262 cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e4f8f1fc81a..7c415ddde94 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
95 struct mm_struct *mm = current->mm; 95 struct mm_struct *mm = current->mm;
96 struct subpage_prot_table *spt = &mm->context.spt; 96 struct subpage_prot_table *spt = &mm->context.spt;
97 u32 **spm, *spp; 97 u32 **spm, *spp;
98 int i, nw; 98 unsigned long i;
99 size_t nw;
99 unsigned long next, limit; 100 unsigned long next, limit;
100 101
101 down_write(&mm->mmap_sem); 102 down_write(&mm->mmap_sem);
@@ -144,7 +145,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
144 struct mm_struct *mm = current->mm; 145 struct mm_struct *mm = current->mm;
145 struct subpage_prot_table *spt = &mm->context.spt; 146 struct subpage_prot_table *spt = &mm->context.spt;
146 u32 **spm, *spp; 147 u32 **spm, *spp;
147 int i, nw; 148 unsigned long i;
149 size_t nw;
148 unsigned long next, limit; 150 unsigned long next, limit;
149 int err; 151 int err;
150 152
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 31f18207970..ae758b3ff72 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
42void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
43 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
44{ 44{
45 unsigned long vpn;
45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); 46 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
46 unsigned long vsid, vaddr; 47 unsigned long vsid;
47 unsigned int psize; 48 unsigned int psize;
48 int ssize; 49 int ssize;
49 real_pte_t rpte; 50 real_pte_t rpte;
@@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
86 vsid = get_kernel_vsid(addr, mmu_kernel_ssize); 87 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
87 ssize = mmu_kernel_ssize; 88 ssize = mmu_kernel_ssize;
88 } 89 }
89 vaddr = hpt_va(addr, vsid, ssize); 90 vpn = hpt_vpn(addr, vsid, ssize);
90 rpte = __real_pte(__pte(pte), ptep); 91 rpte = __real_pte(__pte(pte), ptep);
91 92
92 /* 93 /*
@@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
96 * and decide to use local invalidates instead... 97 * and decide to use local invalidates instead...
97 */ 98 */
98 if (!batch->active) { 99 if (!batch->active) {
99 flush_hash_page(vaddr, rpte, psize, ssize, 0); 100 flush_hash_page(vpn, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch); 101 put_cpu_var(ppc64_tlb_batch);
101 return; 102 return;
102 } 103 }
@@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
122 batch->ssize = ssize; 123 batch->ssize = ssize;
123 } 124 }
124 batch->pte[i] = rpte; 125 batch->pte[i] = rpte;
125 batch->vaddr[i] = vaddr; 126 batch->vpn[i] = vpn;
126 batch->index = ++i; 127 batch->index = ++i;
127 if (i >= PPC64_TLB_BATCH_NR) 128 if (i >= PPC64_TLB_BATCH_NR)
128 __flush_tlb_pending(batch); 129 __flush_tlb_pending(batch);
@@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
146 if (cpumask_equal(mm_cpumask(batch->mm), tmp)) 147 if (cpumask_equal(mm_cpumask(batch->mm), tmp))
147 local = 1; 148 local = 1;
148 if (i == 1) 149 if (i == 1)
149 flush_hash_page(batch->vaddr[0], batch->pte[0], 150 flush_hash_page(batch->vpn[0], batch->pte[0],
150 batch->psize, batch->ssize, local); 151 batch->psize, batch->ssize, local);
151 else 152 else
152 flush_hash_range(i, local); 153 flush_hash_range(i, local);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f09d48e3268..b4113bf8635 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -20,6 +20,8 @@
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/exception-64e.h> 21#include <asm/exception-64e.h>
22#include <asm/ppc-opcode.h> 22#include <asm/ppc-opcode.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_booke_hv_asm.h>
23 25
24#ifdef CONFIG_PPC_64K_PAGES 26#ifdef CONFIG_PPC_64K_PAGES
25#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1) 27#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
@@ -37,12 +39,18 @@
37 * * 39 * *
38 **********************************************************************/ 40 **********************************************************************/
39 41
40.macro tlb_prolog_bolted addr 42.macro tlb_prolog_bolted intnum addr
41 mtspr SPRN_SPRG_TLB_SCRATCH,r13 43 mtspr SPRN_SPRG_GEN_SCRATCH,r13
42 mfspr r13,SPRN_SPRG_PACA 44 mfspr r13,SPRN_SPRG_PACA
43 std r10,PACA_EXTLB+EX_TLB_R10(r13) 45 std r10,PACA_EXTLB+EX_TLB_R10(r13)
44 mfcr r10 46 mfcr r10
45 std r11,PACA_EXTLB+EX_TLB_R11(r13) 47 std r11,PACA_EXTLB+EX_TLB_R11(r13)
48#ifdef CONFIG_KVM_BOOKE_HV
49BEGIN_FTR_SECTION
50 mfspr r11, SPRN_SRR1
51END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
52#endif
53 DO_KVM \intnum, SPRN_SRR1
46 std r16,PACA_EXTLB+EX_TLB_R16(r13) 54 std r16,PACA_EXTLB+EX_TLB_R16(r13)
47 mfspr r16,\addr /* get faulting address */ 55 mfspr r16,\addr /* get faulting address */
48 std r14,PACA_EXTLB+EX_TLB_R14(r13) 56 std r14,PACA_EXTLB+EX_TLB_R14(r13)
@@ -61,12 +69,12 @@
61 ld r15,PACA_EXTLB+EX_TLB_R15(r13) 69 ld r15,PACA_EXTLB+EX_TLB_R15(r13)
62 TLB_MISS_RESTORE_STATS_BOLTED 70 TLB_MISS_RESTORE_STATS_BOLTED
63 ld r16,PACA_EXTLB+EX_TLB_R16(r13) 71 ld r16,PACA_EXTLB+EX_TLB_R16(r13)
64 mfspr r13,SPRN_SPRG_TLB_SCRATCH 72 mfspr r13,SPRN_SPRG_GEN_SCRATCH
65.endm 73.endm
66 74
67/* Data TLB miss */ 75/* Data TLB miss */
68 START_EXCEPTION(data_tlb_miss_bolted) 76 START_EXCEPTION(data_tlb_miss_bolted)
69 tlb_prolog_bolted SPRN_DEAR 77 tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
70 78
71 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */ 79 /* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
72 80
@@ -214,7 +222,7 @@ itlb_miss_fault_bolted:
214 222
215/* Instruction TLB miss */ 223/* Instruction TLB miss */
216 START_EXCEPTION(instruction_tlb_miss_bolted) 224 START_EXCEPTION(instruction_tlb_miss_bolted)
217 tlb_prolog_bolted SPRN_SRR0 225 tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
218 226
219 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4 227 rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
220 srdi r15,r16,60 /* get region */ 228 srdi r15,r16,60 /* get region */