author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2013-03-12 23:34:54 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-03-16 21:39:06 -0400
commit		c60ac5693c47df32a2b4b18af97fca5635def015 (patch)
tree		34da014c89f9a941f5c901fc06f4c3f5bca6ad43 /arch/powerpc/mm
parent		e39d1a471484662620651cd9520250d33843f235 (diff)
powerpc: Update kernel VSID range
This patch changes the kernel VSID range so that we limit VSID_BITS to 37.
This enables us to support 64TB with a 65-bit VA (37 + 28). Without this
patch we have boot hangs on platforms that only support a 65-bit VA.

With this patch we now have the proto-VSID generated as below:

We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated from the
MMU context id and the effective segment id of the address. For user
processes the max context id is limited to ((1ul << 19) - 5); for kernel
space, we use the top 4 context ids to map addresses as below:

0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]

Acked-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Geoff Levand <geoff@infradead.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
CC: <stable@vger.kernel.org> [v3.8]
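To make the layout concrete, here is a minimal userspace sketch of the proto-VSID construction described above. It assumes the 256MB-segment constants this series uses (CONTEXT_BITS = 19, USER_ESID_BITS = 18, SID_SHIFT = 28); kernel_context() and proto_vsid() are illustrative stand-ins, not the kernel's actual helpers.

#include <stdio.h>

#define SID_SHIFT		28	/* 256MB segments */
#define CONTEXT_BITS		19
#define USER_ESID_BITS		18	/* 19 + 18 = 37-bit proto-VSID */
#define MAX_USER_CONTEXT	((1ul << CONTEXT_BITS) - 5)	/* 0x7fffb */

/* Top 4 ids: 0xc -> 0x7fffc, 0xd -> 0x7fffd, 0xe -> 0x7fffe, 0xf -> 0x7ffff */
static unsigned long kernel_context(unsigned long ea)
{
	return (ea >> 60) - 0xc + MAX_USER_CONTEXT + 1;
}

static unsigned long proto_vsid(unsigned long context, unsigned long ea)
{
	/* segment number within the region; < 2^USER_ESID_BITS for valid EAs */
	unsigned long esid = (ea << 4) >> (4 + SID_SHIFT);

	return (context << USER_ESID_BITS) | esid;
}

int main(void)
{
	unsigned long ea = 0xc000000012345678ul;

	printf("context=%#lx proto-VSID=%#lx\n",
	       kernel_context(ea), proto_vsid(kernel_context(ea), ea));
	return 0;
}

For the example address this prints context=0x7fffc, matching the first row of the table above.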
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c		20
-rw-r--r--	arch/powerpc/mm/mmu_context_hash64.c	11
-rw-r--r--	arch/powerpc/mm/slb_low.S		50
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c		2
4 files changed, 42 insertions, 41 deletions
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6ec6c1997b3a..f410c3e12c1e 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
 		unsigned long tprot = prot;
 
+		/*
+		 * If we hit a bad address return error.
+		 */
+		if (!vsid)
+			return -1;
 		/* Make kernel text executable */
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
@@ -924,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
 		ea, access, trap);
 
-	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
-		DBG_LOW(" out of pgtable range !\n");
-		return 1;
-	}
-
 	/* Get region & vsid */
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
@@ -959,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
+	/* Bad address. */
+	if (!vsid) {
+		DBG_LOW("Bad address!\n");
+		return 1;
+	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
 	if (pgdir == NULL)
@@ -1128,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	/* Get VSID */
 	ssize = user_segment_size(ea);
 	vsid = get_vsid(mm->context.id, ea, ssize);
+	if (!vsid)
+		return;
 
 	/* Hash doesn't like irqs */
 	local_irq_save(flags);
@@ -1235,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
 	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
+	/* Don't create HPTE entries for bad address */
+	if (!vsid)
+		return;
 	ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
 				 mode, HPTE_V_BOLTED,
 				 mmu_linear_psize, mmu_kernel_ssize);
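All four hunks above rely on a single new convention: a VSID of 0 means "bad address". The explicit PGTABLE_RANGE check removed from hash_page() is not lost; the range test moves into the VSID lookup itself (the helpers live outside this diffstat), and every caller now bails out on !vsid. A simplified sketch of that convention, with a stand-in helper rather than the kernel's real get_vsid():

#define REGION_MASK	(0xful << 60)
#define PGTABLE_RANGE	(1ul << 46)	/* 64TB addressable per context */

/* Stand-in for get_vsid(): 0 doubles as BAD_VSID. */
static unsigned long get_vsid_sketch(unsigned long context, unsigned long ea)
{
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;	/* bad address */
	/* the real helper scrambles (context << USER_ESID_BITS) | esid here */
	return 1;	/* placeholder non-zero VSID */
}

Centralizing the test is what lets htab_bolt_mapping() return -1, hash_page() return 1, and hash_preload()/kernel_map_linear_page() simply return early on the same condition.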
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40bc5b0ace54..d1d1b92c5b99 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -29,15 +29,6 @@
 static DEFINE_SPINLOCK(mmu_context_lock);
 static DEFINE_IDA(mmu_context_ida);
 
-/*
- * 256MB segment
- * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
- * available for user mappings. Each segment contains 2^28 bytes. Each
- * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
- * (19 == 37 + 28 - 46).
- */
-#define MAX_CONTEXT	((1UL << CONTEXT_BITS) - 1)
-
 int __init_new_context(void)
 {
 	int index;
@@ -56,7 +47,7 @@ again:
 	else if (err)
 		return err;
 
-	if (index > MAX_CONTEXT) {
+	if (index > MAX_USER_CONTEXT) {
 		spin_lock(&mmu_context_lock);
 		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
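The rename from MAX_CONTEXT to MAX_USER_CONTEXT is not cosmetic. With the top four context ids now reserved for the kernel's 0xc-0xf regions, the highest id the IDA may hand to a user process drops from (1UL << 19) - 1 to (1UL << 19) - 5. Spelled out (values from the commit message; illustration, not kernel code):

#define CONTEXT_BITS		19
#define MAX_USER_CONTEXT	((1ul << CONTEXT_BITS) - 5)	/* 0x7fffb */

/*
 * Reserved kernel context ids, one per region nibble 0xc..0xf:
 *   0x7fffc, 0x7fffd, 0x7fffe, 0x7ffff
 * i.e. context = (ea >> 60) - 0xc + MAX_USER_CONTEXT + 1
 */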
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..77aafaa1ab09 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
-	/* r3 = faulting address */
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr.	r9,r3,4,(63 - 46 - 4)
+	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
-	 * if the address is within the boundaries of the user region
-	 */
-	srdi.	r9,r10,USER_ESID_BITS
-	bne-	8f			/* invalid ea bits set */
-
-
+0:
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
 	 * array.
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	rldimi	r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
-
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
+	rldimi	r10,r9,USER_ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28		/* get 1T ESID */
+	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
+	rldimi	r10,r9,USER_ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
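The densest line in this file is the new entry check: rldicr. r9,r3,4,(63 - 46 - 4) rotates the EA left by 4, parking the region nibble at the bottom and bringing EA bits 59..46 into the top 14 bit positions, then clears everything below them. A non-zero result is therefore exactly (ea & ~REGION_MASK) >= PGTABLE_RANGE, the test the deleted hash_page() code performed, but done before any SLB state is touched. A small host-side sketch of the equivalence (illustrative; assumes a 64-bit unsigned long and the PGTABLE_RANGE value from this series):

#include <assert.h>

#define REGION_MASK	(0xful << 60)
#define PGTABLE_RANGE	(1ul << 46)

/* C model of: rldicr. r9,r3,4,(63 - 46 - 4)  ==  rldicr. r9,r3,4,13 */
static unsigned long rldicr_4_13(unsigned long ea)
{
	unsigned long rot = (ea << 4) | (ea >> 60);	/* rotate left by 4 */

	return rot & ~((1ul << 50) - 1);	/* keep top 14 bits (mask 0..13) */
}

int main(void)
{
	unsigned long i;

	for (i = 0; i < (1ul << 20); i++) {
		/* spread test EAs across the 64-bit space */
		unsigned long ea = i * 0x9e3779b97f4a7c15ul;
		int c_test = (ea & ~REGION_MASK) >= PGTABLE_RANGE;
		int asm_test = rldicr_4_13(ea) != 0;

		assert(c_test == asm_test);
	}
	return 0;
}

This early exit is also why label 8: now zeroes r9 in addition to r10: slb_finish_load reads the context from r9, so both halves of the proto-VSID must be 0 to produce BAD_VSID.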
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 0d82ef50dc3f..023ec8a13f38 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	if (!is_kernel_addr(addr)) {
 		ssize = user_segment_size(addr);
 		vsid = get_vsid(mm->context.id, addr, ssize);
-		WARN_ON(vsid == 0);
 	} else {
 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
 		ssize = mmu_kernel_ssize;
 	}
+	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
 	rpte = __real_pte(__pte(pte), ptep);
 