about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h2
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/paca.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/mm/hugetlbpage-radix.c8
-rw-r--r--arch/powerpc/mm/mmap.c18
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c4
-rw-r--r--arch/powerpc/mm/slb_low.S2
-rw-r--r--arch/powerpc/mm/slice.c22
11 files changed, 27 insertions, 42 deletions
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 508275bb05d5..e91e115a816f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -606,7 +606,7 @@ extern void slb_set_size(u16 size);
606 606
607/* 4 bits per slice and we have one slice per 1TB */ 607/* 4 bits per slice and we have one slice per 1TB */
608#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41) 608#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
609#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.addr_limit >> 41) 609#define TASK_SLICE_ARRAY_SZ(x) ((x)->context.slb_addr_limit >> 41)
610 610
611#ifndef __ASSEMBLY__ 611#ifndef __ASSEMBLY__
612 612
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c3b00e8ff791..49a07c5d9e50 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -92,7 +92,7 @@ typedef struct {
92#ifdef CONFIG_PPC_MM_SLICES 92#ifdef CONFIG_PPC_MM_SLICES
93 u64 low_slices_psize; /* SLB page size encodings */ 93 u64 low_slices_psize; /* SLB page size encodings */
94 unsigned char high_slices_psize[SLICE_ARRAY_SIZE]; 94 unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
95 unsigned long addr_limit; 95 unsigned long slb_addr_limit;
96#else 96#else
97 u16 sllp; /* SLB page size encoding */ 97 u16 sllp; /* SLB page size encoding */
98#endif 98#endif
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index c907ae23c956..3892db93b837 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -143,7 +143,7 @@ struct paca_struct {
143#ifdef CONFIG_PPC_MM_SLICES 143#ifdef CONFIG_PPC_MM_SLICES
144 u64 mm_ctx_low_slices_psize; 144 u64 mm_ctx_low_slices_psize;
145 unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE]; 145 unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
146 unsigned long addr_limit; 146 unsigned long mm_ctx_slb_addr_limit;
147#else 147#else
148 u16 mm_ctx_user_psize; 148 u16 mm_ctx_user_psize;
149 u16 mm_ctx_sllp; 149 u16 mm_ctx_sllp;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 200623e71474..9aace433491a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -185,7 +185,7 @@ int main(void)
185#ifdef CONFIG_PPC_MM_SLICES 185#ifdef CONFIG_PPC_MM_SLICES
186 OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize); 186 OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
187 OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize); 187 OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
188 DEFINE(PACA_ADDR_LIMIT, offsetof(struct paca_struct, addr_limit)); 188 OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
189 DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); 189 DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
190#endif /* CONFIG_PPC_MM_SLICES */ 190#endif /* CONFIG_PPC_MM_SLICES */
191#endif 191#endif
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 5d38d5ea9a24..d6597038931d 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -262,8 +262,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
262 262
263 get_paca()->mm_ctx_id = context->id; 263 get_paca()->mm_ctx_id = context->id;
264#ifdef CONFIG_PPC_MM_SLICES 264#ifdef CONFIG_PPC_MM_SLICES
265 VM_BUG_ON(!mm->context.addr_limit); 265 VM_BUG_ON(!mm->context.slb_addr_limit);
266 get_paca()->addr_limit = mm->context.addr_limit; 266 get_paca()->mm_ctx_slb_addr_limit = mm->context.slb_addr_limit;
267 get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize; 267 get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
268 memcpy(&get_paca()->mm_ctx_high_slices_psize, 268 memcpy(&get_paca()->mm_ctx_high_slices_psize,
269 &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm)); 269 &context->high_slices_psize, TASK_SLICE_ARRAY_SZ(mm));
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index fa661ed616f5..2075322cd225 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -898,7 +898,8 @@ void __init setup_arch(char **cmdline_p)
898 898
899#ifdef CONFIG_PPC_MM_SLICES 899#ifdef CONFIG_PPC_MM_SLICES
900#ifdef CONFIG_PPC64 900#ifdef CONFIG_PPC64
901 init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64; 901 if (!radix_enabled())
902 init_mm.context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
902#else 903#else
903#error "context.addr_limit not initialized." 904#error "context.addr_limit not initialized."
904#endif 905#endif
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 0a3d71aae175..b54b581a2f7d 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -60,16 +60,10 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
60 return -EINVAL; 60 return -EINVAL;
61 if (len > high_limit) 61 if (len > high_limit)
62 return -ENOMEM; 62 return -ENOMEM;
63
63 if (fixed) { 64 if (fixed) {
64 if (addr > high_limit - len) 65 if (addr > high_limit - len)
65 return -ENOMEM; 66 return -ENOMEM;
66 }
67
68 if (unlikely(addr > mm->context.addr_limit &&
69 mm->context.addr_limit != TASK_SIZE))
70 mm->context.addr_limit = TASK_SIZE;
71
72 if (fixed) {
73 if (prepare_hugepage_range(file, addr, len)) 67 if (prepare_hugepage_range(file, addr, len))
74 return -EINVAL; 68 return -EINVAL;
75 return addr; 69 return addr;
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 6d476a7b5611..d503f344e476 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -116,17 +116,12 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
116 116
117 if (len > high_limit) 117 if (len > high_limit)
118 return -ENOMEM; 118 return -ENOMEM;
119
119 if (fixed) { 120 if (fixed) {
120 if (addr > high_limit - len) 121 if (addr > high_limit - len)
121 return -ENOMEM; 122 return -ENOMEM;
122 }
123
124 if (unlikely(addr > mm->context.addr_limit &&
125 mm->context.addr_limit != TASK_SIZE))
126 mm->context.addr_limit = TASK_SIZE;
127
128 if (fixed)
129 return addr; 123 return addr;
124 }
130 125
131 if (addr) { 126 if (addr) {
132 addr = PAGE_ALIGN(addr); 127 addr = PAGE_ALIGN(addr);
@@ -165,17 +160,12 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
165 160
166 if (len > high_limit) 161 if (len > high_limit)
167 return -ENOMEM; 162 return -ENOMEM;
163
168 if (fixed) { 164 if (fixed) {
169 if (addr > high_limit - len) 165 if (addr > high_limit - len)
170 return -ENOMEM; 166 return -ENOMEM;
171 }
172
173 if (unlikely(addr > mm->context.addr_limit &&
174 mm->context.addr_limit != TASK_SIZE))
175 mm->context.addr_limit = TASK_SIZE;
176
177 if (fixed)
178 return addr; 167 return addr;
168 }
179 169
180 if (addr) { 170 if (addr) {
181 addr = PAGE_ALIGN(addr); 171 addr = PAGE_ALIGN(addr);
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 846cbad45fce..5e193e444ee8 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -96,8 +96,8 @@ static int hash__init_new_context(struct mm_struct *mm)
96 * In the case of exec, use the default limit, 96 * In the case of exec, use the default limit,
97 * otherwise inherit it from the mm we are duplicating. 97 * otherwise inherit it from the mm we are duplicating.
98 */ 98 */
99 if (!mm->context.addr_limit) 99 if (!mm->context.slb_addr_limit)
100 mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64; 100 mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
101 101
102 /* 102 /*
103 * The old code would re-promote on fork, we don't do that when using 103 * The old code would re-promote on fork, we don't do that when using
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ed60ad861dfa..2cf5ef3fc50d 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -167,7 +167,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
167 /* 167 /*
168 * user space make sure we are within the allowed limit 168 * user space make sure we are within the allowed limit
169 */ 169 */
170 ld r11,PACA_ADDR_LIMIT(r13) 170 ld r11,PACA_SLB_ADDR_LIMIT(r13)
171 cmpld r3,r11 171 cmpld r3,r11
172 bge- 8f 172 bge- 8f
173 173
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index a4f93699194b..564fff06f5c1 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -96,7 +96,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
96{ 96{
97 struct vm_area_struct *vma; 97 struct vm_area_struct *vma;
98 98
99 if ((mm->context.addr_limit - len) < addr) 99 if ((mm->context.slb_addr_limit - len) < addr)
100 return 0; 100 return 0;
101 vma = find_vma(mm, addr); 101 vma = find_vma(mm, addr);
102 return (!vma || (addr + len) <= vm_start_gap(vma)); 102 return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -133,10 +133,10 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
133 if (!slice_low_has_vma(mm, i)) 133 if (!slice_low_has_vma(mm, i))
134 ret->low_slices |= 1u << i; 134 ret->low_slices |= 1u << i;
135 135
136 if (mm->context.addr_limit <= SLICE_LOW_TOP) 136 if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
137 return; 137 return;
138 138
139 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) 139 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
140 if (!slice_high_has_vma(mm, i)) 140 if (!slice_high_has_vma(mm, i))
141 __set_bit(i, ret->high_slices); 141 __set_bit(i, ret->high_slices);
142} 142}
@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
157 ret->low_slices |= 1u << i; 157 ret->low_slices |= 1u << i;
158 158
159 hpsizes = mm->context.high_slices_psize; 159 hpsizes = mm->context.high_slices_psize;
160 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) { 160 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
161 mask_index = i & 0x1; 161 mask_index = i & 0x1;
162 index = i >> 1; 162 index = i >> 1;
163 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize) 163 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,7 +169,7 @@ static int slice_check_fit(struct mm_struct *mm,
169 struct slice_mask mask, struct slice_mask available) 169 struct slice_mask mask, struct slice_mask available)
170{ 170{
171 DECLARE_BITMAP(result, SLICE_NUM_HIGH); 171 DECLARE_BITMAP(result, SLICE_NUM_HIGH);
172 unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit); 172 unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
173 173
174 bitmap_and(result, mask.high_slices, 174 bitmap_and(result, mask.high_slices,
175 available.high_slices, slice_count); 175 available.high_slices, slice_count);
@@ -219,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
219 mm->context.low_slices_psize = lpsizes; 219 mm->context.low_slices_psize = lpsizes;
220 220
221 hpsizes = mm->context.high_slices_psize; 221 hpsizes = mm->context.high_slices_psize;
222 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) { 222 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
223 mask_index = i & 0x1; 223 mask_index = i & 0x1;
224 index = i >> 1; 224 index = i >> 1;
225 if (test_bit(i, mask.high_slices)) 225 if (test_bit(i, mask.high_slices))
@@ -329,8 +329,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
329 * Only for that request for which high_limit is above 329 * Only for that request for which high_limit is above
330 * DEFAULT_MAP_WINDOW we should apply this. 330 * DEFAULT_MAP_WINDOW we should apply this.
331 */ 331 */
332 if (high_limit > DEFAULT_MAP_WINDOW) 332 if (high_limit > DEFAULT_MAP_WINDOW)
333 addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW; 333 addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
334 334
335 while (addr > PAGE_SIZE) { 335 while (addr > PAGE_SIZE) {
336 info.high_limit = addr; 336 info.high_limit = addr;
@@ -432,8 +432,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
432 return -ENOMEM; 432 return -ENOMEM;
433 } 433 }
434 434
435 if (high_limit > mm->context.addr_limit) { 435 if (high_limit > mm->context.slb_addr_limit) {
436 mm->context.addr_limit = high_limit; 436 mm->context.slb_addr_limit = high_limit;
437 on_each_cpu(slice_flush_segments, mm, 1); 437 on_each_cpu(slice_flush_segments, mm, 1);
438 } 438 }
439 439
@@ -452,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
452 452
453 /* Sanity checks */ 453 /* Sanity checks */
454 BUG_ON(mm->task_size == 0); 454 BUG_ON(mm->task_size == 0);
455 BUG_ON(mm->context.addr_limit == 0); 455 BUG_ON(mm->context.slb_addr_limit == 0);
456 VM_BUG_ON(radix_enabled()); 456 VM_BUG_ON(radix_enabled());
457 457
458 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize); 458 slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);