Diffstat (limited to 'arch/powerpc/mm/slice.c')
-rw-r--r--  arch/powerpc/mm/slice.c | 109
1 file changed, 33 insertions(+), 76 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index aec91dbcdc0b..97fbf7b54422 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -101,7 +101,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 {
 	struct vm_area_struct *vma;
 
-	if ((mm->context.slb_addr_limit - len) < addr)
+	if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
 	return (!vma || (addr + len) <= vm_start_gap(vma));
@@ -118,13 +118,11 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
 	unsigned long start = slice << SLICE_HIGH_SHIFT;
 	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
 
-#ifdef CONFIG_PPC64
 	/* Hack, so that each addresses is controlled by exactly one
 	 * of the high or low area bitmaps, the first high area starts
 	 * at 4GB, not 0 */
 	if (start == 0)
-		start = SLICE_LOW_TOP;
-#endif
+		start = (unsigned long)SLICE_LOW_TOP;
 
 	return !slice_area_is_free(mm, start, end - start);
 }
@@ -150,40 +148,6 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
 		__set_bit(i, ret->high_slices);
 }
 
-#ifdef CONFIG_PPC_BOOK3S_64
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
-#ifdef CONFIG_PPC_64K_PAGES
-	if (psize == MMU_PAGE_64K)
-		return &mm->context.mask_64k;
-#endif
-	if (psize == MMU_PAGE_4K)
-		return &mm->context.mask_4k;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (psize == MMU_PAGE_16M)
-		return &mm->context.mask_16m;
-	if (psize == MMU_PAGE_16G)
-		return &mm->context.mask_16g;
-#endif
-	BUG();
-}
-#elif defined(CONFIG_PPC_8xx)
-static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
-{
-	if (psize == mmu_virtual_psize)
-		return &mm->context.mask_base_psize;
-#ifdef CONFIG_HUGETLB_PAGE
-	if (psize == MMU_PAGE_512K)
-		return &mm->context.mask_512k;
-	if (psize == MMU_PAGE_8M)
-		return &mm->context.mask_8m;
-#endif
-	BUG();
-}
-#else
-#error "Must define the slice masks for page sizes supported by the platform"
-#endif
-
 static bool slice_check_range_fits(struct mm_struct *mm,
 			   const struct slice_mask *available,
 			   unsigned long start, unsigned long len)
@@ -246,14 +210,14 @@ static void slice_convert(struct mm_struct *mm,
 	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
 	slice_print_mask(" mask", mask);
 
-	psize_mask = slice_mask_for_size(mm, psize);
+	psize_mask = slice_mask_for_size(&mm->context, psize);
 
 	/* We need to use a spinlock here to protect against
 	 * concurrent 64k -> 4k demotion ...
 	 */
 	spin_lock_irqsave(&slice_convert_lock, flags);
 
-	lpsizes = mm->context.low_slices_psize;
+	lpsizes = mm_ctx_low_slices(&mm->context);
 	for (i = 0; i < SLICE_NUM_LOW; i++) {
 		if (!(mask->low_slices & (1u << i)))
 			continue;
@@ -263,7 +227,7 @@ static void slice_convert(struct mm_struct *mm,
 
 		/* Update the slice_mask */
 		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
-		old_mask = slice_mask_for_size(mm, old_psize);
+		old_mask = slice_mask_for_size(&mm->context, old_psize);
 		old_mask->low_slices &= ~(1u << i);
 		psize_mask->low_slices |= 1u << i;
 
@@ -272,8 +236,8 @@ static void slice_convert(struct mm_struct *mm,
 			(((unsigned long)psize) << (mask_index * 4));
 	}
 
-	hpsizes = mm->context.high_slices_psize;
-	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+	hpsizes = mm_ctx_high_slices(&mm->context);
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
 		if (!test_bit(i, mask->high_slices))
 			continue;
 
@@ -282,7 +246,7 @@ static void slice_convert(struct mm_struct *mm,
 
 		/* Update the slice_mask */
 		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
-		old_mask = slice_mask_for_size(mm, old_psize);
+		old_mask = slice_mask_for_size(&mm->context, old_psize);
 		__clear_bit(i, old_mask->high_slices);
 		__set_bit(i, psize_mask->high_slices);
 
@@ -292,8 +256,8 @@ static void slice_convert(struct mm_struct *mm,
 	}
 
 	slice_dbg(" lsps=%lx, hsps=%lx\n",
-		  (unsigned long)mm->context.low_slices_psize,
-		  (unsigned long)mm->context.high_slices_psize);
+		  (unsigned long)mm_ctx_low_slices(&mm->context),
+		  (unsigned long)mm_ctx_high_slices(&mm->context));
 
 	spin_unlock_irqrestore(&slice_convert_lock, flags);
 
@@ -393,7 +357,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
 	 * DEFAULT_MAP_WINDOW we should apply this.
 	 */
 	if (high_limit > DEFAULT_MAP_WINDOW)
-		addr += mm->context.slb_addr_limit - DEFAULT_MAP_WINDOW;
+		addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
 
 	while (addr > min_addr) {
 		info.high_limit = addr;
@@ -505,20 +469,20 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 		return -ENOMEM;
 	}
 
-	if (high_limit > mm->context.slb_addr_limit) {
+	if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
 		/*
 		 * Increasing the slb_addr_limit does not require
 		 * slice mask cache to be recalculated because it should
 		 * be already initialised beyond the old address limit.
 		 */
-		mm->context.slb_addr_limit = high_limit;
+		mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
 
 		on_each_cpu(slice_flush_segments, mm, 1);
 	}
 
 	/* Sanity checks */
 	BUG_ON(mm->task_size == 0);
-	BUG_ON(mm->context.slb_addr_limit == 0);
+	BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
 	VM_BUG_ON(radix_enabled());
 
 	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
@@ -538,7 +502,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* First make up a "good" mask of slices that have the right size
 	 * already
 	 */
-	maskp = slice_mask_for_size(mm, psize);
+	maskp = slice_mask_for_size(&mm->context, psize);
 
 	/*
 	 * Here "good" means slices that are already the right page size,
@@ -565,7 +529,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	 * a pointer to good mask for the next code to use.
 	 */
 	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
-		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
 		if (fixed)
 			slice_or_mask(&good_mask, maskp, compat_maskp);
 		else
@@ -642,14 +606,13 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	newaddr = slice_find_area(mm, len, &potential_mask,
 				  psize, topdown, high_limit);
 
-#ifdef CONFIG_PPC_64K_PAGES
-	if (newaddr == -ENOMEM && psize == MMU_PAGE_64K) {
+	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
+	    psize == MMU_PAGE_64K) {
 		/* retry the search with 4k-page slices included */
 		slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
 		newaddr = slice_find_area(mm, len, &potential_mask,
 					  psize, topdown, high_limit);
 	}
-#endif
 
 	if (newaddr == -ENOMEM)
 		return -ENOMEM;
@@ -696,7 +659,7 @@ unsigned long arch_get_unmapped_area(struct file *filp,
 				     unsigned long flags)
 {
 	return slice_get_unmapped_area(addr, len, flags,
-				       current->mm->context.user_psize, 0);
+				       mm_ctx_user_psize(&current->mm->context), 0);
 }
 
 unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -706,7 +669,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 					     const unsigned long flags)
 {
 	return slice_get_unmapped_area(addr0, len, flags,
-				       current->mm->context.user_psize, 1);
+				       mm_ctx_user_psize(&current->mm->context), 1);
 }
 
 unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
@@ -717,10 +680,10 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
 	VM_BUG_ON(radix_enabled());
 
 	if (slice_addr_is_low(addr)) {
-		psizes = mm->context.low_slices_psize;
+		psizes = mm_ctx_low_slices(&mm->context);
 		index = GET_LOW_SLICE_INDEX(addr);
 	} else {
-		psizes = mm->context.high_slices_psize;
+		psizes = mm_ctx_high_slices(&mm->context);
 		index = GET_HIGH_SLICE_INDEX(addr);
 	}
 	mask_index = index & 0x1;
@@ -741,27 +704,22 @@ void slice_init_new_context_exec(struct mm_struct *mm)
 	 * case of fork it is just inherited from the mm being
 	 * duplicated.
 	 */
-#ifdef CONFIG_PPC64
-	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW_USER64;
-#else
-	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
-#endif
-
-	mm->context.user_psize = psize;
+	mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
+	mm_ctx_set_user_psize(&mm->context, psize);
 
 	/*
 	 * Set all slice psizes to the default.
 	 */
-	lpsizes = mm->context.low_slices_psize;
+	lpsizes = mm_ctx_low_slices(&mm->context);
 	memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
 
-	hpsizes = mm->context.high_slices_psize;
+	hpsizes = mm_ctx_high_slices(&mm->context);
 	memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
 
 	/*
 	 * Slice mask cache starts zeroed, fill the default size cache.
 	 */
-	mask = slice_mask_for_size(mm, psize);
+	mask = slice_mask_for_size(&mm->context, psize);
 	mask->low_slices = ~0UL;
 	if (SLICE_NUM_HIGH)
 		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
@@ -777,7 +735,7 @@ void slice_setup_new_exec(void)
 	if (!is_32bit_task())
 		return;
 
-	mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+	mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
 }
 #endif
 
@@ -816,22 +774,21 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len)
 {
 	const struct slice_mask *maskp;
-	unsigned int psize = mm->context.user_psize;
+	unsigned int psize = mm_ctx_user_psize(&mm->context);
 
 	VM_BUG_ON(radix_enabled());
 
-	maskp = slice_mask_for_size(mm, psize);
-#ifdef CONFIG_PPC_64K_PAGES
+	maskp = slice_mask_for_size(&mm->context, psize);
+
 	/* We need to account for 4k slices too */
-	if (psize == MMU_PAGE_64K) {
+	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
 		const struct slice_mask *compat_maskp;
 		struct slice_mask available;
 
-		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
+		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
 		slice_or_mask(&available, maskp, compat_maskp);
 		return !slice_check_range_fits(mm, &available, addr, len);
 	}
-#endif
 
 	return !slice_check_range_fits(mm, maskp, addr, len);
 }
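
Note: the patch above replaces direct mm->context field accesses with mm_ctx_*() helper calls and passes the context (rather than the whole mm) to slice_mask_for_size(), so slice.c no longer depends on the layout of the platform's mm_context_t. The snippet below is a minimal illustrative sketch of the shape such accessors can take; the real helpers live in the powerpc mmu headers, and the field names shown (slb_addr_limit, low_slices_psize, high_slices_psize, user_psize) are assumptions for illustration only, not the authoritative definitions.

/* Illustrative sketch, not the actual powerpc header: thin static inline
 * wrappers so generic slice code never touches mm_context_t fields directly.
 */
static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
	return ctx->slb_addr_limit;		/* assumed field name */
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
	ctx->slb_addr_limit = limit;
}

static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
	return ctx->low_slices_psize;		/* assumed field name */
}

static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
	return ctx->high_slices_psize;		/* assumed field name */
}

static inline unsigned int mm_ctx_user_psize(mm_context_t *ctx)
{
	return ctx->user_psize;			/* assumed field name */
}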