aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2017-11-09 23:55:07 -0500
committerMichael Ellerman <mpe@ellerman.id.au>2017-11-20 03:28:25 -0500
commit7a06c66835f75fe2be4f154a93cc30cb81734b81 (patch)
tree947b1a6b42761db78af50abbc5572c191229274b
parent3ffa9d9e2a7c10127d8cbf91ea2be15390b450ed (diff)
powerpc/64s/slice: Use addr limit when computing slice mask
While computing slice mask for the free area we need to make sure we only search in the addr limit applicable for this mmap. We update the slb_addr_limit after we request for a mmap above 128TB. But the following mmap request with hint addr below 128TB should still limit its search to below 128TB, i.e. we should not use slb_addr_limit to compute slice mask in this case. Instead, we should derive high addr limit based on the mmap hint addr value. Fixes: f4ea6dcb08ea ("powerpc/mm: Enable mappings above 128TB") Cc: stable@vger.kernel.org # v4.12+ Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/mm/slice.c34
1 file changed, 22 insertions, 12 deletions
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 564fff06f5c1..23ec2c5e3b78 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -122,7 +122,8 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
122 return !slice_area_is_free(mm, start, end - start); 122 return !slice_area_is_free(mm, start, end - start);
123} 123}
124 124
125static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret) 125static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
126 unsigned long high_limit)
126{ 127{
127 unsigned long i; 128 unsigned long i;
128 129
@@ -133,15 +134,16 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
133 if (!slice_low_has_vma(mm, i)) 134 if (!slice_low_has_vma(mm, i))
134 ret->low_slices |= 1u << i; 135 ret->low_slices |= 1u << i;
135 136
136 if (mm->context.slb_addr_limit <= SLICE_LOW_TOP) 137 if (high_limit <= SLICE_LOW_TOP)
137 return; 138 return;
138 139
139 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) 140 for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
140 if (!slice_high_has_vma(mm, i)) 141 if (!slice_high_has_vma(mm, i))
141 __set_bit(i, ret->high_slices); 142 __set_bit(i, ret->high_slices);
142} 143}
143 144
144static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret) 145static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
146 unsigned long high_limit)
145{ 147{
146 unsigned char *hpsizes; 148 unsigned char *hpsizes;
147 int index, mask_index; 149 int index, mask_index;
@@ -156,8 +158,11 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
156 if (((lpsizes >> (i * 4)) & 0xf) == psize) 158 if (((lpsizes >> (i * 4)) & 0xf) == psize)
157 ret->low_slices |= 1u << i; 159 ret->low_slices |= 1u << i;
158 160
161 if (high_limit <= SLICE_LOW_TOP)
162 return;
163
159 hpsizes = mm->context.high_slices_psize; 164 hpsizes = mm->context.high_slices_psize;
160 for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) { 165 for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
161 mask_index = i & 0x1; 166 mask_index = i & 0x1;
162 index = i >> 1; 167 index = i >> 1;
163 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize) 168 if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -169,6 +174,10 @@ static int slice_check_fit(struct mm_struct *mm,
169 struct slice_mask mask, struct slice_mask available) 174 struct slice_mask mask, struct slice_mask available)
170{ 175{
171 DECLARE_BITMAP(result, SLICE_NUM_HIGH); 176 DECLARE_BITMAP(result, SLICE_NUM_HIGH);
177 /*
178 * Make sure we just do bit compare only to the max
179 * addr limit and not the full bit map size.
180 */
172 unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); 181 unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
173 182
174 bitmap_and(result, mask.high_slices, 183 bitmap_and(result, mask.high_slices,
@@ -472,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
472 /* First make up a "good" mask of slices that have the right size 481 /* First make up a "good" mask of slices that have the right size
473 * already 482 * already
474 */ 483 */
475 slice_mask_for_size(mm, psize, &good_mask); 484 slice_mask_for_size(mm, psize, &good_mask, high_limit);
476 slice_print_mask(" good_mask", good_mask); 485 slice_print_mask(" good_mask", good_mask);
477 486
478 /* 487 /*
@@ -497,7 +506,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
497#ifdef CONFIG_PPC_64K_PAGES 506#ifdef CONFIG_PPC_64K_PAGES
498 /* If we support combo pages, we can allow 64k pages in 4k slices */ 507 /* If we support combo pages, we can allow 64k pages in 4k slices */
499 if (psize == MMU_PAGE_64K) { 508 if (psize == MMU_PAGE_64K) {
500 slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask); 509 slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
501 if (fixed) 510 if (fixed)
502 slice_or_mask(&good_mask, &compat_mask); 511 slice_or_mask(&good_mask, &compat_mask);
503 } 512 }
@@ -530,11 +539,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
530 return newaddr; 539 return newaddr;
531 } 540 }
532 } 541 }
533 542 /*
534 /* We don't fit in the good mask, check what other slices are 543 * We don't fit in the good mask, check what other slices are
535 * empty and thus can be converted 544 * empty and thus can be converted
536 */ 545 */
537 slice_mask_for_free(mm, &potential_mask); 546 slice_mask_for_free(mm, &potential_mask, high_limit);
538 slice_or_mask(&potential_mask, &good_mask); 547 slice_or_mask(&potential_mask, &good_mask);
539 slice_print_mask(" potential", potential_mask); 548 slice_print_mask(" potential", potential_mask);
540 549
@@ -744,17 +753,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
744{ 753{
745 struct slice_mask mask, available; 754 struct slice_mask mask, available;
746 unsigned int psize = mm->context.user_psize; 755 unsigned int psize = mm->context.user_psize;
756 unsigned long high_limit = mm->context.slb_addr_limit;
747 757
748 if (radix_enabled()) 758 if (radix_enabled())
749 return 0; 759 return 0;
750 760
751 slice_range_to_mask(addr, len, &mask); 761 slice_range_to_mask(addr, len, &mask);
752 slice_mask_for_size(mm, psize, &available); 762 slice_mask_for_size(mm, psize, &available, high_limit);
753#ifdef CONFIG_PPC_64K_PAGES 763#ifdef CONFIG_PPC_64K_PAGES
754 /* We need to account for 4k slices too */ 764 /* We need to account for 4k slices too */
755 if (psize == MMU_PAGE_64K) { 765 if (psize == MMU_PAGE_64K) {
756 struct slice_mask compat_mask; 766 struct slice_mask compat_mask;
757 slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask); 767 slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
758 slice_or_mask(&available, &compat_mask); 768 slice_or_mask(&available, &compat_mask);
759 } 769 }
760#endif 770#endif