aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/slice.c
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>2017-03-21 23:36:58 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2017-04-01 06:12:20 -0400
commit957b778a166e32e242a33fdab693ffb256a19cbd (patch)
treee4ffce7347a62e55ac2cd99b3efdb2e62c7cf070 /arch/powerpc/mm/slice.c
parentf6eedbba7a26fdaee9ea8121336dc86236c136c7 (diff)
powerpc/mm: Add addr_limit to mm_context and use it to derive max slice index
In the followup patch, we will increase the slice array size to handle 512TB range, but will limit the max addr to 128TB. Avoid doing unnecessary computation and avoid doing slice mask related operation above address limit. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm/slice.c')
-rw-r--r--arch/powerpc/mm/slice.c20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 95e5a20b1b6a..ded96edac817 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -136,7 +136,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 	if (mm->task_size <= SLICE_LOW_TOP)
 		return;
 
-	for (i = 0; i < SLICE_NUM_HIGH; i++)
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++)
 		if (!slice_high_has_vma(mm, i))
 			__set_bit(i, ret->high_slices);
 }
@@ -157,7 +157,7 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
 			ret->low_slices |= 1u << i;
 
 	hpsizes = mm->context.high_slices_psize;
-	for (i = 0; i < SLICE_NUM_HIGH; i++) {
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
 		mask_index = i & 0x1;
 		index = i >> 1;
 		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
@@ -165,15 +165,17 @@ static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_ma
 	}
 }
 
-static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
+static int slice_check_fit(struct mm_struct *mm,
+			   struct slice_mask mask, struct slice_mask available)
 {
 	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+	unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.addr_limit);
 
 	bitmap_and(result, mask.high_slices,
-		   available.high_slices, SLICE_NUM_HIGH);
+		   available.high_slices, slice_count);
 
 	return (mask.low_slices & available.low_slices) == mask.low_slices &&
-	       bitmap_equal(result, mask.high_slices, SLICE_NUM_HIGH);
+	       bitmap_equal(result, mask.high_slices, slice_count);
 }
 
 static void slice_flush_segments(void *parm)
@@ -217,7 +219,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
 	mm->context.low_slices_psize = lpsizes;
 
 	hpsizes = mm->context.high_slices_psize;
-	for (i = 0; i < SLICE_NUM_HIGH; i++) {
+	for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.addr_limit); i++) {
 		mask_index = i & 0x1;
 		index = i >> 1;
 		if (test_bit(i, mask.high_slices))
@@ -484,7 +486,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* Check if we fit in the good mask. If we do, we just return,
 	 * nothing else to do
 	 */
-	if (slice_check_fit(mask, good_mask)) {
+	if (slice_check_fit(mm, mask, good_mask)) {
 		slice_dbg(" fits good !\n");
 		return addr;
 	}
@@ -509,7 +511,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	slice_or_mask(&potential_mask, &good_mask);
 	slice_print_mask(" potential", potential_mask);
 
-	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
+	if ((addr != 0 || fixed) && slice_check_fit(mm, mask, potential_mask)) {
 		slice_dbg(" fits potential !\n");
 		goto convert;
 	}
@@ -734,6 +736,6 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 	slice_print_mask(" mask", mask);
 	slice_print_mask(" available", available);
 #endif
-	return !slice_check_fit(mask, available);
+	return !slice_check_fit(mm, mask, available);
 }
 #endif