path: root/arch/powerpc/mm/slice.c
author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2017-03-21 23:36:48 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2017-03-31 08:09:54 -0400
commit		a4d3621503290f73b2ca65a6de58f01296c0b85c (patch)
tree		ad5267c96851dae1c16e55a6d9178c2c0bf9236a /arch/powerpc/mm/slice.c
parent		f3207c124e7aa8d4d9cf32cc45b10ceb4defedb9 (diff)
powerpc/mm/slice: Update the function prototype
This avoids copying the slice_mask struct as a function return value.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
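As a side note for readers of the patch, here is a minimal userspace sketch of the pattern being applied. The struct, field names, and sizes below are hypothetical stand-ins rather than the kernel's struct slice_mask; the point is only that filling caller-provided storage through a pointer avoids the whole-struct copy implied by returning the mask by value.

/*
 * Illustration only -- hypothetical types and sizes, not the kernel's
 * struct slice_mask or its real mask arithmetic.
 */
#include <stdio.h>
#include <string.h>

#define HIGH_WORDS 8	/* arbitrary, just big enough to make the copy noticeable */

struct toy_mask {
	unsigned short low_slices;
	unsigned long high_slices[HIGH_WORDS];
};

/* Old pattern: build the mask locally and return the whole struct by value. */
static struct toy_mask range_to_mask_by_value(unsigned int start, unsigned int len)
{
	struct toy_mask ret;

	memset(&ret, 0, sizeof(ret));
	ret.low_slices = ((1u << len) - 1) << start;	/* set bits start..start+len-1 */
	return ret;	/* caller receives a copy of the whole struct */
}

/* New pattern: the caller passes storage and the function fills it in place. */
static void range_to_mask_by_pointer(unsigned int start, unsigned int len,
				     struct toy_mask *ret)
{
	memset(ret, 0, sizeof(*ret));
	ret->low_slices = ((1u << len) - 1) << start;	/* same bits, no struct copy */
}

int main(void)
{
	struct toy_mask a, b;

	a = range_to_mask_by_value(2, 3);
	range_to_mask_by_pointer(2, 3, &b);

	printf("by value:   low_slices = 0x%x\n", a.low_slices);
	printf("by pointer: low_slices = 0x%x\n", b.low_slices);
	return 0;
}

Under the old prototypes the structure has to be copied out of the callee (typically through a hidden return slot); with the pointer form the callee writes straight into the caller's variable, which is what the patch does for slice_range_to_mask(), slice_mask_for_free(), and slice_mask_for_size().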
Diffstat (limited to 'arch/powerpc/mm/slice.c')
-rw-r--r--	arch/powerpc/mm/slice.c	62
1 file changed, 28 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 639c7171d174..d1da357583e3 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -75,19 +75,18 @@ static void slice_print_mask(const char *label, struct slice_mask mask) {}
 
 #endif
 
-static struct slice_mask slice_range_to_mask(unsigned long start,
-					     unsigned long len)
+static void slice_range_to_mask(unsigned long start, unsigned long len,
+				struct slice_mask *ret)
 {
 	unsigned long end = start + len - 1;
-	struct slice_mask ret;
 
-	ret.low_slices = 0;
-	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);
+	ret->low_slices = 0;
+	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 
 	if (start < SLICE_LOW_TOP) {
 		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));
 
-		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
 			- (1u << GET_LOW_SLICE_INDEX(start));
 	}
 
@@ -96,9 +95,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start,
 		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
 		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
 
-		bitmap_set(ret.high_slices, start_index, count);
+		bitmap_set(ret->high_slices, start_index, count);
 	}
-	return ret;
 }
 
 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
@@ -132,53 +130,47 @@ static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
 	return !slice_area_is_free(mm, start, end - start);
 }
 
-static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
 {
-	struct slice_mask ret;
 	unsigned long i;
 
-	ret.low_slices = 0;
-	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);
+	ret->low_slices = 0;
+	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 
 	for (i = 0; i < SLICE_NUM_LOW; i++)
 		if (!slice_low_has_vma(mm, i))
-			ret.low_slices |= 1u << i;
+			ret->low_slices |= 1u << i;
 
 	if (mm->task_size <= SLICE_LOW_TOP)
-		return ret;
+		return;
 
 	for (i = 0; i < SLICE_NUM_HIGH; i++)
 		if (!slice_high_has_vma(mm, i))
-			__set_bit(i, ret.high_slices);
-
-	return ret;
+			__set_bit(i, ret->high_slices);
 }
 
-static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
 {
 	unsigned char *hpsizes;
 	int index, mask_index;
-	struct slice_mask ret;
 	unsigned long i;
 	u64 lpsizes;
 
-	ret.low_slices = 0;
-	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);
+	ret->low_slices = 0;
+	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
 
 	lpsizes = mm->context.low_slices_psize;
 	for (i = 0; i < SLICE_NUM_LOW; i++)
 		if (((lpsizes >> (i * 4)) & 0xf) == psize)
-			ret.low_slices |= 1u << i;
+			ret->low_slices |= 1u << i;
 
 	hpsizes = mm->context.high_slices_psize;
 	for (i = 0; i < SLICE_NUM_HIGH; i++) {
 		mask_index = i & 0x1;
 		index = i >> 1;
 		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
-			__set_bit(i, ret.high_slices);
+			__set_bit(i, ret->high_slices);
 	}
-
-	return ret;
 }
 
 static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
@@ -460,7 +452,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* First make up a "good" mask of slices that have the right size
 	 * already
 	 */
-	good_mask = slice_mask_for_size(mm, psize);
+	slice_mask_for_size(mm, psize, &good_mask);
 	slice_print_mask(" good_mask", good_mask);
 
 	/*
@@ -485,7 +477,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 #ifdef CONFIG_PPC_64K_PAGES
 	/* If we support combo pages, we can allow 64k pages in 4k slices */
 	if (psize == MMU_PAGE_64K) {
-		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
 		if (fixed)
 			slice_or_mask(&good_mask, &compat_mask);
 	}
@@ -494,7 +486,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* First check hint if it's valid or if we have MAP_FIXED */
 	if (addr != 0 || fixed) {
 		/* Build a mask for the requested range */
-		mask = slice_range_to_mask(addr, len);
+		slice_range_to_mask(addr, len, &mask);
 		slice_print_mask(" mask", mask);
 
 		/* Check if we fit in the good mask. If we do, we just return,
@@ -521,7 +513,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	/* We don't fit in the good mask, check what other slices are
 	 * empty and thus can be converted
 	 */
-	potential_mask = slice_mask_for_free(mm);
+	slice_mask_for_free(mm, &potential_mask);
 	slice_or_mask(&potential_mask, &good_mask);
 	slice_print_mask(" potential", potential_mask);
 
@@ -564,7 +556,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
 	if (addr == -ENOMEM)
 		return -ENOMEM;
 
-	mask = slice_range_to_mask(addr, len);
+	slice_range_to_mask(addr, len, &mask);
 	slice_dbg(" found potential area at 0x%lx\n", addr);
 	slice_print_mask(" mask", mask);
 
@@ -696,9 +688,11 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
 void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
 			   unsigned long len, unsigned int psize)
 {
-	struct slice_mask mask = slice_range_to_mask(start, len);
+	struct slice_mask mask;
 
 	VM_BUG_ON(radix_enabled());
+
+	slice_range_to_mask(start, len, &mask);
 	slice_convert(mm, mask, psize);
 }
 
@@ -731,13 +725,13 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 	if (radix_enabled())
 		return 0;
 
-	mask = slice_range_to_mask(addr, len);
-	available = slice_mask_for_size(mm, psize);
+	slice_range_to_mask(addr, len, &mask);
+	slice_mask_for_size(mm, psize, &available);
 #ifdef CONFIG_PPC_64K_PAGES
 	/* We need to account for 4k slices too */
 	if (psize == MMU_PAGE_64K) {
 		struct slice_mask compat_mask;
-		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
 		slice_or_mask(&available, &compat_mask);
 	}
 #endif