author		Yinghai Lu <yinghai@kernel.org>		2012-11-16 22:39:12 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:40 -0500
commit		5a0d3aeeeffbd1534a510fc10c4ab7c99c45afce
tree		6d31a6883eb147792d59f63647e1afe695e52cc4 /arch/x86/mm/init.c
parent		11ed9e927d573d78beda6e6a166612666ae97064
x86, mm: use round_up/down in split_mem_range()
to replace the open-coded inline versions of those round-up and round-down calculations.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-36-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
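
For reference, a minimal user-space sketch of the identity this cleanup relies on: for a power-of-two size such as PMD_SIZE, the open-coded shift sequence and round_up()/round_down() followed by a shift by PAGE_SHIFT yield the same PFN. The round_up()/round_down() macros below are local stand-ins that assume power-of-two alignment; they are not the kernel's definitions.

/*
 * Sketch only, not kernel code.  The local round_up()/round_down() macros
 * stand in for the kernel helpers and assume power-of-two alignment,
 * which holds for PMD_SIZE and PUD_SIZE.
 */
#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)

#define round_up(x, y)		(((x) + (y) - 1) & ~((y) - 1))
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	uint64_t pos;

	for (pos = 0; pos < 4 * PMD_SIZE; pos += 0x1000) {
		/* open-coded forms removed by the patch */
		uint64_t old_up = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
					<< (PMD_SHIFT - PAGE_SHIFT);
		uint64_t old_down = (pos >> PMD_SHIFT)
					<< (PMD_SHIFT - PAGE_SHIFT);

		/* helper-based forms introduced by the patch */
		assert(old_up == (round_up(pos, PMD_SIZE) >> PAGE_SHIFT));
		assert(old_down == (round_down(pos, PMD_SIZE) >> PAGE_SHIFT));
	}
	return 0;
}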
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c	30
1 file changed, 12 insertions(+), 18 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 8168bf8fcda7..0e625e606e5d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -218,13 +218,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	 * slowdowns.
 	 */
 	if (pos == 0)
-		end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
+		end_pfn = PMD_SIZE >> PAGE_SHIFT;
 	else
-		end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-				 << (PMD_SHIFT - PAGE_SHIFT);
+		end_pfn = round_up(pos, PMD_SIZE) >> PAGE_SHIFT;
 #else /* CONFIG_X86_64 */
-	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-			<< (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = round_up(pos, PMD_SIZE) >> PAGE_SHIFT;
 #endif
 	if (end_pfn > (end >> PAGE_SHIFT))
 		end_pfn = end >> PAGE_SHIFT;
@@ -234,15 +232,13 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	}
 
 	/* big page (2M) range */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
+	start_pfn = round_up(pos, PMD_SIZE) >> PAGE_SHIFT;
 #ifdef CONFIG_X86_32
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = round_down(end, PMD_SIZE) >> PAGE_SHIFT;
 #else /* CONFIG_X86_64 */
-	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	end_pfn = round_up(pos, PUD_SIZE) >> PAGE_SHIFT;
+	if (end_pfn > (round_down(end, PMD_SIZE) >> PAGE_SHIFT))
+		end_pfn = round_down(end, PMD_SIZE) >> PAGE_SHIFT;
 #endif
 
 	if (start_pfn < end_pfn) {
@@ -253,9 +249,8 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 
 #ifdef CONFIG_X86_64
 	/* big page (1G) range */
-	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-			 << (PUD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	start_pfn = round_up(pos, PUD_SIZE) >> PAGE_SHIFT;
+	end_pfn = round_down(end, PUD_SIZE) >> PAGE_SHIFT;
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
@@ -264,9 +259,8 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	}
 
 	/* tail is not big page (1G) alignment */
-	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	start_pfn = round_up(pos, PMD_SIZE) >> PAGE_SHIFT;
+	end_pfn = round_down(end, PMD_SIZE) >> PAGE_SHIFT;
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask & (1<<PG_LEVEL_2M));
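
As a quick illustration of the boundary PFNs the patched x86_64 path now derives, here is a standalone sketch with the x86_64 shift values hard-coded for an arbitrary unaligned example region; this is illustration only, not kernel code, and the rounding macros are again local power-of-two stand-ins.

/*
 * Illustration only (user space): prints the 2M- and 1G-aligned boundary
 * PFNs that the patched code would derive for one example region.
 * PAGE/PMD/PUD shifts are hard-coded x86_64 values.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define PUD_SIZE	(1ULL << PUD_SHIFT)

#define round_up(x, y)		(((x) + (y) - 1) & ~((y) - 1))
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	uint64_t pos = 0x00201000ULL;	/* example start: 2 MiB + 4 KiB */
	uint64_t end = 0xc0000000ULL;	/* example end:   3 GiB */

	printf("2M start pfn: %#llx\n",
	       (unsigned long long)(round_up(pos, PMD_SIZE) >> PAGE_SHIFT));
	printf("2M end   pfn: %#llx\n",
	       (unsigned long long)(round_down(end, PMD_SIZE) >> PAGE_SHIFT));
	printf("1G start pfn: %#llx\n",
	       (unsigned long long)(round_up(pos, PUD_SIZE) >> PAGE_SHIFT));
	printf("1G end   pfn: %#llx\n",
	       (unsigned long long)(round_down(end, PUD_SIZE) >> PAGE_SHIFT));
	return 0;
}

For this example region it should print 2M boundaries at pfn 0x400 and 0xc0000 and 1G boundaries at pfn 0x40000 and 0xc0000, i.e. the unaligned head is left for 4K mappings while the bulk of the range is eligible for 2M and 1G pages.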