diff options
author | Yinghai Lu <yinghai@kernel.org> | 2012-11-16 22:39:14 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2012-11-17 14:59:41 -0500 |
commit | 1829ae9ad7380bf17333ab9ad1610631d9cb8664 (patch) | |
tree | ae54f697df1ac557090f626af4349613b619f462 /arch | |
parent | 84d770019bb990dcd8013d9d08174d0e1516b517 (diff) |
x86, mm: use pfn instead of pos in split_mem_range
This could save some bit-shifting operations.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-38-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/mm/init.c | 29 |
1 file changed, 14 insertions, 15 deletions
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 1cca052b2cbd..4bf1c5374928 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -204,12 +204,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
204 | unsigned long end) | 204 | unsigned long end) |
205 | { | 205 | { |
206 | unsigned long start_pfn, end_pfn; | 206 | unsigned long start_pfn, end_pfn; |
207 | unsigned long pos; | 207 | unsigned long pfn; |
208 | int i; | 208 | int i; |
209 | 209 | ||
210 | /* head if not big page alignment ? */ | 210 | /* head if not big page alignment ? */ |
211 | start_pfn = PFN_DOWN(start); | 211 | pfn = start_pfn = PFN_DOWN(start); |
212 | pos = PFN_PHYS(start_pfn); | ||
213 | #ifdef CONFIG_X86_32 | 212 | #ifdef CONFIG_X86_32 |
214 | /* | 213 | /* |
215 | * Don't use a large page for the first 2/4MB of memory | 214 | * Don't use a large page for the first 2/4MB of memory |
@@ -217,26 +216,26 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
217 | * and overlapping MTRRs into large pages can cause | 216 | * and overlapping MTRRs into large pages can cause |
218 | * slowdowns. | 217 | * slowdowns. |
219 | */ | 218 | */ |
220 | if (pos == 0) | 219 | if (pfn == 0) |
221 | end_pfn = PFN_DOWN(PMD_SIZE); | 220 | end_pfn = PFN_DOWN(PMD_SIZE); |
222 | else | 221 | else |
223 | end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE)); | 222 | end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); |
224 | #else /* CONFIG_X86_64 */ | 223 | #else /* CONFIG_X86_64 */ |
225 | end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE)); | 224 | end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); |
226 | #endif | 225 | #endif |
227 | if (end_pfn > PFN_DOWN(end)) | 226 | if (end_pfn > PFN_DOWN(end)) |
228 | end_pfn = PFN_DOWN(end); | 227 | end_pfn = PFN_DOWN(end); |
229 | if (start_pfn < end_pfn) { | 228 | if (start_pfn < end_pfn) { |
230 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 229 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
231 | pos = PFN_PHYS(end_pfn); | 230 | pfn = end_pfn; |
232 | } | 231 | } |
233 | 232 | ||
234 | /* big page (2M) range */ | 233 | /* big page (2M) range */ |
235 | start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE)); | 234 | start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); |
236 | #ifdef CONFIG_X86_32 | 235 | #ifdef CONFIG_X86_32 |
237 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); | 236 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); |
238 | #else /* CONFIG_X86_64 */ | 237 | #else /* CONFIG_X86_64 */ |
239 | end_pfn = PFN_DOWN(round_up(pos, PUD_SIZE)); | 238 | end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); |
240 | if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE))) | 239 | if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE))) |
241 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); | 240 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); |
242 | #endif | 241 | #endif |
@@ -244,32 +243,32 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
244 | if (start_pfn < end_pfn) { | 243 | if (start_pfn < end_pfn) { |
245 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 244 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, |
246 | page_size_mask & (1<<PG_LEVEL_2M)); | 245 | page_size_mask & (1<<PG_LEVEL_2M)); |
247 | pos = PFN_PHYS(end_pfn); | 246 | pfn = end_pfn; |
248 | } | 247 | } |
249 | 248 | ||
250 | #ifdef CONFIG_X86_64 | 249 | #ifdef CONFIG_X86_64 |
251 | /* big page (1G) range */ | 250 | /* big page (1G) range */ |
252 | start_pfn = PFN_DOWN(round_up(pos, PUD_SIZE)); | 251 | start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE)); |
253 | end_pfn = PFN_DOWN(round_down(end, PUD_SIZE)); | 252 | end_pfn = PFN_DOWN(round_down(end, PUD_SIZE)); |
254 | if (start_pfn < end_pfn) { | 253 | if (start_pfn < end_pfn) { |
255 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 254 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, |
256 | page_size_mask & | 255 | page_size_mask & |
257 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); | 256 | ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G))); |
258 | pos = PFN_PHYS(end_pfn); | 257 | pfn = end_pfn; |
259 | } | 258 | } |
260 | 259 | ||
261 | /* tail is not big page (1G) alignment */ | 260 | /* tail is not big page (1G) alignment */ |
262 | start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE)); | 261 | start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE)); |
263 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); | 262 | end_pfn = PFN_DOWN(round_down(end, PMD_SIZE)); |
264 | if (start_pfn < end_pfn) { | 263 | if (start_pfn < end_pfn) { |
265 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, | 264 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, |
266 | page_size_mask & (1<<PG_LEVEL_2M)); | 265 | page_size_mask & (1<<PG_LEVEL_2M)); |
267 | pos = PFN_PHYS(end_pfn); | 266 | pfn = end_pfn; |
268 | } | 267 | } |
269 | #endif | 268 | #endif |
270 | 269 | ||
271 | /* tail is not big page (2M) alignment */ | 270 | /* tail is not big page (2M) alignment */ |
272 | start_pfn = PFN_DOWN(pos); | 271 | start_pfn = pfn; |
273 | end_pfn = PFN_DOWN(end); | 272 | end_pfn = PFN_DOWN(end); |
274 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 273 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
275 | 274 | ||