author    Thomas Gleixner <tglx@linutronix.de>  2008-01-30 07:34:09 -0500
committer Ingo Molnar <mingo@elte.hu>           2008-01-30 07:34:09 -0500
commit    0879750f5d75dee0546316b7b0e83fb6cd258ad7 (patch)
tree      76190c94a6e3abc12964fd6a4835e0a1f5019fe5 /arch
parent    86f03989d99cfa2e1216cdd7aa996852236909cf (diff)
x86: cpa cleanup the 64-bit alias math
Cleanup the address calculations, which are necessary to identify the high/low
alias mappings of the kernel on 64 bit machines. Instead of calling __pa/__va
back and forth, calculate the physical address once and base the other
calculations on it. Add understandable constants so we can use the already
available within() helper. Also add comments, which help mere mortals to
understand what this code does.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
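The within() helper the changelog refers to already lives in arch/x86/mm/pageattr.c;
it is nothing more than a half-open range check. A minimal sketch of it, for readers
following the diff below (a sketch, not copied verbatim from the tree):

    static inline int within(unsigned long addr, unsigned long start, unsigned long end)
    {
            /* half-open interval: start is included, end is excluded */
            return addr >= start && addr < end;
    }

With HIGH_MAP_START and HIGH_MAP_END defined as named constants, the open-coded
comparisons against __START_KERNEL_map collapse into single within() calls.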
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/mm/pageattr.c   56
1 files changed, 30 insertions, 26 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 532a40bc0e7..ec07c1873d6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -305,49 +305,53 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
+#define HIGH_MAP_START		__START_KERNEL_map
+#define HIGH_MAP_END		(__START_KERNEL_map + KERNEL_TEXT_SIZE)
+
 static int
 change_page_attr_addr(unsigned long address, pgprot_t mask_set,
 		      pgprot_t mask_clr)
 {
-	int err = 0, kernel_map = 0;
-	unsigned long pfn;
+	unsigned long phys_addr = __pa(address);
+	unsigned long pfn = phys_addr >> PAGE_SHIFT;
+	int err;
 
 #ifdef CONFIG_X86_64
-	if (address >= __START_KERNEL_map &&
-			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
-
-		address = (unsigned long)__va(__pa((void *)address));
-		kernel_map = 1;
-	}
+	/*
+	 * If we are inside the high mapped kernel range, then we
+	 * fixup the low mapping first. __va() returns the virtual
+	 * address in the linear mapping:
+	 */
+	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
+		address = (unsigned long) __va(phys_addr);
 #endif
 
-	pfn = __pa(address) >> PAGE_SHIFT;
-
-	if (!kernel_map || 1) {
-		err = __change_page_attr(address, pfn, mask_set, mask_clr);
-		if (err)
-			return err;
-	}
+	err = __change_page_attr(address, pfn, mask_set, mask_clr);
+	if (err)
+		return err;
 
 #ifdef CONFIG_X86_64
 	/*
-	 * Handle kernel mapping too which aliases part of
-	 * lowmem:
+	 * If the physical address is inside the kernel map, we need
+	 * to touch the high mapped kernel as well:
 	 */
-	if (__pa(address) < KERNEL_TEXT_SIZE) {
-		unsigned long addr2;
-
-		addr2 = __pa(address) + __START_KERNEL_map - phys_base;
+	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
+		/*
+		 * Calc the high mapping address. See __phys_addr()
+		 * for the non obvious details.
+		 */
+		address = phys_addr + HIGH_MAP_START - phys_base;
 		/* Make sure the kernel mappings stay executable */
 		pgprot_val(mask_clr) |= _PAGE_NX;
+
 		/*
-		 * Our high aliases are imprecise, so do not propagate
-		 * failures back to users:
+		 * Our high aliases are imprecise, because we check
+		 * everything between 0 and KERNEL_TEXT_SIZE, so do
+		 * not propagate lookup failures back to users:
 		 */
-		__change_page_attr(addr2, pfn, mask_set, mask_clr);
+		__change_page_attr(address, pfn, mask_set, mask_clr);
 	}
 #endif
-
 	return err;
 }
 
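To see what the alias math computes, here is a small user-space sketch; the
PAGE_OFFSET, __START_KERNEL_map and phys_base values below are illustrative
assumptions for a non-relocated 64-bit kernel of that era, not values taken
from this patch:

    #include <stdio.h>

    /* Illustrative constants only -- the real values come from the running kernel. */
    #define PAGE_OFFSET_ASSUMED     0xffff810000000000UL  /* base of the linear (low) mapping */
    #define HIGH_MAP_START_ASSUMED  0xffffffff80000000UL  /* base of the high kernel text mapping */
    #define PHYS_BASE_ASSUMED       0x0UL                 /* relocation offset, 0 if not relocated */

    int main(void)
    {
            /* Pretend a kernel text page sits at this physical address. */
            unsigned long phys_addr = 0x200000UL;

            /* Low alias: the page as seen through the linear mapping (what __va() returns). */
            unsigned long low_alias = PAGE_OFFSET_ASSUMED + phys_addr;

            /* High alias: the same page through the kernel text mapping, computed the
             * same way as the patch: phys_addr + HIGH_MAP_START - phys_base. */
            unsigned long high_alias = phys_addr + HIGH_MAP_START_ASSUMED - PHYS_BASE_ASSUMED;

            printf("phys 0x%lx -> low alias 0x%lx, high alias 0x%lx\n",
                   phys_addr, low_alias, high_alias);
            return 0;
    }

Changing attributes on one alias without fixing up the other is exactly what the
two __change_page_attr() calls in the patch guard against.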