diff options
author | Arjan van de Ven <arjan@linux.intel.com> | 2008-02-04 10:48:05 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-02-04 10:48:05 -0500 |
commit | 626c2c9d065da0cbd9997e112501487958fde690 (patch) | |
tree | fccad0409c38a4cd845c82f407e2e77bd8dbfc47 /arch/x86/mm/pageattr.c | |
parent | cc0f21bbc12dc9f05b2e7f2469128f8717b2f4d3 (diff) |
x86: use the pfn from the page when changing its attributes
When changing the attributes of a pte, we should use the PFN from the
existing PTE rather than going through hoops calculating what we think
it might have been; this is both fragile and totally unneeded. It also
makes it more hairy to call any of these functions on non-direct maps
for no good reason whatsoever.
With this change, __change_page_attr() no longer takes a pfn as argument,
which simplifies all the callers.
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@tglx.de>
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r-- | arch/x86/mm/pageattr.c | 31 |
1 files changed, 17 insertions, 14 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index bf5e33f6a322..6c55fbdbd7e8 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -277,17 +277,12 @@ out_unlock: | |||
277 | } | 277 | } |
278 | 278 | ||
279 | static int | 279 | static int |
280 | __change_page_attr(unsigned long address, unsigned long pfn, | 280 | __change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr) |
281 | pgprot_t mask_set, pgprot_t mask_clr) | ||
282 | { | 281 | { |
283 | struct page *kpte_page; | 282 | struct page *kpte_page; |
284 | int level, err = 0; | 283 | int level, err = 0; |
285 | pte_t *kpte; | 284 | pte_t *kpte; |
286 | 285 | ||
287 | #ifdef CONFIG_X86_32 | ||
288 | BUG_ON(pfn > max_low_pfn); | ||
289 | #endif | ||
290 | |||
291 | repeat: | 286 | repeat: |
292 | kpte = lookup_address(address, &level); | 287 | kpte = lookup_address(address, &level); |
293 | if (!kpte) | 288 | if (!kpte) |
@@ -298,17 +293,25 @@ repeat: | |||
298 | BUG_ON(PageCompound(kpte_page)); | 293 | BUG_ON(PageCompound(kpte_page)); |
299 | 294 | ||
300 | if (level == PG_LEVEL_4K) { | 295 | if (level == PG_LEVEL_4K) { |
301 | pgprot_t new_prot = pte_pgprot(*kpte); | ||
302 | pte_t new_pte, old_pte = *kpte; | 296 | pte_t new_pte, old_pte = *kpte; |
297 | pgprot_t new_prot = pte_pgprot(old_pte); | ||
298 | |||
299 | if(!pte_val(old_pte)) { | ||
300 | WARN_ON_ONCE(1); | ||
301 | return -EINVAL; | ||
302 | } | ||
303 | 303 | ||
304 | pgprot_val(new_prot) &= ~pgprot_val(mask_clr); | 304 | pgprot_val(new_prot) &= ~pgprot_val(mask_clr); |
305 | pgprot_val(new_prot) |= pgprot_val(mask_set); | 305 | pgprot_val(new_prot) |= pgprot_val(mask_set); |
306 | 306 | ||
307 | new_prot = static_protections(new_prot, address); | 307 | new_prot = static_protections(new_prot, address); |
308 | 308 | ||
309 | new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); | 309 | /* |
310 | BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte)); | 310 | * We need to keep the pfn from the existing PTE, |
311 | 311 | * after all we're only going to change it's attributes | |
312 | * not the memory it points to | ||
313 | */ | ||
314 | new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); | ||
312 | set_pte_atomic(kpte, new_pte); | 315 | set_pte_atomic(kpte, new_pte); |
313 | } else { | 316 | } else { |
314 | err = split_large_page(kpte, address); | 317 | err = split_large_page(kpte, address); |
@@ -337,11 +340,11 @@ static int | |||
337 | change_page_attr_addr(unsigned long address, pgprot_t mask_set, | 340 | change_page_attr_addr(unsigned long address, pgprot_t mask_set, |
338 | pgprot_t mask_clr) | 341 | pgprot_t mask_clr) |
339 | { | 342 | { |
340 | unsigned long phys_addr = __pa(address); | ||
341 | unsigned long pfn = phys_addr >> PAGE_SHIFT; | ||
342 | int err; | 343 | int err; |
343 | 344 | ||
344 | #ifdef CONFIG_X86_64 | 345 | #ifdef CONFIG_X86_64 |
346 | unsigned long phys_addr = __pa(address); | ||
347 | |||
345 | /* | 348 | /* |
346 | * If we are inside the high mapped kernel range, then we | 349 | * If we are inside the high mapped kernel range, then we |
347 | * fixup the low mapping first. __va() returns the virtual | 350 | * fixup the low mapping first. __va() returns the virtual |
@@ -351,7 +354,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set, | |||
351 | address = (unsigned long) __va(phys_addr); | 354 | address = (unsigned long) __va(phys_addr); |
352 | #endif | 355 | #endif |
353 | 356 | ||
354 | err = __change_page_attr(address, pfn, mask_set, mask_clr); | 357 | err = __change_page_attr(address, mask_set, mask_clr); |
355 | if (err) | 358 | if (err) |
356 | return err; | 359 | return err; |
357 | 360 | ||
@@ -375,7 +378,7 @@ change_page_attr_addr(unsigned long address, pgprot_t mask_set, | |||
375 | * everything between 0 and KERNEL_TEXT_SIZE, so do | 378 | * everything between 0 and KERNEL_TEXT_SIZE, so do |
376 | * not propagate lookup failures back to users: | 379 | * not propagate lookup failures back to users: |
377 | */ | 380 | */ |
378 | __change_page_attr(address, pfn, mask_set, mask_clr); | 381 | __change_page_attr(address, mask_set, mask_clr); |
379 | } | 382 | } |
380 | #endif | 383 | #endif |
381 | return err; | 384 | return err; |