author     Andi Kleen <ak@suse.de>          2008-02-04 10:48:06 -0500
committer  Ingo Molnar <mingo@elte.hu>      2008-02-04 10:48:06 -0500
commit     6bb8383bebc02dae08a17f561401f58005f75c03 (patch)
tree       bb06b03c0991a2f34e69ddbd07ab1a916858fc6c /arch/x86
parent     9bf5a47572fe4ea4e5ed2691e4313ea0bb68a74e (diff)
x86: cpa, only flush the cache if the caching attributes have changed
We only need to flush the caches in cpa() if the caching attributes
have changed. Otherwise only flush the TLBs.
This checks the PAT bits too, although they are currently not used by
the kernel.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
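
For reference, the new cache_attr() helper reduces the flush decision to a single bit test against the PAT/PCD/PWT page-table bits. Below is a minimal user-space sketch of that test; the bit positions match the x86 page-table layout, but the macros and the main() driver are reproduced here purely for illustration and are not part of the patch.

```c
#include <stdio.h>

/* Caching-related x86 page-table attribute bits (illustrative copies) */
#define _PAGE_PWT		(1UL << 3)	/* page write-through */
#define _PAGE_PCD		(1UL << 4)	/* page cache disable */
#define _PAGE_PAT		(1UL << 7)	/* PAT index bit, 4K pages */
#define _PAGE_PAT_LARGE		(1UL << 12)	/* PAT index bit, 2M/4M pages */

/* Non-zero only if the set-mask touches one of the caching attribute bits */
static unsigned long cache_attr(unsigned long mask_set)
{
	return mask_set & (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

int main(void)
{
	/* Marking a range uncached sets PCD|PWT: the caches must be flushed */
	unsigned long set_uc = _PAGE_PCD | _PAGE_PWT;
	/* Making a range read-only sets no caching bits: a TLB flush suffices */
	unsigned long set_ro = 0;

	printf("uncached:  cache flush needed? %s\n", cache_attr(set_uc) ? "yes" : "no");
	printf("read-only: cache flush needed? %s\n", cache_attr(set_ro) ? "yes" : "no");
	return 0;
}
```

With this in place, an operation that changes caching attributes still ends in clflush or wbinvd, while one that touches only protection bits skips the cache flush and flushes just the TLBs.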
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/pageattr.c | 31 ++++++++++++++++++++++++-------
1 file changed, 24 insertions(+), 7 deletions(-)
```diff
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index f60b93dc2e57..456ad0ab9c7e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -52,21 +52,23 @@ void clflush_cache_range(void *vaddr, unsigned int size)
 
 static void __cpa_flush_all(void *arg)
 {
+	unsigned long cache = (unsigned long)arg;
+
 	/*
 	 * Flush all to work around Errata in early athlons regarding
 	 * large page flushing.
 	 */
 	__flush_tlb_all();
 
-	if (boot_cpu_data.x86_model >= 4)
+	if (cache && boot_cpu_data.x86_model >= 4)
 		wbinvd();
 }
 
-static void cpa_flush_all(void)
+static void cpa_flush_all(unsigned long cache)
 {
 	BUG_ON(irqs_disabled());
 
-	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
+	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
 }
 
 static void __cpa_flush_range(void *arg)
@@ -79,7 +81,7 @@ static void __cpa_flush_range(void *arg)
 	__flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long start, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages, int cache)
 {
 	unsigned int i, level;
 	unsigned long addr;
@@ -89,6 +91,9 @@ static void cpa_flush_range(unsigned long start, int numpages)
 
 	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
+	if (!cache)
+		return;
+
 	/*
 	 * We only need to flush on one CPU,
 	 * clflush is a MESI-coherent instruction that
@@ -402,10 +407,16 @@ static int __change_page_attr_set_clr(unsigned long addr, int numpages,
 	return 0;
 }
 
+static inline int cache_attr(pgprot_t attr)
+{
+	return pgprot_val(attr) &
+		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
+}
+
 static int change_page_attr_set_clr(unsigned long addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr)
 {
-	int ret;
+	int ret, cache;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -419,15 +430,21 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	ret = __change_page_attr_set_clr(addr, numpages, mask_set, mask_clr);
 
 	/*
+	 * No need to flush, when we did not set any of the caching
+	 * attributes:
+	 */
+	cache = cache_attr(mask_set);
+
+	/*
 	 * On success we use clflush, when the CPU supports it to
 	 * avoid the wbindv. If the CPU does not support it and in the
 	 * error case we fall back to cpa_flush_all (which uses
 	 * wbindv):
 	 */
 	if (!ret && cpu_has_clflush)
-		cpa_flush_range(addr, numpages);
+		cpa_flush_range(addr, numpages, cache);
 	else
-		cpa_flush_all(cache);
+		cpa_flush_all(cache);
 
 	return ret;
 }
```
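
One small detail worth noting: cpa_flush_all() now forwards the cache flag to the per-CPU handler by casting it through on_each_cpu()'s void * argument rather than introducing a global. A standalone sketch of that idiom, with stub names (on_each_cpu_stub, flush_one_cpu) invented here for illustration:

```c
#include <stdio.h>

/* Per-CPU callback: receives only an opaque void * cookie */
static void flush_one_cpu(void *arg)
{
	unsigned long cache = (unsigned long)arg;	/* unpack the scalar flag */

	puts("flush TLB");
	if (cache)
		puts("flush caches too (wbinvd)");
}

/* Stand-in for on_each_cpu(); here the callback simply runs once */
static void on_each_cpu_stub(void (*func)(void *), void *arg)
{
	func(arg);
}

int main(void)
{
	unsigned long cache = 1;	/* caching attributes were changed */

	/* Smuggle the flag through the void * argument, as the patch does */
	on_each_cpu_stub(flush_one_cpu, (void *)cache);
	return 0;
}
```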