author		Andi Kleen <ak@suse.de>		2008-01-30 07:33:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:33:52 -0500
commit		3c868823413d76bdd80c643603be8ab09dcb4d65 (patch)
tree		b40ef3575608c40243cef41965ee16f807a7da7c
parent		6ba9b7d8f0fe786954015ce5c0ef1837d5df56b7 (diff)
x86: c_p_a() fix: reorder TLB / cache flushes to follow Intel recommendation
Intel recommends flushing the TLBs first and then the caches
on caching attribute changes. c_p_a() previously did it the
other way round; reorder the flushes to match.
The procedure is still not fully compliant with the Intel
documentation, because Intel recommends an all-CPU synchronization
step between the TLB flushes and the cache flushes.

However, on all newer Intel CPUs this is moot because they support
Self-Snoop and can skip the cache flush step anyway.
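For illustration only, a minimal sketch of the fully compliant
sequence, assuming the four-argument on_each_cpu() of this kernel
era; the cpa_* helper names are hypothetical, not part of c_p_a():

#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>

/* Sketch: the Intel-documented ordering for a caching attribute
 * change, including the all-CPU synchronization step that this
 * patch still omits. Helper names are hypothetical. */
static void cpa_flush_tlb(void *unused)
{
	__flush_tlb_all();		/* step 1: flush TLBs */
}

static void cpa_flush_cache(void *unused)
{
	wbinvd();			/* step 3: flush caches */
}

static void cpa_flush_compliant(void)
{
	/* With wait == 1, on_each_cpu() returns only after every CPU
	 * has run the handler, which doubles as step 2, the all-CPU
	 * synchronization between the two flushes. */
	on_each_cpu(cpa_flush_tlb, NULL, 1, 1);

	/* CPUs with Self-Snoop could skip this step entirely. */
	on_each_cpu(cpa_flush_cache, NULL, 1, 1);
}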
[ mingo@elte.hu: decoupled from clflush and ported it to x86.git ]
Signed-off-by: Andi Kleen <ak@suse.de>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/mm/pageattr_32.c	| 12
-rw-r--r--	arch/x86/mm/pageattr_64.c	|  3
2 files changed, 8 insertions, 7 deletions
diff --git a/arch/x86/mm/pageattr_32.c b/arch/x86/mm/pageattr_32.c
index 523fd5b37df9..5cb5c7101f41 100644
--- a/arch/x86/mm/pageattr_32.c
+++ b/arch/x86/mm/pageattr_32.c
@@ -87,6 +87,12 @@ static void flush_kernel_map(void *arg)
 	struct list_head *lh = (struct list_head *)arg;
 	struct page *p;
 
+	/*
+	 * Flush all to work around Errata in early athlons regarding
+	 * large page flushing.
+	 */
+	__flush_tlb_all();
+
 	/* High level code is not ready for clflush yet */
 	if (0 && cpu_has_clflush) {
 		list_for_each_entry(p, lh, lru)
@@ -95,12 +101,6 @@ static void flush_kernel_map(void *arg)
 		if (boot_cpu_data.x86_model >= 4)
 			wbinvd();
 	}
-
-	/*
-	 * Flush all to work around Errata in early athlons regarding
-	 * large page flushing.
-	 */
-	__flush_tlb_all();
 }
 
 static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
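For reference, a sketch of how flush_kernel_map() in pageattr_32.c
reads with this patch applied, reconstructed from the hunks above;
the two context lines between the hunks (the cache_flush_page()
call and the else branch) are inferred, not shown in the diff:

static void flush_kernel_map(void *arg)
{
	struct list_head *lh = (struct list_head *)arg;
	struct page *p;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	/* High level code is not ready for clflush yet */
	if (0 && cpu_has_clflush) {
		list_for_each_entry(p, lh, lru)
			cache_flush_page(p);	/* inferred line */
	} else {				/* inferred line */
		if (boot_cpu_data.x86_model >= 4)
			wbinvd();
	}
}

The __flush_tlb_all() now precedes the wbinvd() cache flush,
matching the recommended order.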
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 4d172881af70..3ccdb1401e67 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -82,6 +82,8 @@ static void flush_kernel_map(void *arg)
 	struct list_head *l = (struct list_head *)arg;
 	struct page *pg;
 
+	__flush_tlb_all();
+
 	/* When clflush is available always use it because it is
 	   much cheaper than WBINVD. */
 	/* clflush is still broken. Disable for now. */
@@ -94,7 +96,6 @@ static void flush_kernel_map(void *arg)
 			clflush_cache_range(addr, PAGE_SIZE);
 		}
 	}
-	__flush_tlb_all();
 }
 
 static inline void flush_map(struct list_head *l)
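Likewise, a sketch of flush_kernel_map() in pageattr_64.c after the
patch; the lines between the two hunks (the disabled clflush path
and its WBINVD fallback) are reconstructed from context and are
approximate:

static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	__flush_tlb_all();	/* TLB flush now comes first */

	/* When clflush is available always use it because it is
	   much cheaper than WBINVD. */
	/* clflush is still broken. Disable for now. */
	if (1 || !cpu_has_clflush) {		/* reconstructed */
		wbinvd();			/* reconstructed */
	} else {
		list_for_each_entry(pg, l, lru) {
			void *addr = page_address(pg);
			clflush_cache_range(addr, PAGE_SIZE);
		}
	}
}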