author     Andi Kleen <ak@suse.de>                                    2007-06-20 06:23:36 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>      2007-06-20 17:27:26 -0400
commit     018d2ad0cccfa9bb8bee1d160c353e568484a137 (patch)
tree       b50362cead47a3b472a6b645a9e7dd0262a757f9 /arch/i386/mm/pageattr.c
parent     55181000cd60334fe920c65ffbcdfe0e3f1de406 (diff)
x86: change_page_attr bandaids
- Disable CLFLUSH again; it is still broken. Always do WBINVD.
- Always flush in the i386 case, not only when there are deferred pages.

These are both brute-force, inefficient fixes, to be improved in the
next release cycle.

The changes to i386 are a little more extensive than strictly needed
(some dead code is added), but they bring it closer to the x86-64
version, and the dead code will be used soon.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
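
[Editor's note: the trade-off the message describes is between flushing one
cache line at a time with clflush and dumping the whole cache with wbinvd.
Below is a minimal freestanding sketch of the two strategies; CACHE_LINE_SIZE
and PAGE_SIZE are assumed stand-ins for boot_cpu_data.x86_clflush_size and
the kernel's page size, the function names are hypothetical, and both
instructions are x86-specific (wbinvd is privileged, so this only compiles
outside ring 0 -- it illustrates the shape of the code, not a drop-in
implementation):

/* Sketch only: assumed stand-ins for boot_cpu_data.x86_clflush_size
 * and the kernel's PAGE_SIZE. */
#define CACHE_LINE_SIZE 64
#define PAGE_SIZE 4096

/* Targeted flush: clflush evicts one cache line at a time, so only
 * the 4K page whose attributes changed leaves the cache. This is
 * the path the patch disables with "if (0 && cpu_has_clflush)". */
static void flush_one_page(char *page)
{
	int i;
	for (i = 0; i < PAGE_SIZE; i += CACHE_LINE_SIZE)
		asm volatile("clflush (%0)" :: "r" (page + i));
}

/* Brute-force flush: wbinvd writes back and invalidates the entire
 * cache of the executing CPU. Always correct, far more expensive --
 * the "brute-force, inefficient" fallback the message refers to. */
static void flush_whole_cache(void)
{
	asm volatile("wbinvd");
}
]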
Diffstat (limited to 'arch/i386/mm/pageattr.c')

 arch/i386/mm/pageattr.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 47bd477c8ecc..2eb14a73be9c 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -68,14 +68,23 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	return base;
 }
 
-static void flush_kernel_map(void *arg)
+static void cache_flush_page(struct page *p)
 {
-	unsigned long adr = (unsigned long)arg;
+	unsigned long adr = (unsigned long)page_address(p);
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
+}
+
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *lh = (struct list_head *)arg;
+	struct page *p;
 
-	if (adr && cpu_has_clflush) {
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (adr + i));
-	} else if (boot_cpu_data.x86_model >= 4)
+	/* High level code is not ready for clflush yet */
+	if (0 && cpu_has_clflush) {
+		list_for_each_entry (p, lh, lru)
+			cache_flush_page(p);
+	} else if (boot_cpu_data.x86_model >= 4)
 		wbinvd();
 
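[Editor's note: the rewritten flush_kernel_map above now receives the whole
deferred-page list through on_each_cpu's single void * argument and walks it
with list_for_each_entry. The sketch below shows what that intrusive-list
walk amounts to; the struct definitions and walk_deferred_pages are
stand-ins for illustration, not the kernel's <linux/list.h>:

#include <stddef.h>

/* Stand-ins for <linux/list.h> and struct page -- sketch only. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page {
	struct list_head lru;	/* links deferred pages, as in the patch */
};

/* Roughly what list_for_each_entry(p, lh, lru) expands to: follow the
 * next pointers and recover each containing struct page by offset. */
static void walk_deferred_pages(struct list_head *lh,
				void (*fn)(struct page *))
{
	struct list_head *pos;

	for (pos = lh->next; pos != lh; pos = pos->next)
		fn(container_of(pos, struct page, lru));
}
]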
@@ -181,9 +190,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
 	return 0;
 }
 
-static inline void flush_map(void *adr)
+static inline void flush_map(struct list_head *l)
 {
-	on_each_cpu(flush_kernel_map, adr, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
 /*
@@ -225,11 +234,8 @@ void global_flush_tlb(void)
 	spin_lock_irq(&cpa_lock);
 	list_replace_init(&df_list, &l);
 	spin_unlock_irq(&cpa_lock);
-	if (!cpu_has_clflush)
-		flush_map(NULL);
+	flush_map(&l);
 	list_for_each_entry_safe(pg, next, &l, lru) {
-		if (cpu_has_clflush)
-			flush_map(page_address(pg));
 		__free_page(pg);
 	}
 }
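
[Editor's note: the global_flush_tlb hunk completes the pattern -- steal the
whole deferred list under the lock with list_replace_init, drop the lock,
then issue one cross-CPU flush for the entire batch instead of one flush per
page. A user-space sketch of that drain pattern, with a pthread mutex
standing in for cpa_lock and a hand-rolled list_replace_init; all names
besides list_replace_init's semantics are hypothetical:

#include <pthread.h>

struct list_head { struct list_head *next, *prev; };

static struct list_head df_list = { &df_list, &df_list };
static pthread_mutex_t cpa_lock = PTHREAD_MUTEX_INITIALIZER;

/* Move the entire contents of 'old' onto 'new' and reset 'old' to
 * empty -- the effect of the kernel's list_replace_init(). The empty
 * check keeps the sketch safe when there is nothing to drain. */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
	if (old->next == old) {			/* source list is empty */
		new->next = new->prev = new;
		return;
	}
	new->next = old->next;
	new->prev = old->prev;
	new->next->prev = new;
	new->prev->next = new;
	old->next = old->prev = old;
}

/* Drain pattern from global_flush_tlb: the lock is held only long
 * enough to steal the list; the expensive flush happens outside it,
 * once, for the whole batch. */
static void drain_deferred(void (*flush_map)(struct list_head *))
{
	struct list_head l;

	pthread_mutex_lock(&cpa_lock);
	list_replace_init(&df_list, &l);
	pthread_mutex_unlock(&cpa_lock);
	flush_map(&l);		/* one broadcast covers every page */
	/* ... then free each page on the private list l ... */
}
]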