author		Andi Kleen <ak@suse.de>			2006-12-06 20:14:05 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2006-12-06 20:14:05 -0500
commit		ea7322decb974a4a3e804f96a0201e893ff88ce3
tree		1298d8ddef4637f84c0eecee774e07607eae57d6	/arch/x86_64
parent		74b47a7844501445d41d704fe7c626f4b1819508
[PATCH] x86-64: Speed and clean up cache flushing in change_page_attr
CLFLUSH is a lot faster than WBINVD, so avoid the latter if at all possible.

Always pass the complete list of pages to the other CPUs to cut down the
number of IPIs.

Minor other cleanup and sync with the i386 version.

Signed-off-by: Andi Kleen <ak@suse.de>
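For reference, a minimal user-space sketch of the CLFLUSH approach the patch adopts: flush a page one cache line at a time instead of dumping the whole cache with WBINVD. The constants and the function name below are illustrative stand-ins (the kernel uses PAGE_SIZE and boot_cpu_data.x86_clflush_size), not part of the patch:

#include <stddef.h>

#define PAGE_SZ	4096UL
#define CL_SIZE	64UL	/* stand-in for boot_cpu_data.x86_clflush_size */

/* Flush one page from the cache hierarchy one cache line at a time.
 * CLFLUSH evicts only the lines backing this page; WBINVD writes back
 * and invalidates the entire cache of the executing CPU. */
static void cache_flush_page_sketch(void *adr)
{
	size_t i;

	for (i = 0; i < PAGE_SZ; i += CL_SIZE)
		asm volatile("clflush (%0)" :: "r" ((char *)adr + i) : "memory");
}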
Diffstat (limited to 'arch/x86_64')
-rw-r--r--	arch/x86_64/mm/pageattr.c	58
1 file changed, 32 insertions(+), 26 deletions(-)
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 3e231d762aaa..ccb91dd996a9 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -61,34 +61,40 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
 	return base;
 }
 
-
-static void flush_kernel_map(void *address)
+static void cache_flush_page(void *adr)
 {
-	if (0 && address && cpu_has_clflush) {
-		/* is this worth it? */
-		int i;
-		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-			asm volatile("clflush (%0)" :: "r" (address + i));
-	} else
-		asm volatile("wbinvd":::"memory");
-	if (address)
-		__flush_tlb_one(address);
-	else
-		__flush_tlb_all();
+	int i;
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		asm volatile("clflush (%0)" :: "r" (adr + i));
 }
 
+static void flush_kernel_map(void *arg)
+{
+	struct list_head *l = (struct list_head *)arg;
+	struct page *pg;
+
+	/* When clflush is available always use it because it is
+	   much cheaper than WBINVD */
+	if (!cpu_has_clflush)
+		asm volatile("wbinvd" ::: "memory");
+	list_for_each_entry(pg, l, lru) {
+		void *adr = page_address(pg);
+		if (cpu_has_clflush)
+			cache_flush_page(adr);
+		__flush_tlb_one(adr);
+	}
+}
 
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
 {
-	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+	on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-	fpage->lru.next = (struct list_head *)deferred_pages;
-	deferred_pages = fpage;
+	list_add(&fpage->lru, &deferred_pages);
 }
 
 /*
@@ -207,18 +213,18 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 
 void global_flush_tlb(void)
 {
-	struct page *dpage;
+	struct page *pg, *next;
+	struct list_head l;
 
 	down_read(&init_mm.mmap_sem);
-	dpage = xchg(&deferred_pages, NULL);
+	list_replace_init(&deferred_pages, &l);
 	up_read(&init_mm.mmap_sem);
 
-	flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-	while (dpage) {
-		struct page *tmp = dpage;
-		dpage = (struct page *)dpage->lru.next;
-		ClearPagePrivate(tmp);
-		__free_page(tmp);
+	flush_map(&l);
+
+	list_for_each_entry_safe(pg, next, &l, lru) {
+		ClearPagePrivate(pg);
+		__free_page(pg);
 	}
 }
 