From ea7322decb974a4a3e804f96a0201e893ff88ce3 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 7 Dec 2006 02:14:05 +0100
Subject: [PATCH] x86-64: Speed and clean up cache flushing in change_page_attr

CLFLUSH is a lot faster than WBINVD so avoid the latter if at all
possible.

Always pass the complete list of pages to other CPUs to cut down
the number of IPIs.

Minor other cleanup and sync with i386 version.

Signed-off-by: Andi Kleen
---
 arch/x86_64/mm/pageattr.c | 58 ++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 26 deletions(-)

diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 3e231d762aaa..ccb91dd996a9 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -61,34 +61,40 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
         return base;
 }
 
-
-static void flush_kernel_map(void *address)
+static void cache_flush_page(void *adr)
 {
-        if (0 && address && cpu_has_clflush) {
-                /* is this worth it? */
-                int i;
-                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
-                        asm volatile("clflush (%0)" :: "r" (address + i));
-        } else
-                asm volatile("wbinvd":::"memory");
-        if (address)
-                __flush_tlb_one(address);
-        else
-                __flush_tlb_all();
+        int i;
+        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+                asm volatile("clflush (%0)" :: "r" (adr + i));
 }
 
+static void flush_kernel_map(void *arg)
+{
+        struct list_head *l = (struct list_head *)arg;
+        struct page *pg;
+
+        /* When clflush is available always use it because it is
+           much cheaper than WBINVD */
+        if (!cpu_has_clflush)
+                asm volatile("wbinvd" ::: "memory");
+        list_for_each_entry(pg, l, lru) {
+                void *adr = page_address(pg);
+                if (cpu_has_clflush)
+                        cache_flush_page(adr);
+                __flush_tlb_one(adr);
+        }
+}
 
-static inline void flush_map(unsigned long address)
+static inline void flush_map(struct list_head *l)
 {
-        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
+        on_each_cpu(flush_kernel_map, l, 1, 1);
 }
 
-static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
+static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-        fpage->lru.next = (struct list_head *)deferred_pages;
-        deferred_pages = fpage;
+        list_add(&fpage->lru, &deferred_pages);
 }
 
 /*
@@ -207,18 +213,18 @@ int change_page_attr(struct page *page, int numpages, pgprot_t prot)
 
 void global_flush_tlb(void)
 {
-        struct page *dpage;
+        struct page *pg, *next;
+        struct list_head l;
 
         down_read(&init_mm.mmap_sem);
-        dpage = xchg(&deferred_pages, NULL);
+        list_replace_init(&deferred_pages, &l);
         up_read(&init_mm.mmap_sem);
 
-        flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
-        while (dpage) {
-                struct page *tmp = dpage;
-                dpage = (struct page *)dpage->lru.next;
-                ClearPagePrivate(tmp);
-                __free_page(tmp);
+        flush_map(&l);
+
+        list_for_each_entry_safe(pg, next, &l, lru) {
+                ClearPagePrivate(pg);
+                __free_page(pg);
         }
 }
 
-- 
cgit v1.2.2