| author | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:34:09 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:34:09 -0500 |
| commit | 4c61afcdb2cd4be299c1442b33adf312b695e2d7 (patch) | |
| tree | 8f51b96e2f6520c63b7c54dd84f4840ab9157590 /arch | |
| parent | 3b233e52f70bf102078b2c0c3f7f86a441689056 (diff) | |
x86: fix clflush_page_range logic
only present ptes must be flushed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
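For context on the one-line changelog: clflush takes a linear address that goes through the normal paging checks, so issuing it against a page whose PTE is not present can fault instead of flushing anything. The flush path therefore has to walk the range page by page and consult the page tables first. Below is a minimal sketch of that pattern, paraphrasing the cpa_flush_range() hunk in the diff further down and using the same helpers (lookup_address(), pte_present(), clflush_cache_range()); the wrapper name flush_present_range() is made up for illustration, and the snippet assumes the includes already present in arch/x86/mm/pageattr.c.

```c
/*
 * Sketch only, not part of the commit: flush the data cache for a
 * page-aligned range, skipping pages whose PTE is not present so
 * that clflush is never issued on an unmapped address.
 */
static void flush_present_range(unsigned long start, int numpages)
{
        unsigned long addr = start;
        unsigned int level;
        int i;

        for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /* Only flush addresses that are actually mapped: */
                if (pte && pte_present(*pte))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}
```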
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/x86/mm/pageattr.c | 31 |

1 file changed, 24 insertions, 7 deletions
```diff
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466ab..97ec9e7d29d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * Flushing functions
  */
 
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr: virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-        int i;
+        void *vend = vaddr + size - 1;
 
         mb();
-        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-                clflush(addr+i);
+
+        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+                clflush(vaddr);
+        /*
+         * Flush any possible final partial cacheline:
+         */
+        clflush(vend);
+
         mb();
 }
 
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
         __flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+        unsigned int i, level;
+        unsigned long addr;
+
         BUG_ON(irqs_disabled());
+        WARN_ON(PAGE_ALIGN(start) != start);
 
         on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
          * will cause all other CPUs to flush the same
          * cachelines:
          */
-        clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+                pte_t *pte = lookup_address(addr, &level);
+
+                /*
+                 * Only flush present addresses:
+                 */
+                if (pte && pte_present(*pte))
+                        clflush_cache_range((void *) addr, PAGE_SIZE);
+        }
 }
 
 /*
```
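A side effect of the rewrite is that clflush_cache_range() now also covers a final, partially filled cacheline: the loop steps through the range in x86_clflush_size strides, and the extra clflush(vend) picks up the last byte's line when size is not a multiple of the line size. The following is a small stand-alone sketch of that arithmetic only, in user-space C, with the cacheline size assumed to be 64 bytes and printf standing in for clflush.

```c
#include <stdio.h>
#include <stdint.h>

/* Assumed 64-byte cacheline, standing in for boot_cpu_data.x86_clflush_size. */
#define CLFLUSH_SIZE 64UL

/*
 * Print the cachelines the patched loop would flush for a given
 * (address, size) pair; the real code issues clflush on each address.
 */
static void show_flushed_lines(uintptr_t vaddr, unsigned int size)
{
        uintptr_t vend = vaddr + size - 1;      /* last byte of the range */
        uintptr_t p;

        for (p = vaddr; p < vend; p += CLFLUSH_SIZE)
                printf("flush line 0x%lx\n", (unsigned long)(p & ~(CLFLUSH_SIZE - 1)));

        /*
         * The trailing flush of vend covers the final, possibly partial,
         * cacheline; for some sizes it repeats a line already flushed
         * above, which is harmless.
         */
        printf("flush line 0x%lx\n", (unsigned long)(vend & ~(CLFLUSH_SIZE - 1)));
}

int main(void)
{
        /*
         * 65 bytes starting at 0x1000 touch two 64-byte lines; the loop
         * alone stops before 0x1040, the trailing flush covers it.
         */
        show_flushed_lines(0x1000, 65);
        return 0;
}
```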