author     Andi Kleen <ak@suse.de>                                2007-07-21 11:09:51 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-07-21 21:37:07 -0400
commit     65d2f0bc65b0249a22a6c1f49ec29ee5e2980c7b
tree       19204282146d83bd8a1eb6d0f816dd15421afd8c /arch/x86_64/mm
parent     ad386589676403eca4e8f52c944995db56e445c3
x86: Always flush pages in change_page_attr
Fix a bug introduced with the CLFLUSH changes: we must always flush pages
changed in cpa(), not just when they are reverted.
Re-enable CLFLUSH usage now that this is fixed (it was temporarily
disabled for .22).

Add some BUG_ONs.
Contains fixes from Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
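
The heart of the fix shows up in the save_page() and global_flush_tlb() hunks below: every pte page touched by cpa() is now queued for flushing exactly once (guarded by the PG_arch_1 bit), and a queued page is only freed once its private count of non-default mappings has dropped back to zero. The following is a minimal userspace sketch of that idea, not kernel code: struct fake_page, its non-atomic "queued" flag, and drain_deferred() are simplified stand-ins for struct page, the atomic test_and_set_bit(PG_arch_1, ...) guard, and the drain loop at the end of global_flush_tlb().

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct page: "queued" plays the role of the
 * PG_arch_1 bit, "private_count" the role of page_private() (how many
 * 4k entries in this pte page still differ from the reference protection). */
struct fake_page {
	bool queued;
	int private_count;
	struct fake_page *next;
};

static struct fake_page *deferred;	/* models deferred_pages */

/* Like the new save_page(): queue the page at most once, whether or not
 * it is being reverted, so it always gets flushed later.  The kernel
 * uses an atomic test_and_set_bit(); this sketch is single-threaded. */
static void save_page(struct fake_page *pg)
{
	if (pg->queued)
		return;
	pg->queued = true;
	pg->next = deferred;
	deferred = pg;
}

/* Models the drain loop in global_flush_tlb(): every queued page is
 * flushed, but only fully reverted pte pages are freed. */
static void drain_deferred(void)
{
	struct fake_page *pg = deferred;

	deferred = NULL;
	while (pg) {
		struct fake_page *next = pg->next;

		pg->queued = false;
		printf("flush %p\n", (void *)pg);
		if (pg->private_count == 0)
			free(pg);
		pg = next;
	}
}

int main(void)
{
	struct fake_page *a = calloc(1, sizeof(*a));

	a->private_count = 1;	/* still has non-default 4k mappings */
	save_page(a);
	save_page(a);		/* duplicate queueing is a no-op */
	drain_deferred();	/* flushed, but kept around */

	a->private_count = 0;	/* everything reverted */
	save_page(a);
	drain_deferred();	/* flushed and freed */
	return 0;
}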
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--   arch/x86_64/mm/pageattr.c   23
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 9148f4a4cec6..36377b6b8efe 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -74,14 +74,12 @@ static void flush_kernel_map(void *arg)
 	struct page *pg;
 
 	/* When clflush is available always use it because it is
-	   much cheaper than WBINVD. Disable clflush for now because
-	   the high level code is not ready yet */
-	if (1 || !cpu_has_clflush)
+	   much cheaper than WBINVD. */
+	if (!cpu_has_clflush)
 		asm volatile("wbinvd" ::: "memory");
 	else list_for_each_entry(pg, l, lru) {
 		void *adr = page_address(pg);
-		if (cpu_has_clflush)
-			cache_flush_page(adr);
+		cache_flush_page(adr);
 	}
 	__flush_tlb_all();
 }
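
For context on the clflush path this hunk re-enables: instead of invalidating the entire cache with WBINVD, only the cache lines of the pages whose attributes changed are flushed. Below is a rough userspace illustration of flushing one page line by line; it assumes an x86-64 compiler with <x86intrin.h> and a 64-byte cache line, whereas the kernel's cache_flush_page() helper uses the CPU-reported clflush size rather than a hard-coded constant.

#include <x86intrin.h>

#define PAGE_SIZE  4096
#define CACHE_LINE 64	/* assumed; the kernel reads the real clflush size */

/* Flush every cache line backing one page, roughly what the clflush
 * branch of flush_kernel_map() does for each queued page. */
static void flush_one_page(void *page)
{
	char *p = page;
	int i;

	for (i = 0; i < PAGE_SIZE; i += CACHE_LINE)
		_mm_clflush(p + i);
	_mm_mfence();	/* clflush is only ordered by mfence */
}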
@@ -95,7 +93,8 @@ static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-	list_add(&fpage->lru, &deferred_pages);
+	if (!test_and_set_bit(PG_arch_1, &fpage->flags))
+		list_add(&fpage->lru, &deferred_pages);
 }
 
 /*
@@ -129,9 +128,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	pte_t *kpte;
 	struct page *kpte_page;
 	pgprot_t ref_prot2;
+
 	kpte = lookup_address(address);
 	if (!kpte) return 0;
 	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+	BUG_ON(PageLRU(kpte_page));
+	BUG_ON(PageCompound(kpte_page));
 	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
 		if (!pte_huge(*kpte)) {
 			set_pte(kpte, pfn_pte(pfn, prot));
@@ -159,10 +161,9 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
 	/* on x86-64 the direct mapping set at boot is not using 4k pages */
 	BUG_ON(PageReserved(kpte_page));
 
-	if (page_private(kpte_page) == 0) {
-		save_page(kpte_page);
+	save_page(kpte_page);
+	if (page_private(kpte_page) == 0)
 		revert_page(address, ref_prot);
-	}
 	return 0;
 }
 
@@ -234,6 +235,10 @@ void global_flush_tlb(void)
 	flush_map(&l);
 
 	list_for_each_entry_safe(pg, next, &l, lru) {
+		list_del(&pg->lru);
+		clear_bit(PG_arch_1, &pg->flags);
+		if (page_private(pg) != 0)
+			continue;
 		ClearPagePrivate(pg);
 		__free_page(pg);
 	}