author     Andi Kleen <ak@suse.de>                               2007-07-21 11:09:51 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-21 21:37:07 -0400
commit     65d2f0bc65b0249a22a6c1f49ec29ee5e2980c7b (patch)
tree       19204282146d83bd8a1eb6d0f816dd15421afd8c
parent     ad386589676403eca4e8f52c944995db56e445c3 (diff)
x86: Always flush pages in change_page_attr
Fix a bug introduced with the CLFLUSH changes: we must always flush pages
changed in cpa(), not just when they are reverted.

Re-enable CLFLUSH usage now that this works (it was temporarily disabled
for 2.6.22).

Add some BUG_ONs.

Contains fixes from Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  arch/i386/mm/pageattr.c    | 20
-rw-r--r--  arch/x86_64/mm/pageattr.c  | 23
2 files changed, 31 insertions, 12 deletions
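The heart of the fix is easiest to see in the x86_64 __change_page_attr() hunk
further down: before this patch, save_page() ran only inside the
page_private(kpte_page) == 0 branch, i.e. only when a split large page was
being reverted, so ordinary attribute changes were never queued for a cache
flush. After the patch, every touched kpte page is queued unconditionally. A
toy user-space sketch of the corrected control flow (save_page, revert_page,
and private_count here are invented stand-ins for the kernel helpers, not the
real implementations):

    #include <stdio.h>

    static int private_count = 1;  /* stand-in for page_private(kpte_page) */

    static void save_page(void)   { puts("queued for deferred flush"); }
    static void revert_page(void) { puts("reverted to large page"); }

    static void change_page_attr_fixed(void)
    {
            /* Fixed ordering: the page is always queued for the deferred
             * cache/TLB flush ... */
            save_page();
            /* ... while reverting to a large page still happens only once
             * the last 4k split user is gone. */
            if (private_count == 0)
                    revert_page();
    }

    int main(void)
    {
            change_page_attr_fixed();  /* flushed even though not reverted */
            private_count = 0;
            change_page_attr_fixed();  /* flushed and reverted */
            return 0;
    }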
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 37992ffb1633..8927222b3ab2 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -82,7 +82,7 @@ static void flush_kernel_map(void *arg)
         struct page *p;
 
         /* High level code is not ready for clflush yet */
-        if (0 && cpu_has_clflush) {
+        if (cpu_has_clflush) {
                 list_for_each_entry (p, lh, lru)
                         cache_flush_page(p);
         } else if (boot_cpu_data.x86_model >= 4)
@@ -136,6 +136,12 @@ static inline void revert_page(struct page *kpte_page, unsigned long address)
                            ref_prot));
 }
 
+static inline void save_page(struct page *kpte_page)
+{
+        if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
+                list_add(&kpte_page->lru, &df_list);
+}
+
 static int
 __change_page_attr(struct page *page, pgprot_t prot)
 {
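The new save_page() relies on test_and_set_bit() on PG_arch_1 so that a kpte
page lands on df_list at most once, no matter how many PTEs in it cpa()
touches before the next global_flush_tlb(). The same insert-at-most-once idiom
can be sketched in portable C with C11 atomics (fake_page, PG_QUEUED, and
queue_once are invented names for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define PG_QUEUED 0x1u                /* plays the role of PG_arch_1 */

    struct fake_page {
            atomic_uint flags;
            struct fake_page *next;       /* plays the role of the lru linkage */
    };

    /* Queue p at most once: the first caller wins the bit and links the
     * page; every later caller sees the bit already set and backs off. */
    static bool queue_once(struct fake_page *p, struct fake_page **list)
    {
            if (atomic_fetch_or(&p->flags, PG_QUEUED) & PG_QUEUED)
                    return false;         /* already on the deferred list */
            p->next = *list;
            *list = p;
            return true;
    }

In the kernel the same bit doubles as the "needs flushing" marker that
global_flush_tlb() clears again below.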
@@ -150,6 +156,9 @@ __change_page_attr(struct page *page, pgprot_t prot)
         if (!kpte)
                 return -EINVAL;
         kpte_page = virt_to_page(kpte);
+        BUG_ON(PageLRU(kpte_page));
+        BUG_ON(PageCompound(kpte_page));
+
         if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                 if (!pte_huge(*kpte)) {
                         set_pte_atomic(kpte, mk_pte(page, prot));
@@ -179,11 +188,11 @@ __change_page_attr(struct page *page, pgprot_t prot)
          * time (not via split_large_page) and in turn we must not
          * replace it with a largepage.
          */
+
+        save_page(kpte_page);
         if (!PageReserved(kpte_page)) {
                 if (cpu_has_pse && (page_private(kpte_page) == 0)) {
-                        ClearPagePrivate(kpte_page);
                         paravirt_release_pt(page_to_pfn(kpte_page));
-                        list_add(&kpte_page->lru, &df_list);
                         revert_page(kpte_page, address);
                 }
         }
@@ -236,6 +245,11 @@ void global_flush_tlb(void)
         spin_unlock_irq(&cpa_lock);
         flush_map(&l);
         list_for_each_entry_safe(pg, next, &l, lru) {
+                list_del(&pg->lru);
+                clear_bit(PG_arch_1, &pg->flags);
+                if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
+                        continue;
+                ClearPagePrivate(pg);
                 __free_page(pg);
         }
 }
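global_flush_tlb() is the consumer side of that list: after flush_map() has
done the actual cache and TLB work, every queued page is unlinked and its
PG_arch_1 bit cleared (re-arming save_page()); only pages that are also
fully-reverted, non-reserved page-table pages get freed. Continuing the
queue_once() sketch above, the drain loop looks roughly like this:

    #include <stddef.h>   /* NULL; extends the queue_once() sketch above */

    /* Drain the deferred list built by queue_once(): clear the flag so the
     * page can be queued again, then decide separately whether it is also
     * safe to free (the kernel checks PageReserved/page_private here). */
    static void drain_deferred(struct fake_page **list)
    {
            struct fake_page *p = *list, *next;

            *list = NULL;
            for (; p != NULL; p = next) {
                    next = p->next;
                    atomic_fetch_and(&p->flags, ~PG_QUEUED);
                    /* kernel: if still reserved or still split, continue;
                     * otherwise ClearPagePrivate() + __free_page() */
            }
    }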
diff --git a/arch/x86_64/mm/pageattr.c b/arch/x86_64/mm/pageattr.c
index 9148f4a4cec6..36377b6b8efe 100644
--- a/arch/x86_64/mm/pageattr.c
+++ b/arch/x86_64/mm/pageattr.c
@@ -74,14 +74,12 @@ static void flush_kernel_map(void *arg)
         struct page *pg;
 
         /* When clflush is available always use it because it is
-           much cheaper than WBINVD. Disable clflush for now because
-           the high level code is not ready yet */
-        if (1 || !cpu_has_clflush)
+           much cheaper than WBINVD. */
+        if (!cpu_has_clflush)
                 asm volatile("wbinvd" ::: "memory");
         else list_for_each_entry(pg, l, lru) {
                 void *adr = page_address(pg);
-                if (cpu_has_clflush)
-                        cache_flush_page(adr);
+                cache_flush_page(adr);
         }
         __flush_tlb_all();
 }
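With every changed page now reliably queued, flush_kernel_map() can prefer
CLFLUSH, which writes back and evicts only the affected lines, over WBINVD,
which dumps the entire cache and is far more expensive. The kernel's
cache_flush_page() walks the page in cache-line steps with inline asm; a
user-space equivalent using the SSE2 intrinsic looks like this (the 64-byte
line size is an assumption here, the kernel reads the real value from CPUID):

    #include <emmintrin.h>   /* _mm_clflush, _mm_mfence */
    #include <stddef.h>

    #define ASSUMED_CLFLUSH_SIZE 64   /* kernel: boot_cpu_data.x86_clflush_size */

    /* Write back and evict one page from the cache, line by line. */
    static void flush_page_lines(void *adr, size_t page_size)
    {
            size_t i;

            for (i = 0; i < page_size; i += ASSUMED_CLFLUSH_SIZE)
                    _mm_clflush((char *)adr + i);
            _mm_mfence();    /* order the flushes against later memory ops */
    }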
@@ -95,7 +93,8 @@ static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */
 
 static inline void save_page(struct page *fpage)
 {
-        list_add(&fpage->lru, &deferred_pages);
+        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
+                list_add(&fpage->lru, &deferred_pages);
 }
 
 /*
@@ -129,9 +128,12 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
         pte_t *kpte;
         struct page *kpte_page;
         pgprot_t ref_prot2;
+
         kpte = lookup_address(address);
         if (!kpte) return 0;
         kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
+        BUG_ON(PageLRU(kpte_page));
+        BUG_ON(PageCompound(kpte_page));
         if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                 if (!pte_huge(*kpte)) {
                         set_pte(kpte, pfn_pte(pfn, prot));
@@ -159,10 +161,9 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
         /* on x86-64 the direct mapping set at boot is not using 4k pages */
         BUG_ON(PageReserved(kpte_page));
 
-        if (page_private(kpte_page) == 0) {
-                save_page(kpte_page);
+        save_page(kpte_page);
+        if (page_private(kpte_page) == 0)
                 revert_page(address, ref_prot);
-        }
         return 0;
 }
 
@@ -234,6 +235,10 @@ void global_flush_tlb(void)
         flush_map(&l);
 
         list_for_each_entry_safe(pg, next, &l, lru) {
+                list_del(&pg->lru);
+                clear_bit(PG_arch_1, &pg->flags);
+                if (page_private(pg) != 0)
+                        continue;
                 ClearPagePrivate(pg);
                 __free_page(pg);
         }