author     Eugene Surovegin <ebs@ebshome.net>    2006-03-28 13:13:12 -0500
committer  Paul Mackerras <paulus@samba.org>     2006-03-28 21:44:15 -0500
commit     bab70a4af737f623de5b034976a311055308ab86
tree       f8c0e9463de01323db2cb1a62f4eb83adb5ce7ca /arch
parent     bac30d1a78d0f11c613968fc8b351a91ed465386
[PATCH] lock PTE before updating it in 440/BookE page fault handler
Fix the 44x and BookE page fault handlers to correctly lock the PTE before
trying to pte_update() it; otherwise the PTE might be swapped out after the
pte_present() check but before the pte_update() call, resulting in a
corrupted PTE. This can happen with preemption enabled under low-memory
conditions.
Signed-off-by: Eugene Surovegin <ebs@ebshome.net>
Signed-off-by: Paul Mackerras <paulus@samba.org>
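
For readers unfamiliar with the locking here, the sketch below condenses the fixed ordering used by the fault-handler hunks that follow: the PTE lock is taken before pte_present() is checked, so the entry cannot be swapped out between the check and pte_update()/_tlbie(). This is an illustration only; the function name fixup_hwexec() is made up, and the dcache/icache flush and mmap_sem handling in the real handlers are omitted for brevity.

static int fixup_hwexec(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep = NULL;
	pmd_t *pmdp;
	spinlock_t *ptl;

	if (!get_pteptr(mm, address, &ptep, &pmdp))
		return -1;			/* no PTE; let normal fault handling run */

	ptl = pte_lockptr(mm, pmdp);		/* lock protecting this PTE */
	spin_lock(ptl);
	if (!pte_present(*ptep)) {		/* checked only under the lock */
		pte_unmap_unlock(ptep, ptl);
		return -1;
	}
	pte_update(ptep, 0, _PAGE_HWEXEC);	/* safe: the PTE cannot be swapped out now */
	_tlbie(address);
	pte_unmap_unlock(ptep, ptl);
	return 0;				/* fault handled */
}

The old code performed the pte_present() check before taking any lock, which is exactly the window the commit message describes: with preemption enabled and under memory pressure, the PTE could be reclaimed between the check and the update.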
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/mm/fault.c      | 30
-rw-r--r--  arch/powerpc/mm/pgtable_32.c |  6
-rw-r--r--  arch/ppc/mm/fault.c          | 30
-rw-r--r--  arch/ppc/mm/pgtable.c        |  6
4 files changed, 42 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ec4adcb4bc28..5aea0909a5ec 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -267,25 +267,29 @@ good_area:
 #endif
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a write */
 	} else if (is_write) {
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index d296eb6b4545..90628601fac7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -372,7 +372,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -387,6 +387,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 			if (pte) {
 				retval = 1;
 				*ptep = pte;
+				if (pmdp)
+					*pmdp = pmd;
 				/* XXX caller needs to do pte_unmap, yuck */
 			}
 		}
@@ -424,7 +426,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}
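
As the get_pteptr() change above shows, the helper now also hands back the pmd so callers can locate the PTE lock via pte_lockptr(); callers that only read the mapping pass NULL, as iopa() does. A minimal hedged sketch of the two calling styles follows (the wrapper function name and locals are illustrative, not part of the patch):

static void get_pteptr_usage(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte;
	pmd_t *pmd;

	/* Caller that will modify the PTE: also fetch the pmd so the PTE
	 * lock can be located with pte_lockptr(), as the fault handlers do. */
	if (get_pteptr(mm, addr, &pte, &pmd)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);

		spin_lock(ptl);
		/* ... inspect or modify *pte while holding ptl ... */
		pte_unmap_unlock(pte, ptl);
	}

	/* Read-only caller, as iopa() above: pass NULL when the pmd (and
	 * hence the PTE lock) is not needed. */
	if (get_pteptr(mm, addr, &pte, NULL)) {
		/* ... read *pte ... */
		pte_unmap(pte);
	}
}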
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 0217188ef465..8e08ca32531a 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -202,6 +202,7 @@ good_area:
 	/* an exec - 4xx/Book-E allows for per-page execute permission */
 	} else if (TRAP(regs) == 0x400) {
 		pte_t *ptep;
+		pmd_t *pmdp;
 
 #if 0
 		/* It would be nice to actually enforce the VM execute
@@ -215,21 +216,24 @@ good_area:
 		/* Since 4xx/Book-E supports per-page execute permission,
 		 * we lazily flush dcache to icache. */
 		ptep = NULL;
-		if (get_pteptr(mm, address, &ptep) && pte_present(*ptep)) {
-			struct page *page = pte_page(*ptep);
-
-			if (! test_bit(PG_arch_1, &page->flags)) {
-				flush_dcache_icache_page(page);
-				set_bit(PG_arch_1, &page->flags);
+		if (get_pteptr(mm, address, &ptep, &pmdp)) {
+			spinlock_t *ptl = pte_lockptr(mm, pmdp);
+			spin_lock(ptl);
+			if (pte_present(*ptep)) {
+				struct page *page = pte_page(*ptep);
+
+				if (!test_bit(PG_arch_1, &page->flags)) {
+					flush_dcache_icache_page(page);
+					set_bit(PG_arch_1, &page->flags);
+				}
+				pte_update(ptep, 0, _PAGE_HWEXEC);
+				_tlbie(address);
+				pte_unmap_unlock(ptep, ptl);
+				up_read(&mm->mmap_sem);
+				return 0;
 			}
-			pte_update(ptep, 0, _PAGE_HWEXEC);
-			_tlbie(address);
-			pte_unmap(ptep);
-			up_read(&mm->mmap_sem);
-			return 0;
+			pte_unmap_unlock(ptep, ptl);
 		}
-		if (ptep != NULL)
-			pte_unmap(ptep);
 #endif
 	/* a read */
 	} else {
diff --git a/arch/ppc/mm/pgtable.c b/arch/ppc/mm/pgtable.c
index a1924876cad6..706bca8eb144 100644
--- a/arch/ppc/mm/pgtable.c
+++ b/arch/ppc/mm/pgtable.c
@@ -368,7 +368,7 @@ void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
  * the PTE pointer is unmodified if PTE is not found.
  */
 int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
+get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -383,6 +383,8 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
 			if (pte) {
 				retval = 1;
 				*ptep = pte;
+				if (pmdp)
+					*pmdp = pmd;
 				/* XXX caller needs to do pte_unmap, yuck */
 			}
 		}
@@ -420,7 +422,7 @@ unsigned long iopa(unsigned long addr)
 		mm = &init_mm;
 
 	pa = 0;
-	if (get_pteptr(mm, addr, &pte)) {
+	if (get_pteptr(mm, addr, &pte, NULL)) {
 		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
 		pte_unmap(pte);
 	}