path: root/mm/memory.c
author	Andrew Morton <akpm@osdl.org>	2005-07-27 14:43:54 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:25:53 -0400
commit	1aaf18ff9de1f37bf674236fc0779c3aaa65b998 (patch)
tree	53df4d3dd0fbd81b79d5cdb63cf0d11853307a6b /mm/memory.c
parent	0cfc11ed45e4c00750039e5a18c0fc0d681e19db (diff)
[PATCH] check_user_page_readable() deadlock fix
Fix bug identified by Richard Purdie <rpurdie@rpsys.net>.

oprofile calls check_user_page_readable() from interrupt context, so we
deadlock over various VFS locks.

But check_user_page_readable() doesn't imply either a read or a write of the
page's contents.  Change __follow_page() so that check_user_page_readable()
can tell __follow_page() that we're not accessing the page's contents, and use
that info to avoid the troublesome lock-takings.

Also, make follow_page() inline for the single callsite in memory.c to save a
bit of stack space.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	25
1 files changed, 15 insertions, 10 deletions
diff --git a/mm/memory.c b/mm/memory.c
index beabdefa6254..6fe77acbc1cd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -776,8 +776,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
  * Do a quick page-table lookup for a single page.
  * mm->page_table_lock must be held.
  */
-static struct page *
-__follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
+static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
+			int read, int write, int accessed)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -818,9 +818,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write)
 	pfn = pte_pfn(pte);
 	if (pfn_valid(pfn)) {
 		page = pfn_to_page(pfn);
-		if (write && !pte_dirty(pte) && !PageDirty(page))
-			set_page_dirty(page);
-		mark_page_accessed(page);
+		if (accessed) {
+			if (write && !pte_dirty(pte) && !PageDirty(page))
+				set_page_dirty(page);
+			mark_page_accessed(page);
+		}
 		return page;
 	}
 }
@@ -829,16 +831,19 @@ out:
 	return NULL;
 }
 
-struct page *
+inline struct page *
 follow_page(struct mm_struct *mm, unsigned long address, int write)
 {
-	return __follow_page(mm, address, /*read*/0, write);
+	return __follow_page(mm, address, 0, write, 1);
 }
 
-int
-check_user_page_readable(struct mm_struct *mm, unsigned long address)
+/*
+ * check_user_page_readable() can be called from interrupt context by oprofile,
+ * so we need to avoid taking any non-irq-safe locks
+ */
+int check_user_page_readable(struct mm_struct *mm, unsigned long address)
 {
-	return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL;
+	return __follow_page(mm, address, 1, 0, 0) != NULL;
 }
 EXPORT_SYMBOL(check_user_page_readable);
 
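As a reading aid, the sketch below shows how an interrupt-context caller such as a
profiler might use the entry point changed above. The function name
profile_user_sample() and its trylock-based locking are illustrative assumptions,
not code from this patch; only check_user_page_readable() and the accessed-flag
behaviour come from the change itself.

#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Illustration only -- not part of this patch.  A hypothetical
 * interrupt-context sampler probing whether a user address is readable.
 * Because check_user_page_readable() now passes accessed == 0,
 * __follow_page() skips set_page_dirty() and mark_page_accessed(), so
 * none of the troublesome non-irq-safe locks are taken on this path.
 */
static int profile_user_sample(struct mm_struct *mm, unsigned long addr)
{
	int readable = 0;

	/*
	 * __follow_page() requires mm->page_table_lock.  From interrupt
	 * context a trylock avoids deadlocking against a task that was
	 * interrupted while holding the lock (an assumption made for this
	 * sketch; the real oprofile call path is not shown here).
	 */
	if (spin_trylock(&mm->page_table_lock)) {
		readable = check_user_page_readable(mm, addr);
		spin_unlock(&mm->page_table_lock);
	}
	return readable;
}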