author    Mika Westerberg <mika.westerberg@iki.fi>  2010-10-28 06:45:22 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-10-28 08:53:47 -0400
commit    4e54d93d3c9846ba1c2644ad06463dafa690d1b7 (patch)
tree      7d692f758a87fdc7d97caa3432542d08075eb427 /arch/arm/mm/fault-armv.c
parent    4e929d2bcf13eeaa9636448c55690b383a910391 (diff)
ARM: 6464/2: fix spinlock recursion in adjust_pte()
When running the following code on a machine which has VIVT caches and
where USE_SPLIT_PTLOCKS is not defined:
fd = open("/etc/passwd", O_RDONLY);
addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
v = *((int *)addr);
the kernel hangs with spinlock recursion in the page fault handler:
BUG: spinlock recursion on CPU#0, mmap_test/717
lock: c5e295d8, .magic: dead4ead, .owner: mmap_test/717, .owner_cpu: 0
[<c0026604>] (unwind_backtrace+0x0/0xec)
[<c014ee48>] (do_raw_spin_lock+0x40/0x140)
[<c0027f68>] (update_mmu_cache+0x208/0x250)
[<c0079db4>] (__do_fault+0x320/0x3ec)
[<c007af7c>] (handle_mm_fault+0x2f0/0x6d8)
[<c0027834>] (do_page_fault+0xdc/0x1cc)
[<c00202d0>] (do_DataAbort+0x34/0x94)
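For reference, a self-contained version of the snippet above (the includes,
the main() wrapper and the error checks are additions for illustration, not
part of the original report) could look like:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd, v;
	void *addr, *addr2;

	fd = open("/etc/passwd", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Create two shared read-only mappings of the same file page. */
	addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED || addr2 == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/*
	 * Fault in the first mapping.  On a VIVT machine without split
	 * PTE locks this triggered the spinlock recursion shown above.
	 */
	v = *((int *)addr);
	(void)v;

	close(fd);
	return EXIT_SUCCESS;
}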
This comes from the fact that when USE_SPLIT_PTLOCKS is not defined,
the only lock protecting the page tables is mm->page_table_lock,
which is already held by the time update_mmu_cache() is called, so
adjust_pte() must not take it again.
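For context, pte_lockptr() in this situation simply aliases that shared
lock; paraphrased from include/linux/mm.h of this era (shown for
illustration, not part of this patch):

/* Paraphrased from include/linux/mm.h (circa 2.6.36), for illustration. */
#if USE_SPLIT_PTLOCKS
/* Each page-table page carries its own spinlock inside struct page. */
#define pte_lockptr(mm, pmd)	({ (void)(mm); __pte_lockptr(pmd_page(*(pmd))); })
#else
/*
 * No split locks: the "PTE lock" is always mm->page_table_lock, i.e.
 * exactly the lock that the page fault path already holds here.
 */
#define pte_lockptr(mm, pmd)	({ (void)(pmd); &(mm)->page_table_lock; })
#endif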
Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
 arch/arm/mm/fault-armv.c | 28 ++++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c493d7244d3d..83e59f870426 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,6 +66,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	return ret;
 }
 
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the page
+ * lock here.  Otherwise we are using shared mm->page_table_lock
+ * which is already locked, thus cannot take it.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+	/*
+	 * Use nested version here to indicate that we are already
+	 * holding one similar spinlock.
+	 */
+	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+	spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	unsigned long pfn)
 {
@@ -90,11 +114,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	 */
 	ptl = pte_lockptr(vma->vm_mm, pmd);
 	pte = pte_offset_map(pmd, address);
-	spin_lock(ptl);
+	do_pte_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
-	spin_unlock(ptl);
+	do_pte_unlock(ptl);
 	pte_unmap(pte);
 
 	return ret;
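A side note on the SINGLE_DEPTH_NESTING annotation: in the split-lock case
adjust_pte() takes a second PTE lock while another lock of the same lockdep
class may already be held, and spin_lock_nested() tells lockdep that this
single level of nesting is intentional. A minimal sketch of the pattern with
hypothetical locks (not kernel source; locks initialised at one call site
share a lockdep class):

#include <linux/spinlock.h>

static spinlock_t locks[2];

static void init_locks(void)
{
	int i;

	/* One spin_lock_init() call site => one lockdep class for both. */
	for (i = 0; i < 2; i++)
		spin_lock_init(&locks[i]);
}

static void nested_example(void)
{
	spin_lock(&locks[0]);
	/* Same class as locks[0]; mark the nesting as intentional. */
	spin_lock_nested(&locks[1], SINGLE_DEPTH_NESTING);
	spin_unlock(&locks[1]);
	spin_unlock(&locks[0]);
}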