author		Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:23 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:40 -0400
commit		c74df32c724a1652ad8399b4891bb02c9d43743a (patch)
tree		5a79d56fdcf7dc2053a277dbf6db7c3b339e9659 /include/linux
parent		1bb3630e89cb8a7b3d3807629c20c5bad88290ff (diff)
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock.  Remove the temporary bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not to hold page_table_lock, whether it's on init_mm or a user mm; take page_table_lock internally to check if a racing task already allocated.

Convert their callers from common code.  But avoid coming back to change them again later: instead of moving the spin_lock(&mm->page_table_lock) down, switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which encapsulate the mapping+locking and unlocking+unmapping together, and in the end may use alternatives to the mm page_table_lock itself.

These callers all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them; and pte_alloc uses the "atomic" pmd_present to test whether it needs to allocate.  It appears that on all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
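For illustration only (not part of this patch): a minimal sketch of how a caller holding mmap_sem might use the new pair, where the old pattern open-coded spin_lock(&mm->page_table_lock) around pte_alloc_map() and pte_unmap().  The function name and the pte it installs are hypothetical.

	/* Hypothetical caller, sketched for illustration: allocate the page
	 * table if needed, map and lock the pte in one step, then unlock
	 * and unmap in one step. */
	static int example_install_pte(struct mm_struct *mm, pmd_t *pmd,
				       unsigned long addr, pte_t entry)
	{
		spinlock_t *ptl;
		pte_t *pte;

		pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
		if (!pte)
			return -ENOMEM;
		if (pte_none(*pte))
			set_pte_at(mm, addr, pte, entry);
		pte_unmap_unlock(pte, ptl);
		return 0;
	}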
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/mm.h	18
1 file changed, 18 insertions, 0 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 22c2d6922c0e..d4c3512e7db4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -779,10 +779,28 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 }
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
+#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
+({							\
+	spinlock_t *__ptl = &(mm)->page_table_lock;	\
+	pte_t *__pte = pte_offset_map(pmd, address);	\
+	*(ptlp) = __ptl;				\
+	spin_lock(__ptl);				\
+	__pte;						\
+})
+
+#define pte_unmap_unlock(pte, ptl)	do {		\
+	spin_unlock(ptl);				\
+	pte_unmap(pte);					\
+} while (0)
+
 #define pte_alloc_map(mm, pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
 		NULL: pte_offset_map(pmd, address))
 
+#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
+	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
 #define pte_alloc_kernel(pmd, address)			\
 	((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
 		NULL: pte_offset_kernel(pmd, address))
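
The locked lookup pair can also be used on its own where the page table is already known to be present.  Handing the lock back through ptlp, rather than hard-coding mm->page_table_lock at every call site, is what lets the macro later return a different lock without touching callers.  A hypothetical reader, sketched only to show the pairing:

	/* Hypothetical reader, for illustration: map and lock an existing
	 * pte, copy it out under the lock, then unlock and unmap. */
	static pte_t example_read_pte(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr)
	{
		spinlock_t *ptl;
		pte_t *pte, entry;

		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		entry = *pte;
		pte_unmap_unlock(pte, ptl);
		return entry;
	}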