author		Hugh Dickins <hugh@veritas.com>		2005-10-29 21:16:40 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:42 -0400
commit		4c21e2f2441dc5fbb957b030333f5a3f2d02dea7
tree		1f76d33bb1d76221c6424bc5fed080a4f91349a6	/mm/mremap.c
parent		b38c6845b695141259019e2b7c0fe6c32a6e720d
[PATCH] mm: split page table lock
Christoph Lameter demonstrated very poor scalability on the SGI 512-way, with
a many-threaded application which concurrently initializes different parts of
a large anonymous area.
This patch corrects that, by using a separate spinlock per page table page, to
guard the page table entries in that page, instead of using the mm's single
page_table_lock. (But even then, page_table_lock is still used to guard page
table allocation, and anon_vma allocation.)
In this implementation, the spinlock is tucked inside the struct page of the
page table page: with a BUILD_BUG_ON in case it overflows - which it would in
the case of 32-bit PA-RISC with spinlock debugging enabled.
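Roughly, the shape of it (a simplified sketch for orientation, not the literal hunks of this series, which land in include/linux/mm.h; the ptl field name and its exact placement inside struct page are quoted from memory, not from this diff):

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/* enough cpus: the pte lock lives in the page table page's struct page */
#define pte_lockptr(mm, pmd)	({ (void)(mm); &pmd_page(*(pmd))->ptl; })
#else
/* few cpus: every pte lock collapses back to the single mm-wide lock */
#define pte_lockptr(mm, pmd)	({ (void)(pmd); &(mm)->page_table_lock; })
#endif

pte_lockptr() is the helper the mm/mremap.c hunk below uses to find new_ptl; the BUILD_BUG_ON mentioned above guards the branch where the spinlock is squeezed into struct page.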
Splitting the lock is not quite for free: another cacheline access. Ideally,
I suppose we would use split ptlock only for multi-threaded processes on
multi-cpu machines; but deciding that dynamically would have its own costs.
So for now enable it by config, at some number of cpus - since the Kconfig
language doesn't support inequalities, let the preprocessor compare that with
NR_CPUS. But I don't think it's worth being user-configurable: for good
testing of both split and unsplit configs, split now at 4 cpus, and perhaps
change that to 8 later.
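Concretely, the gate is just an integer Kconfig symbol that the headers compare against NR_CPUS with #if, since Kconfig itself can't express the inequality. A minimal sketch, assuming the SPLIT_PTLOCK_CPUS name this series introduces and omitting any per-architecture overrides (e.g. for the PA-RISC case above):

config SPLIT_PTLOCK_CPUS
	int
	default "4"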
There is a benefit even for singly threaded processes: kswapd can be attacking
one part of the mm while another part is busy faulting.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/mremap.c')
-rw-r--r--	mm/mremap.c	11	++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 8de77b632a20..b535438c363c 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -72,7 +72,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	struct address_space *mapping = NULL;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
-	spinlock_t *old_ptl;
+	spinlock_t *old_ptl, *new_ptl;
 
 	if (vma->vm_file) {
 		/*
@@ -88,8 +88,15 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		new_vma->vm_truncate_count = 0;
 	}
 
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * pte locks because exclusive mmap_sem prevents deadlock.
+	 */
 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
 	new_pte = pte_offset_map_nested(new_pmd, new_addr);
+	new_ptl = pte_lockptr(mm, new_pmd);
+	if (new_ptl != old_ptl)
+		spin_lock(new_ptl);
 
 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
 				   new_pte++, new_addr += PAGE_SIZE) {
@@ -101,6 +108,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
 	pte_unmap_nested(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (mapping)
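
For context on the locking in the hunks above: pte_offset_map_lock() maps the source pte and takes whichever lock pte_lockptr() now selects, while the destination side maps with pte_offset_map_nested() (a second atomic kmap slot) and takes new_ptl by hand, skipping it when both pmds share the same lock. A sketch of the helper as it looked around this time, reproduced from memory rather than from this commit:

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;				\
	spin_lock(__ptl);				\
	__pte;						\
})

As the new comment in the hunk says, taking the two pte locks in either order is safe here because mremap holds mmap_sem exclusively, so no other path can take them in the opposite order.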