author    | Hugh Dickins <hugh@veritas.com> | 2005-10-29 21:16:23 -0400
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:40 -0400
commit    | c74df32c724a1652ad8399b4891bb02c9d43743a (patch)
tree      | 5a79d56fdcf7dc2053a277dbf6db7c3b339e9659 /fs/exec.c
parent    | 1bb3630e89cb8a7b3d3807629c20c5bad88290ff (diff)
[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock. Remove the temporary
bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not
to hold page_table_lock, whether it's on init_mm or a user mm; take
page_table_lock internally to check if a racing task already allocated.
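For context, the lock-and-recheck now lives inside the allocators themselves. A simplified sketch of what __pmd_alloc looks like after this change (paraphrased from mm/memory.c, __ARCH_HAS_4LEVEL_HACK case omitted; not part of this diff):

	int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
	{
		pmd_t *new = pmd_alloc_one(mm, address); /* allocate outside the lock */
		if (!new)
			return -ENOMEM;

		spin_lock(&mm->page_table_lock);	/* taken here, not by the caller */
		if (pud_present(*pud))			/* a racing task already allocated */
			pmd_free(new);
		else
			pud_populate(mm, pud, new);
		spin_unlock(&mm->page_table_lock);
		return 0;
	}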
Convert their callers in common code. But avoid coming back to change them
again later: instead of moving the spin_lock(&mm->page_table_lock) down,
switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which
encapsulate the mapping+locking and unlocking+unmapping together, and in the
end may use alternatives to the mm page_table_lock itself.
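A sketch of the new pair, paraphrased from the include/linux/mm.h side of this patch (exact definitions may differ by config): the caller receives the lock through ptlp, so the macros can later hand back something other than mm->page_table_lock without the callers changing.

	#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	({							\
		/* sketch: today always the mm-wide lock */	\
		spinlock_t *__ptl = &(mm)->page_table_lock;	\
		pte_t *__pte = pte_offset_map(pmd, address);	\
		*(ptlp) = __ptl;	/* tell caller which lock */ \
		spin_lock(__ptl);				\
		__pte;						\
	})

	#define pte_unmap_unlock(pte, ptl)	do {		\
		spin_unlock(ptl);				\
		pte_unmap(pte);					\
	} while (0)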
These callers all hold mmap_sem (some exclusively, some not), so at no level
can a page table be whipped away from beneath them; and pte_alloc uses the
"atomic" pmd_present to test whether it needs to allocate. It appears that on
all arches we can safely descend without page_table_lock.
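To illustrate, pte_alloc_map_lock pairs that single atomic read of the pmd with the locked slow path; roughly (a paraphrase, not verbatim):

	/*
	 * Sketch: pmd_present() is one racy-but-atomic read, safe without
	 * page_table_lock; mmap_sem keeps the page table from being freed
	 * beneath us, and __pte_alloc retakes the lock internally to
	 * resolve any allocation race.
	 */
	#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
		((unlikely(!pmd_present(*(pmd))) &&		\
		  __pte_alloc(mm, pmd, address)) ?		\
			NULL : pte_offset_map_lock(mm, pmd, address, ptlp))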
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/exec.c')
-rw-r--r-- | fs/exec.c | 14
1 file changed, 5 insertions(+), 9 deletions(-)
@@ -309,25 +309,24 @@ void install_arg_page(struct vm_area_struct *vma,
 	pud_t * pud;
 	pmd_t * pmd;
 	pte_t * pte;
+	spinlock_t *ptl;
 
 	if (unlikely(anon_vma_prepare(vma)))
-		goto out_sig;
+		goto out;
 
 	flush_dcache_page(page);
 	pgd = pgd_offset(mm, address);
-
-	spin_lock(&mm->page_table_lock);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
 		goto out;
 	pmd = pmd_alloc(mm, pud, address);
 	if (!pmd)
 		goto out;
-	pte = pte_alloc_map(mm, pmd, address);
+	pte = pte_alloc_map_lock(mm, pmd, address, &ptl);
 	if (!pte)
 		goto out;
 	if (!pte_none(*pte)) {
-		pte_unmap(pte);
+		pte_unmap_unlock(pte, ptl);
 		goto out;
 	}
 	inc_mm_counter(mm, anon_rss);
@@ -335,14 +334,11 @@ void install_arg_page(struct vm_area_struct *vma,
 	set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
 					page, vma->vm_page_prot))));
 	page_add_anon_rmap(page, vma, address);
-	pte_unmap(pte);
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
 
 	/* no need for flush_tlb */
 	return;
 out:
-	spin_unlock(&mm->page_table_lock);
-out_sig:
 	__free_page(page);
 	force_sig(SIGKILL, current);
 }