diff options
author | Hugh Dickins <hugh@veritas.com> | 2005-10-29 21:15:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:36 -0400 |
commit | e040f218bb49a6965a5b77edce05fe47a62dda39 (patch) | |
tree | e5547e04fa312f1b87db66e4ac2b6eed885ca276 /mm/memory.c | |
parent | 09ad4bbc3a5c93316d7f4ffc0c310d9cbb28c2f0 (diff) |
[PATCH] mm: copy_pte_range progress fix
My latency-breaking in copy_pte_range didn't work as intended: instead of
checking at regular intervals, after the first interval it checked every
time around the loop — too impatient to be preempted. Fix that.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 14 |
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c index 1db40e935e55..222c13e46130 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -410,7 +410,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
410 | { | 410 | { |
411 | pte_t *src_pte, *dst_pte; | 411 | pte_t *src_pte, *dst_pte; |
412 | unsigned long vm_flags = vma->vm_flags; | 412 | unsigned long vm_flags = vma->vm_flags; |
413 | int progress; | 413 | int progress = 0; |
414 | 414 | ||
415 | again: | 415 | again: |
416 | dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr); | 416 | dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr); |
@@ -418,17 +418,19 @@ again: | |||
418 | return -ENOMEM; | 418 | return -ENOMEM; |
419 | src_pte = pte_offset_map_nested(src_pmd, addr); | 419 | src_pte = pte_offset_map_nested(src_pmd, addr); |
420 | 420 | ||
421 | progress = 0; | ||
422 | spin_lock(&src_mm->page_table_lock); | 421 | spin_lock(&src_mm->page_table_lock); |
423 | do { | 422 | do { |
424 | /* | 423 | /* |
425 | * We are holding two locks at this point - either of them | 424 | * We are holding two locks at this point - either of them |
426 | * could generate latencies in another task on another CPU. | 425 | * could generate latencies in another task on another CPU. |
427 | */ | 426 | */ |
428 | if (progress >= 32 && (need_resched() || | 427 | if (progress >= 32) { |
429 | need_lockbreak(&src_mm->page_table_lock) || | 428 | progress = 0; |
430 | need_lockbreak(&dst_mm->page_table_lock))) | 429 | if (need_resched() || |
431 | break; | 430 | need_lockbreak(&src_mm->page_table_lock) || |
431 | need_lockbreak(&dst_mm->page_table_lock)) | ||
432 | break; | ||
433 | } | ||
432 | if (pte_none(*src_pte)) { | 434 | if (pte_none(*src_pte)) { |
433 | progress++; | 435 | progress++; |
434 | continue; | 436 | continue; |