aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorHugh Dickins <hugh@veritas.com>2005-10-29 21:16:05 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-10-30 00:40:38 -0400
commitae859762332f19bfc06f4c4a1b1fefb41e9e1084 (patch)
tree4f21583bb1441e5555ed199a40d5f679bb4506e9 /mm
parent4294621f41a85497019fae64341aa5351a1921b7 (diff)
[PATCH] mm: batch updating mm_counters
tlb_finish_mmu used to batch zap_pte_range's update of mm rss, which may be worthwhile if the mm is contended, and would reduce atomic operations if the counts were atomic.  Let zap_pte_range now batch its updates to file_rss and anon_rss, per page-table in case we drop the lock outside; and copy_pte_range batch them too.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c47
1 file changed, 32 insertions, 15 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 59d42e50fa53..da642b5528fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -332,6 +332,16 @@ out:
 	return pte_offset_kernel(pmd, address);
 }
 
+static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
+{
+	if (file_rss)
+		add_mm_counter(mm, file_rss, file_rss);
+	if (anon_rss)
+		add_mm_counter(mm, anon_rss, anon_rss);
+}
+
+#define NO_RSS 2	/* Increment neither file_rss nor anon_rss */
+
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
@@ -341,7 +351,7 @@ out:
  * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
  */
 
-static inline void
+static inline int
 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte_t *dst_pte, pte_t *src_pte, unsigned long vm_flags,
 		unsigned long addr)
@@ -349,6 +359,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pte_t pte = *src_pte;
 	struct page *page;
 	unsigned long pfn;
+	int anon = NO_RSS;
 
 	/* pte contains position in swap or file, so copy. */
 	if (unlikely(!pte_present(pte))) {
@@ -361,8 +372,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 				spin_unlock(&mmlist_lock);
 			}
 		}
-		set_pte_at(dst_mm, addr, dst_pte, pte);
-		return;
+		goto out_set_pte;
 	}
 
 	pfn = pte_pfn(pte);
@@ -375,10 +385,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	if (pfn_valid(pfn))
 		page = pfn_to_page(pfn);
 
-	if (!page || PageReserved(page)) {
-		set_pte_at(dst_mm, addr, dst_pte, pte);
-		return;
-	}
+	if (!page || PageReserved(page))
+		goto out_set_pte;
 
 	/*
 	 * If it's a COW mapping, write protect it both
@@ -397,12 +405,12 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		pte = pte_mkclean(pte);
 	pte = pte_mkold(pte);
 	get_page(page);
-	if (PageAnon(page))
-		inc_mm_counter(dst_mm, anon_rss);
-	else
-		inc_mm_counter(dst_mm, file_rss);
-	set_pte_at(dst_mm, addr, dst_pte, pte);
 	page_dup_rmap(page);
+	anon = !!PageAnon(page);
+
+out_set_pte:
+	set_pte_at(dst_mm, addr, dst_pte, pte);
+	return anon;
 }
 
 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -412,8 +420,10 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	pte_t *src_pte, *dst_pte;
 	unsigned long vm_flags = vma->vm_flags;
 	int progress = 0;
+	int rss[NO_RSS+1], anon;
 
 again:
+	rss[1] = rss[0] = 0;
 	dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
 	if (!dst_pte)
 		return -ENOMEM;
@@ -436,13 +446,16 @@ again:
 			progress++;
 			continue;
 		}
-		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vm_flags, addr);
+		anon = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
+						vm_flags, addr);
+		rss[anon]++;
 		progress += 8;
 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
 	spin_unlock(&src_mm->page_table_lock);
 
 	pte_unmap_nested(src_pte - 1);
 	pte_unmap(dst_pte - 1);
+	add_mm_rss(dst_mm, rss[0], rss[1]);
 	cond_resched_lock(&dst_mm->page_table_lock);
 	if (addr != end)
 		goto again;
@@ -533,6 +546,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 		struct zap_details *details)
 {
 	pte_t *pte;
+	int file_rss = 0;
+	int anon_rss = 0;
 
 	pte = pte_offset_map(pmd, addr);
 	do {
@@ -576,13 +591,13 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 				set_pte_at(tlb->mm, addr, pte,
 					   pgoff_to_pte(page->index));
 			if (PageAnon(page))
-				dec_mm_counter(tlb->mm, anon_rss);
+				anon_rss++;
 			else {
 				if (pte_dirty(ptent))
 					set_page_dirty(page);
 				if (pte_young(ptent))
 					mark_page_accessed(page);
-				dec_mm_counter(tlb->mm, file_rss);
+				file_rss++;
 			}
 			page_remove_rmap(page);
 			tlb_remove_page(tlb, page);
@@ -598,6 +613,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 		free_swap_and_cache(pte_to_swp_entry(ptent));
 		pte_clear_full(tlb->mm, addr, pte, tlb->fullmm);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	add_mm_rss(tlb->mm, -file_rss, -anon_rss);
 	pte_unmap(pte - 1);
 }
 