author		Hugh Dickins <hugh@veritas.com>	2005-04-19 16:29:15 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org.(none)>	2005-04-19 16:29:15 -0400
commit		ee39b37b23da0b6ec53a8ebe90ff41c016f8ae27 (patch)
tree		4af606913ab8f95551623b788c0c66c1f5902229 /mm
parent		e0da382c92626ad1d7f4b7527d19b80104d67a83 (diff)
[PATCH] freepgt: remove MM_VM_SIZE(mm)
There's only one usage of MM_VM_SIZE(mm) left, and it's a troublesome macro
because mm doesn't contain the (32-bit emulation?) info needed. But it too is
only needed because we ignore the end from the vma list.
We could make flush_pgtables return that end, or unmap_vmas. Choose the
latter, since it's a natural fit with unmap_mapping_range_vma needing to know
its restart addr. This does make more than a minimal change, but if unmap_vmas
had returned the end before, this is how we'd have done it, rather than
storing the break_addr in zap_details.
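
To illustrate the shape of that change, here is a minimal, self-contained C
sketch of the pattern (not the kernel code itself): the walker hands back the
address at which it stopped, so the caller gets the true end of the covered
span, or the restart point if the walk was interrupted. All names here
(walk_ranges, struct range, stop) are invented for illustration.

```c
#include <stddef.h>

struct range {
	unsigned long start, end;
	struct range *next;
};

/*
 * Returns the address at which the walk stopped: the end of the covered
 * span when everything was done, or an earlier address if the caller's
 * stop flag asked us to break out, so the caller can resume from
 * exactly that point.
 */
static unsigned long walk_ranges(struct range *r, unsigned long start_addr,
				 unsigned long end_addr, const int *stop)
{
	unsigned long addr = start_addr;

	for (; r != NULL && r->start < end_addr; r = r->next) {
		if (r->start > addr)
			addr = r->start;
		while (addr < r->end && addr < end_addr) {
			addr++;			/* stand-in for zapping one unit */
			if (*stop)
				return addr;	/* restart address */
		}
	}
	return addr;	/* end address: no separate break_addr needed */
}
```

With this shape, a caller like exit_mmap can pass the returned end straight to
tlb_finish_mmu, which is exactly what the hunks below do.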
unmap_vmas used to return the count of vmas scanned, but that's just debug info
which hasn't been useful in a while; and if we want the map_count-is-0-on-exit
check back, it can easily come from the final remove_vm_struct loop.
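
If that exit-time check were wanted again, here is a hedged sketch of where it
could live; the struct layouts and the remove_vm_struct stub are illustrative,
not the kernel's.

```c
#include <assert.h>

struct vma { struct vma *vm_next; };
struct mm  { struct vma *mmap; int map_count; };

static void remove_vm_struct(struct vma *vma)
{
	(void)vma;	/* stand-in: unlink and free the vma */
}

/*
 * Count vmas as the final teardown loop removes them; comparing with
 * map_count recovers the old debug check without needing a count back
 * from unmap_vmas.
 */
static void final_teardown(struct mm *mm)
{
	struct vma *vma = mm->mmap;
	int count = 0;

	while (vma) {
		struct vma *next = vma->vm_next;
		remove_vm_struct(vma);
		count++;
		vma = next;
	}
	assert(count == mm->map_count);	/* just debugging */
}
```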
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')

 mm/memory.c | 28 +++++++++++++---------------
 mm/mmap.c   |  6 +++---
 2 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index fee5dc8fc36c..854bd90eeca1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -645,7 +645,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the number of vma's which were covered by the unmapping.
+ * Returns the end address of the unmapping (restart addr if interrupted).
  *
  * Unmap all pages in the vma list. Called under page_table_lock.
  *
@@ -662,7 +662,7 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
@@ -670,12 +670,11 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
-	int ret = 0;
+	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = tlb_is_full_mm(*tlbp);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long start;
 		unsigned long end;
 
 		start = max(vma->vm_start, start_addr);
@@ -688,7 +687,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		ret++;
 		while (start != end) {
 			unsigned long block;
 
@@ -719,7 +717,6 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 			if (i_mmap_lock) {
 				/* must reset count of rss freed */
 				*tlbp = tlb_gather_mmu(mm, fullmm);
-				details->break_addr = start;
 				goto out;
 			}
 			spin_unlock(&mm->page_table_lock);
@@ -733,7 +730,7 @@ int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		}
 	}
 out:
-	return ret;
+	return start;	/* which is now the end (or restart) address */
 }
 
 /**
@@ -743,7 +740,7 @@ out:
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -753,15 +750,16 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
 
 	if (is_vm_hugetlb_page(vma)) {
 		zap_hugepage_range(vma, address, size);
-		return;
+		return end;
 	}
 
 	lru_add_drain();
 	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
 	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	return end;
 }
 
 /*
@@ -1348,7 +1346,7 @@ no_new_page:
  * i_mmap_lock.
  *
  * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * large vma, note the restart_addr from unmap_vmas when it breaks out:
  * and restart from that address when we reach that vma again. It might
  * have been split or merged, shrunk or extended, but never shifted: so
  * restart_addr remains valid so long as it remains in the vma's range.
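
The comment above describes the forward-progress scheme; here is a
stripped-down sketch of that loop, with a stub zap that can only cover a
bounded chunk per pass. All names (zap_range, truncate_range, CHUNK) are
invented for this sketch, not the kernel's.

```c
/* Stub standing in for zap_page_range(): pretend a lock break forces us
 * to stop after at most CHUNK bytes, returning the address we reached. */
#define CHUNK 4096UL

static unsigned long zap_range(unsigned long addr, unsigned long size)
{
	return addr + (size > CHUNK ? CHUNK : size);
}

/* Forward progress: each pass resumes from the address the previous
 * pass reached, so even a huge range terminates despite repeated
 * interruptions. */
static unsigned long truncate_range(unsigned long start_addr,
				    unsigned long end_addr)
{
	while (start_addr < end_addr) {
		unsigned long restart_addr =
			zap_range(start_addr, end_addr - start_addr);
		if (restart_addr >= end_addr)
			break;			/* range fully covered */
		start_addr = restart_addr;	/* note and resume here */
	}
	return end_addr;
}
```

In the kernel the resume happens on a later visit to the same vma (via
vm_truncate_count), with a lock-break check in between, as the next hunks show.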
@@ -1386,8 +1384,8 @@ again:
 		}
 	}
 
-	details->break_addr = end_addr;
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	restart_addr = zap_page_range(vma, start_addr,
+				end_addr - start_addr, details);
 
 	/*
 	 * We cannot rely on the break test in unmap_vmas:
@@ -1398,14 +1396,14 @@ again:
 	need_break = need_resched() ||
 			need_lockbreak(details->i_mmap_lock);
 
-	if (details->break_addr >= end_addr) {
+	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */
 		vma->vm_truncate_count = details->truncate_count;
 		if (!need_break)
 			return 0;
 	} else {
 		/* Note restart_addr in vma's truncate_count field */
-		vma->vm_truncate_count = details->break_addr;
+		vma->vm_truncate_count = restart_addr;
 		if (!need_break)
 			goto again;
 	}
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1900,6 +1900,7 @@ void exit_mmap(struct mm_struct *mm)
 	struct mmu_gather *tlb;
 	struct vm_area_struct *vma = mm->mmap;
 	unsigned long nr_accounted = 0;
+	unsigned long end;
 
 	lru_add_drain();
 
@@ -1908,10 +1909,10 @@ void exit_mmap(struct mm_struct *mm)
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	mm->map_count -= unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, 0, 0);
-	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
 	mm->mm_rb = RB_ROOT;
@@ -1931,7 +1932,6 @@ void exit_mmap(struct mm_struct *mm)
 		vma = next;
 	}
 
-	BUG_ON(mm->map_count);	/* This is just debugging */
 	BUG_ON(mm->nr_ptes);	/* This is just debugging */
 }
 