| author | Jan Beulich <jbeulich@novell.com> | 2008-07-24 00:27:10 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-24 13:47:15 -0400 |
| commit | 42b7772812d15b86543a23b82bd6070eef9a08b1 (patch) | |
| tree | 10665ee01fe82ce17c68a6278d044531b1ed64c0 | |
| parent | a352894d07059649398c4769dc8b645e1a1dad88 (diff) | |
mm: remove double indirection on tlb parameter to free_pgd_range() & Co
The double indirection here is not needed anywhere and is therefore (at least)
confusing: callers already hold a struct mmu_gather pointer obtained from
tlb_gather_mmu(), and the callees only ever use *tlb, so free_pgd_range(),
free_pgtables() and the per-arch hugetlb_free_pgd_range() implementations can
take a plain struct mmu_gather * instead of struct mmu_gather **.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
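
For context, a minimal sketch of the calling convention before and after this patch, loosely modeled on shift_arg_pages() in fs/exec.c. The function and variable names below are illustrative only and not taken verbatim from the tree:

```c
#include <linux/mm.h>
#include <asm/tlb.h>

/*
 * Hypothetical caller sketch: the mmu_gather is already obtained as a
 * pointer from tlb_gather_mmu(), and the callees only ever dereference
 * it, so passing the address of that pointer (struct mmu_gather **)
 * added nothing but an extra level of indirection.
 */
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				unsigned long ceiling)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	/* before this patch: free_pgd_range(&tlb, start, end, start, ceiling); */
	free_pgd_range(tlb, start, end, start, ceiling);

	tlb_finish_mmu(tlb, start, end);
}
```

As a side effect, the free_pgtables() declaration drops out of include/linux/mm.h and moves to mm/internal.h, which is why mm/memory.c and mm/mmap.c now include "internal.h".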
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/ia64/mm/hugetlbpage.c | 2 |
| -rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 8 |
| -rw-r--r-- | fs/exec.c | 4 |
| -rw-r--r-- | include/asm-ia64/hugetlb.h | 2 |
| -rw-r--r-- | include/asm-powerpc/hugetlb.h | 2 |
| -rw-r--r-- | include/asm-sh/hugetlb.h | 2 |
| -rw-r--r-- | include/asm-sparc/hugetlb.h | 2 |
| -rw-r--r-- | include/asm-x86/hugetlb.h | 2 |
| -rw-r--r-- | include/linux/mm.h | 4 |
| -rw-r--r-- | mm/internal.h | 3 |
| -rw-r--r-- | mm/memory.c | 10 |
| -rw-r--r-- | mm/mmap.c | 6 |

12 files changed, 26 insertions, 21 deletions
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index d3ce8f3bcaa6..cd49e2860eef 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -112,7 +112,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int wri
 	return NULL;
 }
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0d12fba31bc5..1a96cc891cf5 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -255,7 +255,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long addr, unsigned long end,
 			    unsigned long floor, unsigned long ceiling)
 {
@@ -315,13 +315,13 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 		return;
 
 	start = addr;
-	pgd = pgd_offset((*tlb)->mm, addr);
+	pgd = pgd_offset(tlb->mm, addr);
 	do {
-		BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
+		BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 }
 
diff --git a/fs/exec.c b/fs/exec.c
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -541,7 +541,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		/*
 		 * when the old and new regions overlap clear from new_end.
 		 */
-		free_pgd_range(&tlb, new_end, old_end, new_end,
+		free_pgd_range(tlb, new_end, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : 0);
 	} else {
 		/*
@@ -550,7 +550,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * have constraints on va-space that make this illegal (IA64) -
 		 * for the others its just a little faster.
 		 */
-		free_pgd_range(&tlb, old_start, old_end, new_end,
+		free_pgd_range(tlb, old_start, old_end, new_end,
 			vma->vm_next ? vma->vm_next->vm_start : 0);
 	}
 	tlb_finish_mmu(tlb, new_end, old_end);
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
index f28a9701f1cf..e9d1e5e2382d 100644
--- a/include/asm-ia64/hugetlb.h
+++ b/include/asm-ia64/hugetlb.h
@@ -4,7 +4,7 @@
 #include <asm/page.h>
 
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
index be32ff02f4a0..0a37aa5ecaa5 100644
--- a/include/asm-powerpc/hugetlb.h
+++ b/include/asm-powerpc/hugetlb.h
@@ -7,7 +7,7 @@
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 			   unsigned long len);
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 			    unsigned long end, unsigned long floor,
 			    unsigned long ceiling);
 
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
index 02402303d89b..fb30018938c7 100644
--- a/include/asm-sh/hugetlb.h
+++ b/include/asm-sh/hugetlb.h
@@ -26,7 +26,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
 					  unsigned long ceiling)
diff --git a/include/asm-sparc/hugetlb.h b/include/asm-sparc/hugetlb.h
index 412af58926a0..aeb92374ca3d 100644
--- a/include/asm-sparc/hugetlb.h
+++ b/include/asm-sparc/hugetlb.h
@@ -31,7 +31,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
 					  unsigned long ceiling)
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
index 14171a4924f6..7eed6e0883bf 100644
--- a/include/asm-x86/hugetlb.h
+++ b/include/asm-x86/hugetlb.h
@@ -26,7 +26,7 @@ static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 					  unsigned long addr, unsigned long end,
 					  unsigned long floor,
 					  unsigned long ceiling)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5c7f8f64f70e..f8071097302a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -769,10 +769,8 @@ struct mm_walk {
 
 int walk_page_range(unsigned long addr, unsigned long end,
 		struct mm_walk *walk);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-		unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
diff --git a/mm/internal.h b/mm/internal.h
index 50807e12490e..858ad01864dc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -13,6 +13,9 @@
 
 #include <linux/mm.h>
 
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+		unsigned long floor, unsigned long ceiling);
+
 static inline void set_page_count(struct page *page, int v)
 {
 	atomic_set(&page->_count, v);
diff --git a/mm/memory.c b/mm/memory.c
index 87350321e66f..82f3f1c5cf17 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,8 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
@@ -211,7 +213,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
 		unsigned long addr, unsigned long end,
 		unsigned long floor, unsigned long ceiling)
 {
@@ -262,16 +264,16 @@ void free_pgd_range(struct mmu_gather **tlb,
 		return;
 
 	start = addr;
-	pgd = pgd_offset((*tlb)->mm, addr);
+	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		unsigned long floor, unsigned long ceiling)
 {
 	while (vma) {
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,8 @@
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#include "internal.h"
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
 #endif
@@ -1763,7 +1765,7 @@ static void unmap_region(struct mm_struct *mm,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+	free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 		next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
 }
@@ -2063,7 +2065,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
 	/*