author    Jan Beulich <jbeulich@novell.com>  2008-07-24 00:27:10 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-24 13:47:15 -0400
commit    42b7772812d15b86543a23b82bd6070eef9a08b1 (patch)
tree      10665ee01fe82ce17c68a6278d044531b1ed64c0 /mm
parent    a352894d07059649398c4769dc8b645e1a1dad88 (diff)
mm: remove double indirection on tlb parameter to free_pgd_range() & Co
The double indirection here is not needed anywhere and hence (at least) confusing.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h   3
-rw-r--r--  mm/memory.c    10
-rw-r--r--  mm/mmap.c       6
3 files changed, 13 insertions, 6 deletions
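For context, a minimal standalone sketch (hypothetical struct and function names, not kernel code) of the pattern the patch removes: when a callee never reseats the caller's pointer, passing a pointer-to-pointer only adds a dereference at every use, and a plain pointer carries exactly the same information.

/*
 * Hypothetical illustration only: "struct gather" stands in for
 * struct mmu_gather, and the two helpers stand in for
 * free_pgd_range()/free_pgtables().
 */
#include <stdio.h>

struct gather { int mm; };

/* Before: double indirection, although *tlb is never reassigned. */
static void use_double(struct gather **tlb)
{
	printf("mm = %d\n", (*tlb)->mm);	/* every access needs (*tlb)-> */
}

/* After: a plain pointer is enough. */
static void use_single(struct gather *tlb)
{
	printf("mm = %d\n", tlb->mm);
}

int main(void)
{
	struct gather g = { .mm = 1 };
	struct gather *tlb = &g;

	use_double(&tlb);	/* caller must pass the address of its pointer */
	use_single(tlb);	/* caller simply passes the pointer */
	return 0;
}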
diff --git a/mm/internal.h b/mm/internal.h
index 50807e12490e..858ad01864dc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -13,6 +13,9 @@
 
 #include <linux/mm.h>
 
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+		unsigned long floor, unsigned long ceiling);
+
 static inline void set_page_count(struct page *page, int v)
 {
 	atomic_set(&page->_count, v);
diff --git a/mm/memory.c b/mm/memory.c
index 87350321e66f..82f3f1c5cf17 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -61,6 +61,8 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
@@ -211,7 +213,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
 			unsigned long addr, unsigned long end,
 			unsigned long floor, unsigned long ceiling)
 {
@@ -262,16 +264,16 @@ void free_pgd_range(struct mmu_gather **tlb,
 		return;
 
 	start = addr;
-	pgd = pgd_offset((*tlb)->mm, addr);
+	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
 			continue;
-		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 	} while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		unsigned long floor, unsigned long ceiling)
 {
 	while (vma) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 1d102b956fd8..75e0d0673d78 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,8 @@
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#include "internal.h"
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags)	(0)
 #endif
@@ -1763,7 +1765,7 @@ static void unmap_region(struct mm_struct *mm,
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+	free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
 				 next? next->vm_start: 0);
 	tlb_finish_mmu(tlb, start, end);
 }
@@ -2063,7 +2065,7 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
-	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+	free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
 	tlb_finish_mmu(tlb, 0, end);
 
 	/*