aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2008-02-05 01:29:14 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2008-02-05 12:44:18 -0500
commit5e5419734c8719cbc01af959ad9c0844002c0df5 (patch)
treea075dca3f719946689efa0245464855cbf2a20ce /mm
parent9f8f2172537de7af0b0fbd33502d18d52b1339bc (diff)
add mm argument to pte/pmd/pud/pgd_free
(with Martin Schwidefsky <schwidefsky@de.ibm.com>) The pgd/pud/pmd/pte page table allocation functions get a mm_struct pointer as first argument. The free functions do not get the mm_struct argument. This is 1) asymmetrical and 2) to do mm related page table allocations the mm argument is needed on the free function as well. [kamalesh@linux.vnet.ibm.com: i386 fix] [akpm@linux-foundation.org: coding-syle fixes] Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: <linux-arch@vger.kernel.org> Signed-off-by: Kamalesh Babulal <kamalesh@linux.vnet.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index b7cb2e01705f..1c81fc2174cd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -305,7 +305,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
305 spin_lock(&mm->page_table_lock); 305 spin_lock(&mm->page_table_lock);
306 if (pmd_present(*pmd)) { /* Another has populated it */ 306 if (pmd_present(*pmd)) { /* Another has populated it */
307 pte_lock_deinit(new); 307 pte_lock_deinit(new);
308 pte_free(new); 308 pte_free(mm, new);
309 } else { 309 } else {
310 mm->nr_ptes++; 310 mm->nr_ptes++;
311 inc_zone_page_state(new, NR_PAGETABLE); 311 inc_zone_page_state(new, NR_PAGETABLE);
@@ -323,7 +323,7 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
323 323
324 spin_lock(&init_mm.page_table_lock); 324 spin_lock(&init_mm.page_table_lock);
325 if (pmd_present(*pmd)) /* Another has populated it */ 325 if (pmd_present(*pmd)) /* Another has populated it */
326 pte_free_kernel(new); 326 pte_free_kernel(&init_mm, new);
327 else 327 else
328 pmd_populate_kernel(&init_mm, pmd, new); 328 pmd_populate_kernel(&init_mm, pmd, new);
329 spin_unlock(&init_mm.page_table_lock); 329 spin_unlock(&init_mm.page_table_lock);
@@ -2501,7 +2501,7 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2501 2501
2502 spin_lock(&mm->page_table_lock); 2502 spin_lock(&mm->page_table_lock);
2503 if (pgd_present(*pgd)) /* Another has populated it */ 2503 if (pgd_present(*pgd)) /* Another has populated it */
2504 pud_free(new); 2504 pud_free(mm, new);
2505 else 2505 else
2506 pgd_populate(mm, pgd, new); 2506 pgd_populate(mm, pgd, new);
2507 spin_unlock(&mm->page_table_lock); 2507 spin_unlock(&mm->page_table_lock);
@@ -2523,12 +2523,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2523 spin_lock(&mm->page_table_lock); 2523 spin_lock(&mm->page_table_lock);
2524#ifndef __ARCH_HAS_4LEVEL_HACK 2524#ifndef __ARCH_HAS_4LEVEL_HACK
2525 if (pud_present(*pud)) /* Another has populated it */ 2525 if (pud_present(*pud)) /* Another has populated it */
2526 pmd_free(new); 2526 pmd_free(mm, new);
2527 else 2527 else
2528 pud_populate(mm, pud, new); 2528 pud_populate(mm, pud, new);
2529#else 2529#else
2530 if (pgd_present(*pud)) /* Another has populated it */ 2530 if (pgd_present(*pud)) /* Another has populated it */
2531 pmd_free(new); 2531 pmd_free(mm, new);
2532 else 2532 else
2533 pgd_populate(mm, pud, new); 2533 pgd_populate(mm, pud, new);
2534#endif /* __ARCH_HAS_4LEVEL_HACK */ 2534#endif /* __ARCH_HAS_4LEVEL_HACK */