about | summary | refs | log | tree | commit | diff | stats
path: root/mm/pagewalk.c
diff options
context:
space:
mode:
author: Dave Hansen <dave@linux.vnet.ibm.com>, 2008-06-12 18:21:47 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>, 2008-06-12 21:05:41 -0400
commit: 2165009bdf63f79716a36ad545df14c3cdf958b7 (patch)
tree: 83d1735f2104b6b5158be56a362856ac1079861d /mm/pagewalk.c
parent: cfc53f65f56f9f33c0cf522124045ac5a64076b3 (diff)
pagemap: pass mm into pagewalkers
We need this at least for huge page detection for now, because powerpc needs the vm_area_struct to be able to determine whether a virtual address refers to a huge page (its pmd_huge() doesn't work). It might also come in handy for some of the other users.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/pagewalk.c')
-rw-r--r--  mm/pagewalk.c | 42
1 file changed, 22 insertions, 20 deletions
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0afd2387e507..d5878bed7841 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -3,14 +3,14 @@
3#include <linux/sched.h> 3#include <linux/sched.h>
4 4
5static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 5static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
6 const struct mm_walk *walk, void *private) 6 struct mm_walk *walk)
7{ 7{
8 pte_t *pte; 8 pte_t *pte;
9 int err = 0; 9 int err = 0;
10 10
11 pte = pte_offset_map(pmd, addr); 11 pte = pte_offset_map(pmd, addr);
12 for (;;) { 12 for (;;) {
13 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private); 13 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
14 if (err) 14 if (err)
15 break; 15 break;
16 addr += PAGE_SIZE; 16 addr += PAGE_SIZE;
@@ -24,7 +24,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
24} 24}
25 25
26static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, 26static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
27 const struct mm_walk *walk, void *private) 27 struct mm_walk *walk)
28{ 28{
29 pmd_t *pmd; 29 pmd_t *pmd;
30 unsigned long next; 30 unsigned long next;
@@ -35,15 +35,15 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
35 next = pmd_addr_end(addr, end); 35 next = pmd_addr_end(addr, end);
36 if (pmd_none_or_clear_bad(pmd)) { 36 if (pmd_none_or_clear_bad(pmd)) {
37 if (walk->pte_hole) 37 if (walk->pte_hole)
38 err = walk->pte_hole(addr, next, private); 38 err = walk->pte_hole(addr, next, walk);
39 if (err) 39 if (err)
40 break; 40 break;
41 continue; 41 continue;
42 } 42 }
43 if (walk->pmd_entry) 43 if (walk->pmd_entry)
44 err = walk->pmd_entry(pmd, addr, next, private); 44 err = walk->pmd_entry(pmd, addr, next, walk);
45 if (!err && walk->pte_entry) 45 if (!err && walk->pte_entry)
46 err = walk_pte_range(pmd, addr, next, walk, private); 46 err = walk_pte_range(pmd, addr, next, walk);
47 if (err) 47 if (err)
48 break; 48 break;
49 } while (pmd++, addr = next, addr != end); 49 } while (pmd++, addr = next, addr != end);
@@ -52,7 +52,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
52} 52}
53 53
54static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, 54static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
55 const struct mm_walk *walk, void *private) 55 struct mm_walk *walk)
56{ 56{
57 pud_t *pud; 57 pud_t *pud;
58 unsigned long next; 58 unsigned long next;
@@ -63,15 +63,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
63 next = pud_addr_end(addr, end); 63 next = pud_addr_end(addr, end);
64 if (pud_none_or_clear_bad(pud)) { 64 if (pud_none_or_clear_bad(pud)) {
65 if (walk->pte_hole) 65 if (walk->pte_hole)
66 err = walk->pte_hole(addr, next, private); 66 err = walk->pte_hole(addr, next, walk);
67 if (err) 67 if (err)
68 break; 68 break;
69 continue; 69 continue;
70 } 70 }
71 if (walk->pud_entry) 71 if (walk->pud_entry)
72 err = walk->pud_entry(pud, addr, next, private); 72 err = walk->pud_entry(pud, addr, next, walk);
73 if (!err && (walk->pmd_entry || walk->pte_entry)) 73 if (!err && (walk->pmd_entry || walk->pte_entry))
74 err = walk_pmd_range(pud, addr, next, walk, private); 74 err = walk_pmd_range(pud, addr, next, walk);
75 if (err) 75 if (err)
76 break; 76 break;
77 } while (pud++, addr = next, addr != end); 77 } while (pud++, addr = next, addr != end);
@@ -85,15 +85,15 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
85 * @addr: starting address 85 * @addr: starting address
86 * @end: ending address 86 * @end: ending address
87 * @walk: set of callbacks to invoke for each level of the tree 87 * @walk: set of callbacks to invoke for each level of the tree
88 * @private: private data passed to the callback function
89 * 88 *
90 * Recursively walk the page table for the memory area in a VMA, 89 * Recursively walk the page table for the memory area in a VMA,
91 * calling supplied callbacks. Callbacks are called in-order (first 90 * calling supplied callbacks. Callbacks are called in-order (first
92 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD, 91 * PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
93 * etc.). If lower-level callbacks are omitted, walking depth is reduced. 92 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
94 * 93 *
95 * Each callback receives an entry pointer, the start and end of the 94 * Each callback receives an entry pointer and the start and end of the
96 * associated range, and a caller-supplied private data pointer. 95 * associated range, and a copy of the original mm_walk for access to
96 * the ->private or ->mm fields.
97 * 97 *
98 * No locks are taken, but the bottom level iterator will map PTE 98 * No locks are taken, but the bottom level iterator will map PTE
99 * directories from highmem if necessary. 99 * directories from highmem if necessary.
@@ -101,9 +101,8 @@ static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
101 * If any callback returns a non-zero value, the walk is aborted and 101 * If any callback returns a non-zero value, the walk is aborted and
102 * the return value is propagated back to the caller. Otherwise 0 is returned. 102 * the return value is propagated back to the caller. Otherwise 0 is returned.
103 */ 103 */
104int walk_page_range(const struct mm_struct *mm, 104int walk_page_range(unsigned long addr, unsigned long end,
105 unsigned long addr, unsigned long end, 105 struct mm_walk *walk)
106 const struct mm_walk *walk, void *private)
107{ 106{
108 pgd_t *pgd; 107 pgd_t *pgd;
109 unsigned long next; 108 unsigned long next;
@@ -112,21 +111,24 @@ int walk_page_range(const struct mm_struct *mm,
112 if (addr >= end) 111 if (addr >= end)
113 return err; 112 return err;
114 113
115 pgd = pgd_offset(mm, addr); 114 if (!walk->mm)
115 return -EINVAL;
116
117 pgd = pgd_offset(walk->mm, addr);
116 do { 118 do {
117 next = pgd_addr_end(addr, end); 119 next = pgd_addr_end(addr, end);
118 if (pgd_none_or_clear_bad(pgd)) { 120 if (pgd_none_or_clear_bad(pgd)) {
119 if (walk->pte_hole) 121 if (walk->pte_hole)
120 err = walk->pte_hole(addr, next, private); 122 err = walk->pte_hole(addr, next, walk);
121 if (err) 123 if (err)
122 break; 124 break;
123 continue; 125 continue;
124 } 126 }
125 if (walk->pgd_entry) 127 if (walk->pgd_entry)
126 err = walk->pgd_entry(pgd, addr, next, private); 128 err = walk->pgd_entry(pgd, addr, next, walk);
127 if (!err && 129 if (!err &&
128 (walk->pud_entry || walk->pmd_entry || walk->pte_entry)) 130 (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
129 err = walk_pud_range(pgd, addr, next, walk, private); 131 err = walk_pud_range(pgd, addr, next, walk);
130 if (err) 132 if (err)
131 break; 133 break;
132 } while (pgd++, addr = next, addr != end); 134 } while (pgd++, addr = next, addr != end);