author    | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2005-05-01 11:58:44 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org>         | 2005-05-01 11:58:44 -0400
commit    | 58366af5861eee1479426380e3c91ecb334c301d (patch)
tree      | 2c7e61d424279057ebeb2ef32b2e9648666848ca /arch
parent    | 0339ad77c4a06fa8529db17c91f790058e18b65b (diff)
[PATCH] ppc64: update to use the new 4L headers
This patch converts ppc64 to use the generic pgtable-nopud.h instead of the
"fixup" header.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
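
For context, the generic header this patch switches to folds the pud level into the pgd on architectures that only implement three page-table levels, which is why most of the conversion below is a mechanical pgd_* to pud_* rename one level down. Here is an abridged sketch of include/asm-generic/pgtable-nopud.h from the same kernel era; the real header carries a few more definitions, and the exact 2.6.12 contents may differ slightly:

```c
/*
 * Abridged sketch of include/asm-generic/pgtable-nopud.h (2.6.12 era);
 * the exact contents of the real header may differ slightly.
 *
 * With the pud folded, a pud_t is just a wrapper around the pgd entry,
 * walking into it is a cast, and the pgd-level predicates are trivial.
 */
typedef struct { pgd_t pgd; } pud_t;

#define PUD_SHIFT	PGDIR_SHIFT
#define PTRS_PER_PUD	1
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))

/* A folded pgd entry always "exists" and is never bad. */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgd)	{ }
#define pgd_populate(mm, pgd, pud)		do { } while (0)

/* Walking from the pgd into the pud reinterprets the same entry. */
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;
}

/* A folded pud spans its whole pgd entry, so a pud range never splits. */
#define pud_addr_end(addr, end)			(end)
```

On such a configuration pud_offset() compiles down to a cast and pud_addr_end() to the range end, so four-level walks like the new unmap_im_area() in init.c below add no cost at the folded level.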
Diffstat (limited to 'arch')

-rw-r--r-- | arch/ppc64/mm/hugetlbpage.c |  45
-rw-r--r-- | arch/ppc64/mm/init.c        | 198

2 files changed, 123 insertions(+), 120 deletions(-)
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 390296efe3e0..d3bf86a5c1ad 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr)
 	return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
 }
 
-static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
 {
 	int index;
 
@@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
 
 	index = hugepgd_index(addr);
 	BUG_ON(index >= PTRS_PER_HUGEPGD);
-	return mm->context.huge_pgdir + index;
+	return (pud_t *)(mm->context.huge_pgdir + index);
 }
 
-static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr)
+static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
 {
 	int index;
 
-	if (pgd_none(*dir))
+	if (pud_none(*dir))
 		return NULL;
 
 	index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
-	return (pte_t *)pgd_page(*dir) + index;
+	return (pte_t *)pud_page(*dir) + index;
 }
 
-static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
@@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepgd_offset(mm, addr);
 }
 
-static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
-			    unsigned long addr)
+static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
 {
-	if (! pgd_present(*dir)) {
+	if (! pud_present(*dir)) {
 		pte_t *new;
 
 		spin_unlock(&mm->page_table_lock);
@@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 		 * Because we dropped the lock, we should re-check the
 		 * entry, as somebody else could have populated it..
 		 */
-		if (pgd_present(*dir)) {
+		if (pud_present(*dir)) {
 			if (new)
 				kmem_cache_free(zero_cache, new);
 		} else {
@@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 			ptepage = virt_to_page(new);
 			ptepage->mapping = (void *) mm;
 			ptepage->index = addr & HUGEPGDIR_MASK;
-			pgd_populate(mm, dir, new);
+			pud_populate(mm, dir, new);
 		}
 	}
 
@@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 
 static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;
 
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
-	pgd = hugepgd_offset(mm, addr);
-	if (! pgd)
+	pud = hugepgd_offset(mm, addr);
+	if (! pud)
 		return NULL;
 
-	return hugepte_offset(pgd, addr);
+	return hugepte_offset(pud, addr);
 }
 
 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;
 
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
-	pgd = hugepgd_alloc(mm, addr);
-	if (! pgd)
+	pud = hugepgd_alloc(mm, addr);
+	if (! pud)
 		return NULL;
 
-	return hugepte_alloc(mm, pgd, addr);
+	return hugepte_alloc(mm, pud, addr);
 }
 
 static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
 
 	/* cleanup any hugepte pages leftover */
 	for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
-		pgd_t *pgd = pgdir + i;
+		pud_t *pud = (pud_t *)(pgdir + i);
 
-		if (! pgd_none(*pgd)) {
-			pte_t *pte = (pte_t *)pgd_page(*pgd);
+		if (! pud_none(*pud)) {
+			pte_t *pte = (pte_t *)pud_page(*pud);
 			struct page *ptepage = virt_to_page(pte);
 
 			ptepage->mapping = NULL;
@@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
 			BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
 			kmem_cache_free(zero_cache, pte);
 		}
-		pgd_clear(pgd);
+		pud_clear(pud);
 	}
 
 	BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a7149b9fc35c..cf33d7ec2e29 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+				     unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		unmap_im_area_pte(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+				     unsigned long end)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		unmap_im_area_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+	struct mm_struct *mm = &ioremap_mm;
+	unsigned long next;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset_i(addr);
+	flush_cache_vunmap(addr, end);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		unmap_im_area_pud(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range(start, end);
+
+	spin_unlock(&mm->page_table_lock);
+}
+
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	unsigned long vsid;
@@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	if (mem_init_done) {
 		spin_lock(&ioremap_mm.page_table_lock);
 		pgdp = pgd_offset_i(ea);
-		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+		if (!ptep)
+			return -ENOMEM;
 		pa = abs_to_phys(pa);
 		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 			      __pgprot(flags)));
@@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 			panic("map_io_page: could not insert mapping");
 		}
 	}
+	return 0;
 }
 
 
@@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	flags |= pgprot_val(PAGE_KERNEL);
 
 	for (i = 0; i < size; i += PAGE_SIZE)
-		map_io_page(ea+i, pa+i, flags);
+		if (map_io_page(ea+i, pa+i, flags))
+			goto failure;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+failure:
+	if (mem_init_done)
+		unmap_im_area(ea, ea + size);
+	return NULL;
 }
 
 
@@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
 {
 	unsigned long pa, ea;
+	void __iomem *ret;
 
 	/*
 	 * Choose an address to map it to.
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
 		if (area == NULL)
 			return NULL;
 		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
 	} else {
 		ea = ioremap_bot;
-		ioremap_bot += size;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
 	}
-
-	return __ioremap_com(addr, pa, ea, size, flags);
+	return ret;
 }
 
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
@@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 			unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
+	void __iomem *ret;
 
 	/* For now, require page-aligned values for pa, ea, and size */
 	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		}
 	}
 
-	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
 		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 		return 1;
 	}
@@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 	return 0;
 }
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
-	do {
-		pte_t page;
-		page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
-		       " table\n");
-	} while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pmd_t *pmd;
-
-	if (pgd_none(*dir))
-		return;
-	if (pgd_bad(*dir)) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
-		return;
-	}
-
-	pmd = pmd_offset(dir, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
-	do {
-		unmap_im_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, start, end, size;
-	struct mm_struct *mm;
-	pgd_t *dir;
+	unsigned long address, size;
 	void *addr;
 
-	if (!mem_init_done) {
+	if (!mem_init_done)
 		return;
-	}
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 
-	if ((size = im_free(addr)) == 0) {
+	if ((size = im_free(addr)) == 0)
 		return;
-	}
 
 	address = (unsigned long)addr;
-	start = address;
-	end = address + size;
-
-	mm = &ioremap_mm;
-	spin_lock(&mm->page_table_lock);
-
-	dir = pgd_offset_i(address);
-	flush_cache_vunmap(address, end);
-	do {
-		unmap_im_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-	return;
+	unmap_im_area(address, address + size);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)