Diffstat (limited to 'arch/ppc64/mm')

 arch/ppc64/mm/hash_low.S    |   9
 arch/ppc64/mm/hash_native.c |   3
 arch/ppc64/mm/hash_utils.c  |  11
 arch/ppc64/mm/hugetlbpage.c |  45
 arch/ppc64/mm/imalloc.c     |   5
 arch/ppc64/mm/init.c        | 201
 arch/ppc64/mm/slb.c         |   9
 arch/ppc64/mm/stab.c        |   5
 8 files changed, 151 insertions(+), 137 deletions(-)
diff --git a/arch/ppc64/mm/hash_low.S b/arch/ppc64/mm/hash_low.S
index 8c0156a37001..c23d46956dd9 100644
--- a/arch/ppc64/mm/hash_low.S
+++ b/arch/ppc64/mm/hash_low.S
@@ -85,7 +85,10 @@ _GLOBAL(__hash_page)
 	bne-	htab_wrong_access
 	/* Check if PTE is busy */
 	andi.	r0,r31,_PAGE_BUSY
-	bne-	1b
+	/* If so, just bail out and refault if needed. Someone else
+	 * is changing this PTE anyway and might hash it.
+	 */
+	bne-	bail_ok
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY,HASHPTE and ACCESSED)
 	 */
@@ -215,6 +218,10 @@ _GLOBAL(htab_call_hpte_remove)
 	/* Try all again */
 	b	htab_insert_pte

+bail_ok:
+	li	r3,0
+	b	bail
+
 htab_pte_insert_ok:
 	/* Insert slot number & secondary bit in PTE */
 	rldimi	r30,r3,12,63-15
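These two hunks change the busy-PTE policy: instead of spinning back to the retry label (the old "bne- 1b"), __hash_page now branches to the new bail_ok stub, which returns 0 so the access simply refaults later if nobody else hashed the page. A minimal C sketch of that policy (the function name and bit value here are illustrative only; the real logic is the assembler above):

    #define PAGE_BUSY_SKETCH 0x0800UL       /* stand-in for _PAGE_BUSY */

    /* Returns nonzero if an HPTE was inserted, 0 to let the caller refault. */
    static int hash_page_policy_sketch(unsigned long pte_val)
    {
        if (pte_val & PAGE_BUSY_SKETCH) {
            /* Another CPU is updating this PTE and may hash it itself;
             * bail out (bail_ok: li r3,0; b bail) instead of spinning. */
            return 0;   /* no HPTE inserted */
        }
        /* ...otherwise set BUSY|ACCESSED|HASHPTE and insert... */
        return 1;
    }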
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 144657e0c3d5..52b6b9305341 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context,

 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((batch->addr[i] >= USER_START) &&
-		    (batch->addr[i] <= USER_END))
+		if (batch->addr[i] < KERNELBASE)
 			vsid = get_vsid(context, batch->addr[i]);
 		else
 			vsid = get_kernel_vsid(batch->addr[i]);
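This hunk and the flush_hash_page() change below make the same simplification: the USER_START/USER_END window test becomes a single comparison against KERNELBASE. A standalone sketch of the selection (the wrapper is hypothetical; get_vsid() and get_kernel_vsid() are the real helpers visible in the diff):

    static unsigned long pick_vsid_sketch(unsigned long context, unsigned long ea)
    {
        /* Everything below the kernel's base address is a user address
         * and translates through the per-context VSID; kernel, vmalloc
         * and IO addresses all take the kernel VSID. */
        if (ea < KERNELBASE)
            return get_vsid(context, ea);
        return get_kernel_vsid(ea);
    }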
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index e48be12f518c..0a0f97008d02 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	int local = 0;
 	cpumask_t tmp;

+	if ((ea & ~REGION_MASK) > EADDR_MASK)
+		return 1;
+
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		user_region = 1;
 		mm = current->mm;
-		if ((ea > USER_END) || (! mm))
+		if (! mm)
 			return 1;

 		vsid = get_vsid(mm->context.id, ea);
 		break;
 	case IO_REGION_ID:
-		if (ea > IMALLOC_END)
-			return 1;
 		mm = &ioremap_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
 	case VMALLOC_REGION_ID:
-		if (ea > VMALLOC_END)
-			return 1;
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
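The three per-region upper-bound tests (USER_END, IMALLOC_END, VMALLOC_END) collapse into one check at the top of hash_page(): mask off the region-ID bits and reject any in-region offset beyond what the MMU implements. The predicate in isolation (assuming the REGION_MASK/EADDR_MASK definitions from the ppc64 headers):

    /* Nonzero means reject: the offset within the region exceeds the
     * largest effective address the hash code can handle. */
    static int ea_out_of_range_sketch(unsigned long ea)
    {
        return (ea & ~REGION_MASK) > EADDR_MASK;
    }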
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	unsigned long vsid, vpn, va, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);

-	if ((ea >= USER_START) && (ea <= USER_END))
+	if (ea < KERNELBASE)
 		vsid = get_vsid(context, ea);
 	else
 		vsid = get_kernel_vsid(ea);
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 390296efe3e0..d3bf86a5c1ad 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr)
 	return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
 }

-static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
 {
 	int index;

@@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)

 	index = hugepgd_index(addr);
 	BUG_ON(index >= PTRS_PER_HUGEPGD);
-	return mm->context.huge_pgdir + index;
+	return (pud_t *)(mm->context.huge_pgdir + index);
 }

-static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr)
+static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
 {
 	int index;

-	if (pgd_none(*dir))
+	if (pud_none(*dir))
 		return NULL;

 	index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
-	return (pte_t *)pgd_page(*dir) + index;
+	return (pte_t *)pud_page(*dir) + index;
 }

-static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	BUG_ON(! in_hugepage_area(mm->context, addr));

@@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepgd_offset(mm, addr);
 }

-static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
-			    unsigned long addr)
+static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
 {
-	if (! pgd_present(*dir)) {
+	if (! pud_present(*dir)) {
 		pte_t *new;

 		spin_unlock(&mm->page_table_lock);
@@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 	 * Because we dropped the lock, we should re-check the
 	 * entry, as somebody else could have populated it..
 	 */
-	if (pgd_present(*dir)) {
+	if (pud_present(*dir)) {
 		if (new)
 			kmem_cache_free(zero_cache, new);
 	} else {
@@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 		ptepage = virt_to_page(new);
 		ptepage->mapping = (void *) mm;
 		ptepage->index = addr & HUGEPGDIR_MASK;
-		pgd_populate(mm, dir, new);
+		pud_populate(mm, dir, new);
 	}
 }

@@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,

 static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;

 	BUG_ON(! in_hugepage_area(mm->context, addr));

-	pgd = hugepgd_offset(mm, addr);
-	if (! pgd)
+	pud = hugepgd_offset(mm, addr);
+	if (! pud)
 		return NULL;

-	return hugepte_offset(pgd, addr);
+	return hugepte_offset(pud, addr);
 }

 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;

 	BUG_ON(! in_hugepage_area(mm->context, addr));

-	pgd = hugepgd_alloc(mm, addr);
-	if (! pgd)
+	pud = hugepgd_alloc(mm, addr);
+	if (! pud)
 		return NULL;

-	return hugepte_alloc(mm, pgd, addr);
+	return hugepte_alloc(mm, pud, addr);
 }

 static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)

 	/* cleanup any hugepte pages leftover */
 	for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
-		pgd_t *pgd = pgdir + i;
+		pud_t *pud = (pud_t *)(pgdir + i);

-		if (! pgd_none(*pgd)) {
-			pte_t *pte = (pte_t *)pgd_page(*pgd);
+		if (! pud_none(*pud)) {
+			pte_t *pte = (pte_t *)pud_page(*pud);
 			struct page *ptepage = virt_to_page(pte);

 			ptepage->mapping = NULL;
@@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
 			BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
 			kmem_cache_free(zero_cache, pte);
 		}
-		pgd_clear(pgd);
+		pud_clear(pud);
 	}

 	BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
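Every hunk in hugetlbpage.c is the same mechanical conversion: with four-level page tables the hugepage directory sits at the pud level, so each pgd_* accessor on it becomes the pud_* equivalent, with casts bridging the pgd_t-typed storage in mm->context.huge_pgdir. Condensed into one hypothetical helper (mirroring hugepgd_offset() plus hugepte_offset() above):

    static pte_t *hugepte_lookup_sketch(struct mm_struct *mm, unsigned long addr)
    {
        /* huge_pgdir is declared as pgd_t *, but its slots now act as puds */
        pud_t *pud = (pud_t *)(mm->context.huge_pgdir + hugepgd_index(addr));

        if (pud_none(*pud))                 /* formerly pgd_none() */
            return NULL;
        return (pte_t *)pud_page(*pud) +    /* formerly pgd_page() */
               ((addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE);
    }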
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index 9d92b0d9cde5..cb8727f3267a 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -14,6 +14,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
+#include <asm/imalloc.h>

 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
 	unsigned long addr;
 	struct vm_struct **p, *tmp;

-	addr = IMALLOC_START;
+	addr = ioremap_bot;
 	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
-		if ((unsigned long)tmp->addr >= IMALLOC_START)
+		if ((unsigned long)tmp->addr >= ioremap_bot)
 			addr = tmp->size + (unsigned long) tmp->addr;
 		if (addr > IMALLOC_END-size)
 			return 1;
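get_free_im_addr() does a first-fit scan of the sorted imlist; the change starts the scan at the runtime watermark ioremap_bot rather than the fixed IMALLOC_START, so regions bolted below the watermark during early boot are skipped. A simplified model of the search (hypothetical list type, same control flow):

    struct im_region { unsigned long addr, size; struct im_region *next; };

    static int im_first_fit_sketch(struct im_region *list, unsigned long bot,
                                   unsigned long end, unsigned long size,
                                   unsigned long *out)
    {
        unsigned long addr = bot;       /* start above bolted mappings */
        struct im_region *tmp;

        for (tmp = list; tmp; tmp = tmp->next) {
            if (addr + size < tmp->addr)
                break;                          /* gap big enough */
            if (tmp->addr >= bot)
                addr = tmp->addr + tmp->size;   /* skip this region */
            if (addr > end - size)
                return 1;                       /* space exhausted */
        }
        *out = addr;
        return 0;
    }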
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a7149b9fc35c..4b42aff74d73 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -64,6 +64,7 @@
 #include <asm/iommu.h>
 #include <asm/abs_addr.h>
 #include <asm/vdso.h>
+#include <asm/imalloc.h>

 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
@@ -136,14 +137,78 @@ void iounmap(volatile void __iomem *addr)

 #else

+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+				     unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		unmap_im_area_pte(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+				     unsigned long end)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		unmap_im_area_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+	struct mm_struct *mm = &ioremap_mm;
+	unsigned long next, start = addr;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset_i(addr);
+	flush_cache_vunmap(addr, end);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		unmap_im_area_pud(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range(start, end);
+
+	spin_unlock(&mm->page_table_lock);
+}
+
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	unsigned long vsid;
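The new unmap helpers follow the kernel's generic four-level walk idiom: each level clamps its sub-range with pgd_addr_end()/pud_addr_end()/pmd_addr_end(), so the level below only ever sees addresses inside a single entry, and the *_none_or_clear_bad() tests skip (and sanity-check) empty slots. The shape of one level in isolation (hypothetical leaf operation):

    static void walk_one_level_sketch(pud_t *pud, unsigned long addr,
                                      unsigned long end)
    {
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
            next = pmd_addr_end(addr, end);   /* min(next pmd boundary, end) */
            if (pmd_none_or_clear_bad(pmd))
                continue;                     /* empty entry: skip its range */
            /* ...operate on [addr, next), wholly inside *pmd... */
        } while (pmd++, addr = next, addr != end);
    }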
@@ -151,9 +216,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	if (mem_init_done) {
 		spin_lock(&ioremap_mm.page_table_lock);
 		pgdp = pgd_offset_i(ea);
-		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+		if (!ptep)
+			return -ENOMEM;
 		pa = abs_to_phys(pa);
 		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 			   __pgprot(flags)));
@@ -181,6 +252,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 			panic("map_io_page: could not insert mapping");
 		}
 	}
+	return 0;
 }


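map_io_page() now reports failure instead of dereferencing a NULL table pointer: once mem_init_done, each of pud_alloc(), pmd_alloc() and pte_alloc_kernel() can fail, and any failure surfaces as -ENOMEM. The resulting calling convention, sketched as a hypothetical wrapper (the real caller is __ioremap_com() in the next hunk):

    static int map_range_sketch(unsigned long ea, unsigned long pa,
                                unsigned long npages, int flags)
    {
        unsigned long i;

        for (i = 0; i < npages; i++) {
            int rc = map_io_page(ea + i * PAGE_SIZE, pa + i * PAGE_SIZE, flags);
            if (rc)
                return rc;  /* caller unmaps whatever was already mapped */
        }
        return 0;
    }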
@@ -194,9 +266,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	flags |= pgprot_val(PAGE_KERNEL);

 	for (i = 0; i < size; i += PAGE_SIZE)
-		map_io_page(ea+i, pa+i, flags);
+		if (map_io_page(ea+i, pa+i, flags))
+			goto failure;

 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ failure:
+	if (mem_init_done)
+		unmap_im_area(ea, ea + size);
+	return NULL;
 }


@@ -206,10 +283,11 @@ ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }

-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
 {
 	unsigned long pa, ea;
+	void __iomem *ret;

 	/*
 	 * Choose an address to map it to.
@@ -232,12 +310,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
 		if (area == NULL)
 			return NULL;
 		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
 	} else {
 		ea = ioremap_bot;
-		ioremap_bot += size;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
 	}
-
-	return __ioremap_com(addr, pa, ea, size, flags);
+	return ret;
 }

 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
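With the error path wired through, __ioremap() can now return NULL, and it only commits address space (advancing ioremap_bot, or keeping the im_get_free_area() region) when __ioremap_com() actually succeeded. Callers should therefore check the result; a usage sketch with a made-up MMIO range:

    static int probe_sketch(void)
    {
        /* hypothetical MMIO physical base and size */
        void __iomem *regs = ioremap(0xf0001000UL, 0x1000);

        if (regs == NULL)
            return -ENOMEM;     /* page tables could not be allocated */
        /* ...readl()/writel() accesses through regs... */
        iounmap(regs);          /* unmaps and releases the imalloc region */
        return 0;
    }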
@@ -246,6 +328,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
+	void __iomem *ret;

 	/* For now, require page-aligned values for pa, ea, and size */
 	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +359,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		}
 	}

-	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
 		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 		return 1;
 	}
@@ -284,69 +372,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 	return 0;
 }

-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
-	do {
-		pte_t page;
-		page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
-		       " table\n");
-	} while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pmd_t *pmd;
-
-	if (pgd_none(*dir))
-		return;
-	if (pgd_bad(*dir)) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
-		return;
-	}
-
-	pmd = pmd_offset(dir, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
-	do {
-		unmap_im_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
@@ -356,39 +381,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, start, end, size;
-	struct mm_struct *mm;
-	pgd_t *dir;
+	unsigned long address, size;
 	void *addr;

-	if (!mem_init_done) {
+	if (!mem_init_done)
 		return;
-	}

 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);

-	if ((size = im_free(addr)) == 0) {
+	if ((size = im_free(addr)) == 0)
 		return;
-	}

 	address = (unsigned long)addr;
-	start = address;
-	end = address + size;
-
-	mm = &ioremap_mm;
-	spin_lock(&mm->page_table_lock);
-
-	dir = pgd_offset_i(address);
-	flush_cache_vunmap(address, end);
-	do {
-		unmap_im_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-	return;
+	unmap_im_area(address, address + size);
 }

 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
@@ -664,7 +669,7 @@ void __init paging_init(void)
 	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
 	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

-	free_area_init_node(0, &contig_page_data, zones_size,
+	free_area_init_node(0, NODE_DATA(0), zones_size,
 			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 }
 #endif /* CONFIG_DISCONTIGMEM */
diff --git a/arch/ppc64/mm/slb.c b/arch/ppc64/mm/slb.c
index 6a20773f695d..244150a0bc18 100644
--- a/arch/ppc64/mm/slb.c
+++ b/arch/ppc64/mm/slb.c
@@ -33,8 +33,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }

-static inline void create_slbe(unsigned long ea, unsigned long vsid,
-			       unsigned long flags, unsigned long entry)
+static inline void create_slbe(unsigned long ea, unsigned long flags,
+			       unsigned long entry)
 {
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -145,9 +145,8 @@ void slb_initialize(void)
 	asm volatile("isync":::"memory");
 	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
 	asm volatile("isync; slbia; isync":::"memory");
-	create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
-	create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
-		    SLB_VSID_KERNEL, 1);
+	create_slbe(KERNELBASE, flags, 0);
+	create_slbe(VMALLOCBASE, SLB_VSID_KERNEL, 1);
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
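create_slbe() drops its vsid parameter because, as the context lines show, the body never used it: mk_vsid_data() recomputes the VSID from the effective address. That also retires the second call site's dubious get_kernel_vsid(KERNELBASE) argument for VMALLOCBASE. The two slbmte operands, modelled in C (vsid_data mirrors mk_vsid_data() above; the esid_data field names are an assumption, not taken from this diff):

    /* RS operand: VSID plus protection/size flags (as in mk_vsid_data) */
    static unsigned long vsid_data_sketch(unsigned long ea, unsigned long flags)
    {
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
    }

    /* RB operand: ESID, valid bit and slot index; ESID_MASK and
     * SLB_ESID_V are assumed names for the mask and valid bit. */
    static unsigned long esid_data_sketch(unsigned long ea, unsigned long slot)
    {
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
    }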
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index 31491131d5e4..df4bbe14153c 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -19,6 +19,11 @@
 #include <asm/paca.h>
 #include <asm/cputable.h>

+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};
+
 /* Both the segment table and SLB code uses the following cache */
 #define NR_STAB_CACHE_ENTRIES 8
 DEFINE_PER_CPU(long, stab_cache_ptr);
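stab.c now carries its own definition of the segment-table entry it operates on: two doublewords per STE, an ESID word holding the valid bit and a VSID word holding protection flags. A sketch of how such an entry must be filled (the store ordering is the essential part; the eieio barrier and field usage here are assumptions for illustration, not copied from this diff):

    #include <stdint.h>

    struct stab_entry_sketch {
        uint64_t esid_data;     /* ESID | valid bit | class */
        uint64_t vsid_data;     /* VSID | protection flags */
    };

    static void make_ste_sketch(volatile struct stab_entry_sketch *ste,
                                uint64_t esid_word, uint64_t vsid_word)
    {
        ste->vsid_data = vsid_word;     /* payload word first... */
        __asm__ __volatile__("eieio" : : : "memory");   /* order the stores */
        ste->esid_data = esid_word;     /* ...then the word with the V bit */
    }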