Diffstat (limited to 'arch/ppc64/mm/init.c')
 arch/ppc64/mm/init.c | 198 ++++++++++++++++++++++++++-----------------------
 1 file changed, 101 insertions(+), 97 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a7149b9fc35c..cf33d7ec2e29 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+				     unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		unmap_im_area_pte(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+				     unsigned long end)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		unmap_im_area_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+	struct mm_struct *mm = &ioremap_mm;
+	unsigned long start = addr, next;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset_i(addr);
+	flush_cache_vunmap(addr, end);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		unmap_im_area_pud(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range(start, end);
+
+	spin_unlock(&mm->page_table_lock);
+}
+
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	unsigned long vsid;
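
The walkers added above follow the kernel's generic four-level teardown pattern: each level clamps its sub-range with pgd/pud/pmd_addr_end() and skips unpopulated entries with p?d_none_or_clear_bad(). For reference, the addr_end clamp macros in the generic pagetable headers of this era look essentially like this; the "- 1" on both sides keeps the comparison correct if end wraps around to 0:

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
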
@@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	if (mem_init_done) {
 		spin_lock(&ioremap_mm.page_table_lock);
 		pgdp = pgd_offset_i(ea);
-		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+		if (!ptep)
+			return -ENOMEM;
 		pa = abs_to_phys(pa);
 		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 							  __pgprot(flags)));
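
One thing worth flagging in the hunk above: all three new -ENOMEM returns exit with ioremap_mm.page_table_lock still held, so a failed allocation would leave the lock taken. A minimal sketch of the mem_init_done path with a single unlock point; map_io_page_locked and the out label are illustrative names, not part of this patch:

static int map_io_page_locked(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int err = -ENOMEM;

	/* Covers only the mem_init_done page-table path; the HPT insert
	 * that map_io_page() performs afterwards is omitted here. */
	spin_lock(&ioremap_mm.page_table_lock);
	pgdp = pgd_offset_i(ea);
	pudp = pud_alloc(&ioremap_mm, pgdp, ea);
	if (!pudp)
		goto out;
	pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
	if (!pmdp)
		goto out;
	ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
	if (!ptep)
		goto out;
	set_pte_at(&ioremap_mm, ea, ptep,
		   pfn_pte(abs_to_phys(pa) >> PAGE_SHIFT, __pgprot(flags)));
	err = 0;
out:
	spin_unlock(&ioremap_mm.page_table_lock);
	return err;
}
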
@@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 			panic("map_io_page: could not insert mapping");
 		}
 	}
+	return 0;
 }
 
 
@@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	flags |= pgprot_val(PAGE_KERNEL);
 
 	for (i = 0; i < size; i += PAGE_SIZE)
-		map_io_page(ea+i, pa+i, flags);
+		if (map_io_page(ea+i, pa+i, flags))
+			goto failure;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ failure:
+	if (mem_init_done)
+		unmap_im_area(ea, ea + size);
+	return NULL;
 }
 
 
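
The failure path above unmaps the entire requested range rather than tracking exactly how far the loop got; that is safe because the unmap_im_area() walkers skip unpopulated entries via p?d_none_or_clear_bad(), and clearing an empty PTE is a no-op. The mem_init_done guard matters too: before paging is up, map_io_page() bolts translations straight into the hash table and there are no ioremap page tables to tear down. For contrast, the other common idiom unwinds exactly what was mapped; a self-contained sketch in plain C, names illustrative:

#include <stddef.h>

typedef int (*map_fn)(size_t idx);
typedef void (*unmap_fn)(size_t idx);

/* Map n items in order; on the first failure, unmap the ones that
 * succeeded (in reverse) and report failure to the caller. */
static int map_all(size_t n, map_fn map, unmap_fn unmap)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (map(i) != 0) {
			while (i-- > 0)
				unmap(i);
			return -1;
		}
	}
	return 0;
}
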
@@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
 {
 	unsigned long pa, ea;
+	void __iomem *ret;
 
 	/*
 	 * Choose an address to map it to.
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
 		if (area == NULL)
 			return NULL;
 		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
 	} else {
 		ea = ioremap_bot;
-		ioremap_bot += size;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
 	}
-
-	return __ioremap_com(addr, pa, ea, size, flags);
+	return ret;
 }
 
 #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
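
With map_io_page() now able to fail, __ioremap() and ioremap() return NULL instead of handing back a partially backed mapping, so callers must check the result. A hedged usage sketch from a driver's point of view; the MYDEV_* names and addresses are invented for illustration:

#include <linux/errno.h>
#include <asm/io.h>

#define MYDEV_MMIO_BASE	0xf8000000UL
#define MYDEV_MMIO_SIZE	0x1000UL

static void __iomem *mydev_regs;

static int mydev_map_regs(void)
{
	/* ioremap() may now return NULL on allocation failure */
	mydev_regs = ioremap(MYDEV_MMIO_BASE, MYDEV_MMIO_SIZE);
	if (!mydev_regs)
		return -ENOMEM;
	return 0;
}

static void mydev_unmap_regs(void)
{
	if (mydev_regs) {
		iounmap(mydev_regs);
		mydev_regs = NULL;
	}
}
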
@@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
+	void __iomem *ret;
 
 	/* For now, require page-aligned values for pa, ea, and size */
 	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		}
 	}
 
-	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure!\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
 		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 		return 1;
 	}
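
Note the error convention here: __ioremap_explicit() still returns 1 on failure and 0 on success rather than a negative errno, but it now distinguishes an allocation failure (NULL from __ioremap_com()) from a mapping that landed at an unexpected address. Callers simply test for nonzero; a small sketch, with mydev_map_fixed a hypothetical caller:

#include <linux/errno.h>

static int mydev_map_fixed(unsigned long pa, unsigned long ea,
			   unsigned long size, unsigned long flags)
{
	/* nonzero means failure, whatever the underlying cause */
	if (__ioremap_explicit(pa, ea, size, flags))
		return -ENOMEM;
	return 0;
}
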
@@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 	return 0;
 }
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
-	do {
-		pte_t page;
-		page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
-		       " table\n");
-	} while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pmd_t *pmd;
-
-	if (pgd_none(*dir))
-		return;
-	if (pgd_bad(*dir)) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
-		return;
-	}
-
-	pmd = pmd_offset(dir, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
-	do {
-		unmap_im_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, start, end, size;
-	struct mm_struct *mm;
-	pgd_t *dir;
+	unsigned long address, size;
 	void *addr;
 
-	if (!mem_init_done) {
+	if (!mem_init_done)
 		return;
-	}
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 
-	if ((size = im_free(addr)) == 0) {
+	if ((size = im_free(addr)) == 0)
 		return;
-	}
 
 	address = (unsigned long)addr;
-	start = address;
-	end = address + size;
-
-	mm = &ioremap_mm;
-	spin_lock(&mm->page_table_lock);
-
-	dir = pgd_offset_i(address);
-	flush_cache_vunmap(address, end);
-	do {
-		unmap_im_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-	return;
+	unmap_im_area(address, address + size);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)