author     David Gibson <david@gibson.dropbear.id.au>   2005-06-21 20:15:31 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-21 21:46:26 -0400
commit     20cee16ced631f70a62c97bdebae08a1c9470448 (patch)
tree       f980db4e2b19dfcd4413e6cba4f667eb1d8c79cd /arch/ppc64/mm
parent     6879dc137ea4efad65cab8bf8a7c0b742bcf92cc (diff)
[PATCH] ppc64: Abolish ioremap_mm
Currently ppc64 has two mm_structs for the kernel, init_mm and ioremap_mm. The latter really isn't necessary: this patch abolishes it, instead restricting vmallocs to the lower 1TB of init_mm's range and placing io mappings in the upper 1TB. This simplifies the code in a number of places and eliminates an unnecessary set of pagetables. It also tweaks the unmap/free path a little, allowing us to remove the unmap_im_area() set of page table walkers and replace them with unmap_vm_area().

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
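The central layout change is that the kernel region served by init_mm now holds both vmalloc and io mappings side by side, rather than io mappings living in their own ioremap_mm. A minimal sketch of that split; the constant names and values below are illustrative assumptions, not the actual asm-ppc64 header definitions:

/*
 * Illustrative sketch only -- names and values are assumptions, not the
 * real ppc64 headers.  After the patch, one kernel region owned by
 * init_mm is split into a 1TB vmalloc half and a 1TB io-mapping half.
 */
#define KERNEL_REGION_BASE  0xD000000000000000UL    /* assumed region start   */
#define ONE_TB              0x0000010000000000UL

#define VMALLOC_START       KERNEL_REGION_BASE      /* lower 1TB: vmalloc     */
#define VMALLOC_END         (VMALLOC_START + ONE_TB)

#define IMALLOC_START       VMALLOC_END             /* upper 1TB: io mappings */
#define IMALLOC_END         (IMALLOC_START + ONE_TB)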
Diffstat (limited to 'arch/ppc64/mm')
-rw-r--r--  arch/ppc64/mm/hash_utils.c |  4
-rw-r--r--  arch/ppc64/mm/imalloc.c    | 20
-rw-r--r--  arch/ppc64/mm/init.c       | 93
3 files changed, 21 insertions(+), 96 deletions(-)
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index 0a0f97008d02..87d0525f56fa 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -310,10 +310,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
-	case IO_REGION_ID:
-		mm = &ioremap_mm;
-		vsid = get_kernel_vsid(ea);
-		break;
 	case VMALLOC_REGION_ID:
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index cb8727f3267a..b6e75b891ac0 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -15,6 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
 #include <asm/imalloc.h>
+#include <asm/cacheflush.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -285,29 +286,32 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
 	return area;
 }
 
-unsigned long im_free(void * addr)
+void im_free(void * addr)
 {
 	struct vm_struct **p, *tmp;
-	unsigned long ret_size = 0;
 
 	if (!addr)
-		return ret_size;
-	if ((PAGE_SIZE-1) & (unsigned long) addr) {
+		return;
+	if ((unsigned long) addr & ~PAGE_MASK) {
 		printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
-		return ret_size;
+		return;
 	}
 	down(&imlist_sem);
 	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
-			ret_size = tmp->size;
 			*p = tmp->next;
+
+			/* XXX: do we need the lock? */
+			spin_lock(&init_mm.page_table_lock);
+			unmap_vm_area(tmp);
+			spin_unlock(&init_mm.page_table_lock);
+
 			kfree(tmp);
 			up(&imlist_sem);
-			return ret_size;
+			return;
 		}
 	}
 	up(&imlist_sem);
 	printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
 			addr);
-	return ret_size;
 }
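The unmap_vm_area() call added above is what makes the hand-rolled unmap_im_area_*() page-table walkers removable from init.c below: the generic vmalloc helper already walks and clears the kernel page table entries covering a struct vm_struct. A condensed sketch of the resulting free path; the wrapper name free_im_area is hypothetical, and the locking simply mirrors the patch's own use of init_mm.page_table_lock:

/*
 * Hypothetical wrapper illustrating the post-patch teardown of an
 * imalloc region; "area" is a vm_struct already unlinked from imlist.
 */
static void free_im_area(struct vm_struct *area)
{
	spin_lock(&init_mm.page_table_lock);
	unmap_vm_area(area);		/* generic walker clears the PTEs */
	spin_unlock(&init_mm.page_table_lock);
	kfree(area);
}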
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index 4b42aff74d73..6fa1e6490b57 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -73,9 +73,6 @@ static unsigned long phbs_io_bot = PHBS_IO_BASE;
 extern pgd_t swapper_pg_dir[];
 extern struct task_struct *current_set[NR_CPUS];
 
-extern pgd_t ioremap_dir[];
-pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
-
 unsigned long klimit = (unsigned long)_end;
 
 unsigned long _SDR1=0;
@@ -137,69 +134,6 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
-			      unsigned long end)
-{
-	pte_t *pte;
-
-	pte = pte_offset_kernel(pmd, addr);
-	do {
-		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
-		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-}
-
-static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
-				     unsigned long end)
-{
-	pmd_t *pmd;
-	unsigned long next;
-
-	pmd = pmd_offset(pud, addr);
-	do {
-		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
-			continue;
-		unmap_im_area_pte(pmd, addr, next);
-	} while (pmd++, addr = next, addr != end);
-}
-
-static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
-				     unsigned long end)
-{
-	pud_t *pud;
-	unsigned long next;
-
-	pud = pud_offset(pgd, addr);
-	do {
-		next = pud_addr_end(addr, end);
-		if (pud_none_or_clear_bad(pud))
-			continue;
-		unmap_im_area_pmd(pud, addr, next);
-	} while (pud++, addr = next, addr != end);
-}
-
-static void unmap_im_area(unsigned long addr, unsigned long end)
-{
-	struct mm_struct *mm = &ioremap_mm;
-	unsigned long next;
-	pgd_t *pgd;
-
-	spin_lock(&mm->page_table_lock);
-
-	pgd = pgd_offset_i(addr);
-	flush_cache_vunmap(addr, end);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		unmap_im_area_pud(pgd, addr, next);
-	} while (pgd++, addr = next, addr != end);
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-}
-
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
@@ -214,21 +148,21 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 	unsigned long vsid;
 
 	if (mem_init_done) {
-		spin_lock(&ioremap_mm.page_table_lock);
-		pgdp = pgd_offset_i(ea);
-		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		spin_lock(&init_mm.page_table_lock);
+		pgdp = pgd_offset_k(ea);
+		pudp = pud_alloc(&init_mm, pgdp, ea);
 		if (!pudp)
 			return -ENOMEM;
-		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		pmdp = pmd_alloc(&init_mm, pudp, ea);
 		if (!pmdp)
 			return -ENOMEM;
-		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
+		ptep = pte_alloc_kernel(&init_mm, pmdp, ea);
 		if (!ptep)
 			return -ENOMEM;
 		pa = abs_to_phys(pa);
-		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
+		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 			      __pgprot(flags)));
-		spin_unlock(&ioremap_mm.page_table_lock);
+		spin_unlock(&init_mm.page_table_lock);
 	} else {
 		unsigned long va, vpn, hash, hpteg;
 
@@ -267,13 +201,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 
 	for (i = 0; i < size; i += PAGE_SIZE)
 		if (map_io_page(ea+i, pa+i, flags))
-			goto failure;
+			return NULL;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
- failure:
-	if (mem_init_done)
-		unmap_im_area(ea, ea + size);
-	return NULL;
 }
 
 
@@ -381,19 +311,14 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, size;
 	void *addr;
 
 	if (!mem_init_done)
 		return;
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-	if ((size = im_free(addr)) == 0)
-		return;
 
-	address = (unsigned long)addr;
-	unmap_im_area(address, address + size);
+	im_free(addr);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)