path: root/arch/ppc64/mm/imalloc.c
author    David Gibson <david@gibson.dropbear.id.au>    2005-06-21 20:15:31 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>     2005-06-21 21:46:26 -0400
commit    20cee16ced631f70a62c97bdebae08a1c9470448 (patch)
tree      f980db4e2b19dfcd4413e6cba4f667eb1d8c79cd /arch/ppc64/mm/imalloc.c
parent    6879dc137ea4efad65cab8bf8a7c0b742bcf92cc (diff)
[PATCH] ppc64: Abolish ioremap_mm
Currently ppc64 has two mm_structs for the kernel, init_mm and also ioremap_mm. The latter really isn't necessary: this patch abolishes it, instead restricting vmallocs to the lower 1TB of the init_mm's range and placing io mappings in the upper 1TB. This simplifies the code in a number of places and eliminates an unnecessary set of pagetables. It also tweaks the unmap/free path a little, allowing us to remove the unmap_im_area() set of page table walkers, replacing them with unmap_vm_area().

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
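Note on the interface change below: im_free() no longer reports the size of the freed region. A minimal, hypothetical caller sketch (example_teardown is made up for illustration and is not part of this patch) of how a caller that still needs the size can take it from the vm_struct before freeing:

/*
 * Hypothetical sketch, not part of this patch: with im_free() now
 * returning void, a caller that still needs the region size must read
 * it from the vm_struct it was handed at allocation time, before the
 * area is torn down.
 */
static void example_teardown(struct vm_struct *area)
{
        unsigned long size = area->size;        /* grab before freeing */

        /* ... undo any hardware mapping covering 'size' bytes ... */

        im_free(area->addr);                    /* no size is returned any more */
}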
Diffstat (limited to 'arch/ppc64/mm/imalloc.c')
-rw-r--r--   arch/ppc64/mm/imalloc.c   20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index cb8727f3267a..b6e75b891ac0 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -15,6 +15,7 @@
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
 #include <asm/imalloc.h>
+#include <asm/cacheflush.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -285,29 +286,32 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
 	return area;
 }
 
-unsigned long im_free(void * addr)
+void im_free(void * addr)
 {
 	struct vm_struct **p, *tmp;
-	unsigned long ret_size = 0;
 
 	if (!addr)
-		return ret_size;
-	if ((PAGE_SIZE-1) & (unsigned long) addr) {
+		return;
+	if ((unsigned long) addr & ~PAGE_MASK) {
 		printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
-		return ret_size;
+		return;
 	}
 	down(&imlist_sem);
 	for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
 		if (tmp->addr == addr) {
-			ret_size = tmp->size;
 			*p = tmp->next;
+
+			/* XXX: do we need the lock? */
+			spin_lock(&init_mm.page_table_lock);
+			unmap_vm_area(tmp);
+			spin_unlock(&init_mm.page_table_lock);
+
 			kfree(tmp);
 			up(&imlist_sem);
-			return ret_size;
+			return;
 		}
 	}
 	up(&imlist_sem);
 	printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
 		addr);
-	return ret_size;
 }