| author | Paul Mackerras <paulus@samba.org> | 2005-10-10 07:58:35 -0400 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2005-10-10 07:58:35 -0400 |
| commit | ab1f9dac6eea25ee59e4c8e1cf0b7476afbbfe07 (patch) | |
| tree | 03577652197b5e58c348ede3c474bc8dd47e046c /arch/powerpc/mm/imalloc.c | |
| parent | 70d64ceaa1a84d2502405422a4dfd3f87786a347 (diff) | |
powerpc: Merge arch/ppc64/mm to arch/powerpc/mm
This moves the remaining files in arch/ppc64/mm to arch/powerpc/mm,
and arranges that we use them when compiling with ARCH=ppc64.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/imalloc.c')
| -rw-r--r-- | arch/powerpc/mm/imalloc.c | 317 |
1 files changed, 317 insertions, 0 deletions
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c
new file mode 100644
index 000000000000..c65b87b92756
--- /dev/null
+++ b/arch/powerpc/mm/imalloc.c
@@ -0,0 +1,317 @@
/*
 * c 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/semaphore.h>
#include <asm/imalloc.h>
#include <asm/cacheflush.h>

static DECLARE_MUTEX(imlist_sem);
struct vm_struct * imlist = NULL;

static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
{
        unsigned long addr;
        struct vm_struct **p, *tmp;

        addr = ioremap_bot;
        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                if ((unsigned long)tmp->addr >= ioremap_bot)
                        addr = tmp->size + (unsigned long) tmp->addr;
                if (addr >= IMALLOC_END-size)
                        return 1;
        }
        *im_addr = addr;

        return 0;
}

/* Return whether the region described by v_addr and size is a subset
 * of the region described by parent
 */
static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
                        struct vm_struct *parent)
{
        return (int) (v_addr >= (unsigned long) parent->addr &&
                      v_addr < (unsigned long) parent->addr + parent->size &&
                      size < parent->size);
}

/* Return whether the region described by v_addr and size is a superset
 * of the region described by child
 */
static int im_region_is_superset(unsigned long v_addr, unsigned long size,
                struct vm_struct *child)
{
        struct vm_struct parent;

        parent.addr = (void *) v_addr;
        parent.size = size;

        return im_region_is_subset((unsigned long) child->addr, child->size,
                        &parent);
}

/* Return whether the region described by v_addr and size overlaps
 * the region described by vm.  Overlapping regions meet the
 * following conditions:
 * 1) The regions share some part of the address space
 * 2) The regions aren't identical
 * 3) Neither region is a subset of the other
 */
static int im_region_overlaps(unsigned long v_addr, unsigned long size,
                     struct vm_struct *vm)
{
        if (im_region_is_superset(v_addr, size, vm))
                return 0;

        return (v_addr + size > (unsigned long) vm->addr + vm->size &&
                v_addr < (unsigned long) vm->addr + vm->size) ||
               (v_addr < (unsigned long) vm->addr &&
                v_addr + size > (unsigned long) vm->addr);
}

/* Determine imalloc status of region described by v_addr and size.
 * Can return one of the following:
 * IM_REGION_UNUSED   - Entire region is unallocated in imalloc space.
 * IM_REGION_SUBSET   - Region is a subset of a region that is already
 *                      allocated in imalloc space.
 *                      vm will be assigned to a ptr to the parent region.
 * IM_REGION_EXISTS   - Exact region already allocated in imalloc space.
 *                      vm will be assigned to a ptr to the existing imlist
 *                      member.
 * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
 * IM_REGION_SUPERSET - Region is a superset of a region that is already
 *                      allocated in imalloc space.
 */
static int im_region_status(unsigned long v_addr, unsigned long size,
                    struct vm_struct **vm)
{
        struct vm_struct *tmp;

        for (tmp = imlist; tmp; tmp = tmp->next)
                if (v_addr < (unsigned long) tmp->addr + tmp->size)
                        break;

        if (tmp) {
                if (im_region_overlaps(v_addr, size, tmp))
                        return IM_REGION_OVERLAP;

                *vm = tmp;
                if (im_region_is_subset(v_addr, size, tmp)) {
                        /* Return with tmp pointing to superset */
                        return IM_REGION_SUBSET;
                }
                if (im_region_is_superset(v_addr, size, tmp)) {
                        /* Return with tmp pointing to first subset */
                        return IM_REGION_SUPERSET;
                }
                else if (v_addr == (unsigned long) tmp->addr &&
                         size == tmp->size) {
                        /* Return with tmp pointing to exact region */
                        return IM_REGION_EXISTS;
                }
        }

        *vm = NULL;
        return IM_REGION_UNUSED;
}

static struct vm_struct * split_im_region(unsigned long v_addr,
                unsigned long size, struct vm_struct *parent)
{
        struct vm_struct *vm1 = NULL;
        struct vm_struct *vm2 = NULL;
        struct vm_struct *new_vm = NULL;

        vm1 = (struct vm_struct *) kmalloc(sizeof(*vm1), GFP_KERNEL);
        if (vm1 == NULL) {
                printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
                return NULL;
        }

        if (v_addr == (unsigned long) parent->addr) {
                /* Use existing parent vm_struct to represent child, allocate
                 * new one for the remainder of parent range
                 */
                vm1->size = parent->size - size;
                vm1->addr = (void *) (v_addr + size);
                vm1->next = parent->next;

                parent->size = size;
                parent->next = vm1;
                new_vm = parent;
        } else if (v_addr + size == (unsigned long) parent->addr +
                        parent->size) {
                /* Allocate new vm_struct to represent child, use existing
                 * parent one for remainder of parent range
                 */
                vm1->size = size;
                vm1->addr = (void *) v_addr;
                vm1->next = parent->next;
                new_vm = vm1;

                parent->size -= size;
                parent->next = vm1;
        } else {
                /* Allocate two new vm_structs for the new child and
                 * uppermost remainder, and use existing parent one for the
                 * lower remainder of parent range
                 */
                vm2 = (struct vm_struct *) kmalloc(sizeof(*vm2), GFP_KERNEL);
                if (vm2 == NULL) {
                        printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
                        kfree(vm1);
                        return NULL;
                }

                vm1->size = size;
                vm1->addr = (void *) v_addr;
                vm1->next = vm2;
                new_vm = vm1;

                vm2->size = ((unsigned long) parent->addr + parent->size) -
                        (v_addr + size);
                vm2->addr = (void *) v_addr + size;
                vm2->next = parent->next;

                parent->size = v_addr - (unsigned long) parent->addr;
                parent->next = vm1;
        }

        return new_vm;
}

static struct vm_struct * __add_new_im_area(unsigned long req_addr,
                                            unsigned long size)
{
        struct vm_struct **p, *tmp, *area;

        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
                if (req_addr + size <= (unsigned long)tmp->addr)
                        break;
        }

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        area->flags = 0;
        area->addr = (void *)req_addr;
        area->size = size;
        area->next = *p;
        *p = area;

        return area;
}

static struct vm_struct * __im_get_area(unsigned long req_addr,
                                        unsigned long size,
                                        int criteria)
{
        struct vm_struct *tmp;
        int status;

        status = im_region_status(req_addr, size, &tmp);
        if ((criteria & status) == 0) {
                return NULL;
        }

        switch (status) {
        case IM_REGION_UNUSED:
                tmp = __add_new_im_area(req_addr, size);
                break;
        case IM_REGION_SUBSET:
                tmp = split_im_region(req_addr, size, tmp);
                break;
        case IM_REGION_EXISTS:
                /* Return requested region */
                break;
        case IM_REGION_SUPERSET:
                /* Return first existing subset of requested region */
                break;
        default:
                printk(KERN_ERR "%s() unexpected imalloc region status\n",
                                __FUNCTION__);
                tmp = NULL;
        }

        return tmp;
}

struct vm_struct * im_get_free_area(unsigned long size)
{
        struct vm_struct *area;
        unsigned long addr;

        down(&imlist_sem);
        if (get_free_im_addr(size, &addr)) {
                printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
                                __FUNCTION__, size);
                area = NULL;
                goto next_im_done;
        }

        area = __im_get_area(addr, size, IM_REGION_UNUSED);
        if (area == NULL) {
                printk(KERN_ERR
                       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
                        __FUNCTION__, addr, size);
        }
next_im_done:
        up(&imlist_sem);
        return area;
}

struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
                int criteria)
{
        struct vm_struct *area;

        down(&imlist_sem);
        area = __im_get_area(v_addr, size, criteria);
        up(&imlist_sem);
        return area;
}

void im_free(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((unsigned long) addr & ~PAGE_MASK) {
                printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
                return;
        }
        down(&imlist_sem);
        for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;

                        /* XXX: do we need the lock? */
                        spin_lock(&init_mm.page_table_lock);
                        unmap_vm_area(tmp);
                        spin_unlock(&init_mm.page_table_lock);

                        kfree(tmp);
                        up(&imlist_sem);
                        return;
                }
        }
        up(&imlist_sem);
        printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
                        addr);
}
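For context, the sketch below shows how a caller might drive the entry points this file exports: im_get_free_area(), im_get_area() and im_free(). It is illustrative only and not part of the commit; the example_* names are hypothetical placeholders for whatever code actually installs the page table entries, while the im_* calls and the IM_REGION_* flags come from imalloc.c and <asm/imalloc.h>.

```c
/*
 * Illustrative sketch only -- not part of this commit.  The example_*
 * identifiers are hypothetical; only im_get_free_area(), im_get_area(),
 * im_free() and the IM_REGION_* flags are defined by imalloc.c and
 * <asm/imalloc.h>.
 */
#include <linux/vmalloc.h>
#include <asm/imalloc.h>

/* Hypothetical helper assumed for this sketch: installs page table
 * entries mapping [ea, ea + size) onto physical address pa,
 * returning 0 on success. */
extern int example_map_range(unsigned long ea, unsigned long pa,
                             unsigned long size);

static void __iomem *example_map_io(unsigned long pa, unsigned long size)
{
        struct vm_struct *area;

        /* Reserve an unused chunk of imalloc space between ioremap_bot
         * and IMALLOC_END; imlist_sem is taken inside imalloc.c, so the
         * caller does no locking of its own. */
        area = im_get_free_area(size);
        if (area == NULL)
                return NULL;

        /* The caller still has to create the actual mapping at area->addr. */
        if (example_map_range((unsigned long)area->addr, pa, size)) {
                im_free(area->addr);    /* drop the reservation on failure */
                return NULL;
        }

        return (void __iomem *)area->addr;
}

static void example_unmap_io(void __iomem *token)
{
        /* im_free() unlinks the region from imlist, unmaps it via
         * unmap_vm_area() and frees the vm_struct; it rejects addresses
         * that are not page aligned. */
        im_free((void *)token);
}
```

When a caller needs a specific virtual address rather than any free one, im_get_area(v_addr, size, criteria) is the entry point: the criteria bitmask (for example IM_REGION_UNUSED | IM_REGION_SUBSET | IM_REGION_EXISTS) names which region statuses the caller will accept, and __im_get_area() returns NULL whenever the computed status is not in that mask.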
