Diffstat (limited to 'mm/vmalloc.c')
 -rw-r--r--  mm/vmalloc.c | 99
 1 file changed, 71 insertions, 28 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7ef0903058ee..3231bf332878 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1253,18 +1253,22 @@ EXPORT_SYMBOL_GPL(map_vm_area);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 			      unsigned long flags, void *caller)
 {
-	struct vm_struct *tmp, **p;
-
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
 	vm->size = va->va_end - va->va_start;
 	vm->caller = caller;
 	va->private = vm;
 	va->flags |= VM_VM_AREA;
+}
 
+static void insert_vmalloc_vmlist(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+
+	vm->flags &= ~VM_UNLIST;
 	write_lock(&vmlist_lock);
 	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
 		if (tmp->addr >= vm->addr)
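This first hunk splits the old insert_vmalloc_vm() into two steps: setup_vmalloc_vm() fills in the vm_struct fields from the vmap_area, and insert_vmalloc_vmlist() clears VM_UNLIST and links the area into the address-sorted vmlist. The motivation, spelled out in the comment added two hunks below, is that vmlist readers must never see a vm_struct whose pages and nr_pages fields are still uninitialized. A sketch of the reader side, abridged from the usual /proc/vmallocinfo-style walk (illustrative only, not part of this patch; m stands for the reader's seq_file):

	struct vm_struct *v;

	read_lock(&vmlist_lock);
	for (v = vmlist; v; v = v->next) {
		/* If half-initialized areas were listed, v->nr_pages and
		 * v->pages could be garbage at this point. */
		if (v->pages)
			seq_printf(m, " pages=%d", v->nr_pages);
	}
	read_unlock(&vmlist_lock);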
@@ -1275,6 +1279,13 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 	write_unlock(&vmlist_lock);
 }
 
+static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+			      unsigned long flags, void *caller)
+{
+	setup_vmalloc_vm(vm, va, flags, caller);
+	insert_vmalloc_vmlist(vm);
+}
+
 static struct vm_struct *__get_vm_area_node(unsigned long size,
 		unsigned long align, unsigned long flags, unsigned long start,
 		unsigned long end, int node, gfp_t gfp_mask, void *caller)
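insert_vmalloc_vm() is then rebuilt as the composition of the two helpers, so any caller that does not pass VM_UNLIST keeps the old behavior: the area is fully set up and immediately visible on vmlist. For example (hypothetical caller, assuming the long-standing get_vm_area_caller() API):

	struct vm_struct *vm;

	vm = get_vm_area_caller(PAGE_SIZE, VM_IOREMAP,
				__builtin_return_address(0));
	/* if vm != NULL it is already linked into vmlist here */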
@@ -1313,7 +1324,18 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 	}
 
-	insert_vmalloc_vm(area, va, flags, caller);
+	/*
+	 * When this function is called from __vmalloc_node_range,
+	 * we do not add vm_struct to vmlist here to avoid
+	 * accessing uninitialized members of vm_struct such as
+	 * pages and nr_pages fields. They will be set later.
+	 * To distinguish it from others, we use a VM_UNLIST flag.
+	 */
+	if (flags & VM_UNLIST)
+		setup_vmalloc_vm(area, va, flags, caller);
+	else
+		insert_vmalloc_vm(area, va, flags, caller);
+
 	return area;
 }
 
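Note that VM_UNLIST itself is not defined in this file; it presumably sits alongside the other VM_* bits in include/linux/vmalloc.h, which is outside this diffstat. A plausible definition, for reference only:

	#define VM_UNLIST	0x00000020	/* vm_struct is not listed in vmlist */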
@@ -1381,17 +1403,20 @@ struct vm_struct *remove_vm_area(const void *addr)
 	va = find_vmap_area((unsigned long)addr);
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->private;
-		struct vm_struct *tmp, **p;
-		/*
-		 * remove from list and disallow access to this vm_struct
-		 * before unmap. (address range confliction is maintained by
-		 * vmap.)
-		 */
-		write_lock(&vmlist_lock);
-		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
-			;
-		*p = tmp->next;
-		write_unlock(&vmlist_lock);
+
+		if (!(vm->flags & VM_UNLIST)) {
+			struct vm_struct *tmp, **p;
+			/*
+			 * remove from list and disallow access to
+			 * this vm_struct before unmap. (address range
+			 * confliction is maintained by vmap.)
+			 */
+			write_lock(&vmlist_lock);
+			for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
+				;
+			*p = tmp->next;
+			write_unlock(&vmlist_lock);
+		}
 
 		vmap_debug_free_range(va->va_start, va->va_end);
 		free_unmap_vmap_area(va);
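The new guard in remove_vm_area() matters because the function can now run against an area that carries VM_UNLIST and was never linked in. The old unconditional walk assumed vm was always on vmlist; for an unlisted area it never matches, so the traversal runs off the end of the list:

	/* Without the VM_UNLIST check: tmp reaches NULL at the end of
	 * the list, p becomes &tmp->next (an offset from NULL), and
	 * the next *p read faults. */
	for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
		;
	*p = tmp->next;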
@@ -1568,8 +1593,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return area->addr;
 
 fail:
-	warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
-			  "allocated %ld of %ld bytes\n",
+	warn_alloc_failed(gfp_mask, order,
+		  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
 			  (area->nr_pages*PAGE_SIZE), area->size);
 	vfree(area->addr);
 	return NULL;
@@ -1600,17 +1625,22 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 
 	size = PAGE_ALIGN(size);
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
-		return NULL;
-
-	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
-				  gfp_mask, caller);
+		goto fail;
 
+	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
+				  start, end, node, gfp_mask, caller);
 	if (!area)
-		return NULL;
+		goto fail;
 
 	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 
 	/*
+	 * In this function, newly allocated vm_struct is not added
+	 * to vmlist at __get_vm_area_node(). so, it is added here.
+	 */
+	insert_vmalloc_vmlist(area);
+
+	/*
 	 * A ref_count = 3 is needed because the vm_struct and vmap_area
 	 * structures allocated in the __get_vm_area_node() function contain
 	 * references to the virtual address of the vmalloc'ed block.
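One caveat with the ordering in this hunk: __vmalloc_area_node() vfrees the area and returns NULL when page allocation fails, so calling insert_vmalloc_vmlist(area) unconditionally can link a just-freed vm_struct into vmlist. Checking addr before the insert closes that hole (a follow-up fix upstream added exactly such a check); sketch of the safer ordering:

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
	if (!addr)
		return NULL;	/* area was already freed on failure */

	insert_vmalloc_vmlist(area);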
@@ -1618,6 +1648,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	kmemleak_alloc(addr, real_size, 3, gfp_mask);
 
 	return addr;
+
+fail:
+	warn_alloc_failed(gfp_mask, 0,
+			  "vmalloc: allocation failure: %lu bytes\n",
+			  real_size);
+	return NULL;
 }
 
 /**
@@ -2105,23 +2141,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-	/* apply_to_page_range() does all the hard work. */
+	pte_t ***p = data;
+
+	if (p) {
+		*(*p) = pte;
+		(*p)++;
+	}
 	return 0;
 }
 
 /**
  * alloc_vm_area - allocate a range of kernel address space
  * @size: size of the area
+ * @ptes: returns the PTEs for the address space
  *
  * Returns: NULL on failure, vm_struct on success
  *
  * This function reserves a range of kernel address space, and
  * allocates pagetables to map that range. No actual mappings
- * are created. If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	struct vm_struct *area;
 
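With the extra parameter, a caller that wants to install mappings by hand can capture the PTE slots at reservation time. Minimal usage sketch (hypothetical caller; the single-page case a Xen-style backend would use):

	pte_t *pte;
	struct vm_struct *area;

	area = alloc_vm_area(PAGE_SIZE, &pte);
	if (!area)
		return -ENOMEM;
	/* area->addr is the reserved VA; pte points at its empty PTE
	 * slot in init_mm, ready for a foreign mapping. */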
@@ -2135,7 +2178,7 @@ struct vm_struct *alloc_vm_area(size_t size)
 	 * of kernel virtual address space and mapped into init_mm.
 	 */
 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				area->size, f, NULL)) {
+				size, f, ptes ? &ptes : NULL)) {
 		free_vm_area(area);
 		return NULL;
 	}
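Two details in this final hunk: passing size instead of area->size keeps vmalloc's trailing guard page out of the PTE walk (area->size includes it), and data is the address of the ptes parameter, so f() stores through the cursor and advances it once per page. A multi-page caller must therefore supply an array with one pte_t * slot per page (illustrative):

	pte_t *ptes[4];
	struct vm_struct *area;

	area = alloc_vm_area(4 * PAGE_SIZE, ptes);
	if (!area)
		return -ENOMEM;
	/* ptes[0] through ptes[3] now point at the four PTE slots */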