Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 89
1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eb5cc7d00c5a..cac13b415635 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -748,7 +748,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask);
-	if (unlikely(IS_ERR(va))) {
+	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
 	}
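The unlikely() wrapper being dropped here was redundant: IS_ERR() is built on IS_ERR_VALUE(), which already carries the branch hint. A paraphrased sketch of the relevant include/linux/err.h definitions (not a verbatim copy of the header):

    #define MAX_ERRNO	4095

    /* The branch hint already lives in the predicate itself ... */
    #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

    static inline long IS_ERR(const void *ptr)
    {
            /* ... so wrapping IS_ERR() in unlikely() again buys nothing. */
            return IS_ERR_VALUE((unsigned long)ptr);
    }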
@@ -1315,13 +1315,6 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				-1, GFP_KERNEL, caller);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
-				   int node, gfp_t gfp_mask)
-{
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, __builtin_return_address(0));
-}
-
 static struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
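get_vm_area_node() goes away because nothing in the tree calls it any more; its node argument only steered where the metadata was allocated. A hypothetical out-of-tree caller would migrate to the surviving helper, roughly like this (illustrative only; get_vm_area_caller() is the remaining exported way to reserve a vm area while recording the caller):

    struct vm_struct *area;

    area = get_vm_area_caller(size, VM_IOREMAP, __builtin_return_address(0));
    if (!area)
            return NULL;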
@@ -1537,25 +1530,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size: allocation size
  * @align: desired alignment
+ * @start: vm area range start
+ * @end: vm area range end
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
  * @node: node to use for allocation or -1
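A note on the __vmalloc_area() removal above: it was the last wrapper to call kmemleak_alloc() by hand, and the removed comment documents why min_count was 3 there. For reference, the hook's signature as I recall it from include/linux/kmemleak.h (check the tree you are on):

    /* min_count is how many live references kmemleak must find before it
     * stops treating the object as a potential leak; the removed code
     * also trimmed PAGE_SIZE off the size because every vmalloc area
     * ends in a guard page. */
    void kmemleak_alloc(const void *ptr, size_t size, int min_count,
                        gfp_t gfp);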
@@ -1565,9 +1545,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags. Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1577,8 +1557,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1595,6 +1575,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size: allocation size
+ * @align: desired alignment
+ * @gfp_mask: flags for the page level allocator
+ * @prot: protection mask for the allocated pages
+ * @node: node to use for allocation or -1
+ * @caller: caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags. Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
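Exposing @start/@end turns the old static helper into a general placement primitive: a caller can now map pages into any window of kernel address space, not just VMALLOC_START..VMALLOC_END. As an illustration, an arch module_alloc() built on this hook might look roughly like the following (modeled on the x86 version, not verbatim arch code; MODULES_VADDR, MODULES_END and MODULES_LEN are the arch's module-range constants):

    void *module_alloc(unsigned long size)
    {
            if (PAGE_ALIGN(size) > MODULES_LEN)
                    return NULL;
            /* Same allocator as vmalloc(), but constrained to the module
             * mapping range and mapped executable. */
            return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                        GFP_KERNEL | __GFP_HIGHMEM,
                                        PAGE_KERNEL_EXEC, -1,
                                        __builtin_return_address(0));
    }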
@@ -2203,17 +2204,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  * vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas. This function allocates
- * congruent vmalloc areas for it. These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes. To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes. To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple. It
  * does everything top-down and scans areas from the end looking for
@@ -2224,7 +2224,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2234,8 +2234,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
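GFP_RECLAIM_MASK lives in mm/internal.h and strips a caller-supplied mask down to the flags that matter for reclaim; with gfp_mask gone and GFP_KERNEL hard-coded below, the filtering step had nothing left to do. For context, the mask looked roughly like this in kernels of this vintage (member list from memory, and it has shifted between releases):

    #define GFP_RECLAIM_MASK (__GFP_WAIT | __GFP_HIGH | __GFP_IO | __GFP_FS | \
                              __GFP_NOWARN | __GFP_REPEAT | __GFP_NOFAIL | \
                              __GFP_NORETRY | __GFP_NOMEMALLOC)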
@@ -2268,14 +2266,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
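Dropping the gfp_mask parameter is safe because the percpu allocator is the only caller and it always creates chunks from GFP_KERNEL context. Sketched from the caller in mm/percpu-vm.c of this era (names from memory, not verbatim):

    static struct pcpu_chunk *pcpu_create_chunk(void)
    {
            struct pcpu_chunk *chunk;
            struct vm_struct **vms;

            chunk = pcpu_alloc_chunk();
            if (!chunk)
                    return NULL;

            /* No gfp_mask argument any more; GFP_KERNEL is implied. */
            vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                    pcpu_nr_groups, pcpu_atom_size);
            if (!vms) {
                    pcpu_free_chunk(chunk);
                    return NULL;
            }

            chunk->data = vms;
            chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
            return chunk;
    }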
@@ -2456,13 +2454,8 @@ static int s_show(struct seq_file *m, void *p)
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);
 
-	if (v->caller) {
-		char buff[KSYM_SYMBOL_LEN];
-
-		seq_putc(m, ' ');
-		sprint_symbol(buff, (unsigned long)v->caller);
-		seq_puts(m, buff);
-	}
+	if (v->caller)
+		seq_printf(m, " %pS", v->caller);
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
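The %pS printk extension performs the sprint_symbol() lookup internally, so the KSYM_SYMBOL_LEN stack buffer and the seq_putc()/seq_puts() pair collapse into a single seq_printf(). The output is unchanged: both forms render the address as symbol+offset/size, e.g.:

    /* Prints something like "caller: vmalloc_32+0x2f/0x40" */
    printk(KERN_DEBUG "caller: %pS\n", __builtin_return_address(0));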