Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	89
1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 816f074fb4e1..f9b166732e70 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -748,7 +748,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask);
-	if (unlikely(IS_ERR(va))) {
+	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
 	}
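Note on the hunk above: the extra unlikely() was redundant because IS_ERR() already marks the error case cold internally. A sketch of the relevant definitions, as found in include/linux/err.h around this kernel version (shown for reference, check the tree you build against):

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline long __must_check IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}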
@@ -1316,13 +1316,6 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				-1, GFP_KERNEL, caller);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
-				   int node, gfp_t gfp_mask)
-{
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, __builtin_return_address(0));
-}
-
 static struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
@@ -1538,25 +1531,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:	allocation size
  * @align:	desired alignment
+ * @start:	vm area range start
+ * @end:	vm area range end
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or -1
@@ -1566,9 +1546,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1578,8 +1558,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1596,6 +1576,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:	allocation size
+ * @align:	desired alignment
+ * @gfp_mask:	flags for the page level allocator
+ * @prot:	protection mask for the allocated pages
+ * @node:	node to use for allocation or -1
+ * @caller:	caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags.  Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
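The point of the new range-taking variant is to let callers pin an allocation to an address window other than the default [VMALLOC_START, VMALLOC_END). A minimal sketch of how an architecture's module_alloc() could use it, assuming the arch defines MODULES_VADDR/MODULES_END and wants executable mappings (illustrative only, not part of this diff):

	void *module_alloc(unsigned long size)
	{
		if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
			return NULL;
		/* allocate pages and map them inside the modules window */
		return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
					    GFP_KERNEL | __GFP_HIGHMEM,
					    PAGE_KERNEL_EXEC, -1,
					    __builtin_return_address(0));
	}

Passing __builtin_return_address(0) keeps /proc/vmallocinfo attributing the area to the real caller rather than to the wrapper.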
@@ -2204,17 +2205,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  * vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
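"Congruent" here means every returned area sits at its caller-specified offset from one common base; a sketch of the invariant the function establishes (hypothetical values, for illustration):

	/*
	 * e.g. nr_vms == 2:
	 *	vms[0]->addr == base + offsets[0]
	 *	vms[1]->addr == base + offsets[1]
	 * so vms[1]->addr - vms[0]->addr == offsets[1] - offsets[0],
	 * which lets the percpu core compute any unit's address from a
	 * single base pointer plus precomputed per-group offsets.
	 */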
@@ -2225,7 +2225,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2235,8 +2235,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2269,14 +2267,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
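With gfp_mask gone from the signature and the internal kzalloc() calls pinned to GFP_KERNEL, the function's only caller (the vmalloc-backed chunk code in mm/percpu.c / mm/percpu-vm.c of this era) just drops its trailing argument; roughly, quoting that call site as an assumption since it is outside this diff's scope:

	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);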
@@ -2457,13 +2455,8 @@ static int s_show(struct seq_file *m, void *p)
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);
 
-	if (v->caller) {
-		char buff[KSYM_SYMBOL_LEN];
-
-		seq_putc(m, ' ');
-		sprint_symbol(buff, (unsigned long)v->caller);
-		seq_puts(m, buff);
-	}
+	if (v->caller)
+		seq_printf(m, " %pS", v->caller);
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
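The %pS conversion leans on the kernel's vsnprintf() pointer extension, which resolves a code address to symbol+offset/size on its own, so the KSYM_SYMBOL_LEN stack buffer and the sprint_symbol()/seq_puts() pair collapse into one seq_printf(). The same pattern works with any printf-style kernel API, e.g. (illustrative):

	printk(KERN_DEBUG "vmalloc'ed by %pS\n", __builtin_return_address(0));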