Diffstat (limited to 'mm/vmalloc.c'):
 mm/vmalloc.c | 90
 1 file changed, 42 insertions(+), 48 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index eb5cc7d00c5a..f9b166732e70 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -748,7 +748,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask);
-	if (unlikely(IS_ERR(va))) {
+	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
 	}
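
The dropped unlikely() is redundant rather than a behavioral change:
IS_ERR() already carries the branch hint internally.  A minimal sketch
of the relevant definitions, paraphrased from include/linux/err.h of
this era (annotations such as __must_check omitted):

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

	static inline long IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}

So wrapping the call in unlikely() a second time bought nothing.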
@@ -1175,6 +1175,7 @@ void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
 {
 	vunmap_page_range(addr, addr + size);
 }
+EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
 
 /**
  * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
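
The new export makes the no-flush unmap path reachable from GPL
modules that manage their own kernel mappings.  A hypothetical
module-side helper (illustrative only; per this function's contract
the caller is responsible for flushing the TLB before the virtual
range is reused):

	static void my_unmap_range(unsigned long addr, unsigned long size)
	{
		unmap_kernel_range_noflush(addr, size);
		flush_tlb_kernel_range(addr, addr + size);
	}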
@@ -1315,13 +1316,6 @@ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
 				-1, GFP_KERNEL, caller);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
-				   int node, gfp_t gfp_mask)
-{
-	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
-				  node, gfp_mask, __builtin_return_address(0));
-}
-
 static struct vm_struct *find_vm_area(const void *addr)
 {
 	struct vmap_area *va;
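
get_vm_area_node() disappears outright, presumably because it had no
remaining users.  A former caller that did not actually depend on the
node argument could migrate along these lines (hypothetical
before/after sketch):

	/* before */
	area = get_vm_area_node(size, VM_IOREMAP, node, GFP_KERNEL);
	/* after */
	area = get_vm_area(size, VM_IOREMAP);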
@@ -1537,25 +1531,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:	allocation size
  * @align:	desired alignment
+ * @start:	vm area range start
+ * @end:	vm area range end
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or -1
@@ -1565,9 +1546,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1577,8 +1558,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 
 	if (!area)
 		return NULL;
@@ -1595,6 +1576,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:	allocation size
+ * @align:	desired alignment
+ * @gfp_mask:	flags for the page level allocator
+ * @prot:	protection mask for the allocated pages
+ * @node:	node to use for allocation or -1
+ * @caller:	caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags.  Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
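
The payoff of this refactor is that callers can now choose the virtual
range while reusing all of the vmalloc machinery; __vmalloc_node()
shrinks to a thin wrapper over the full VMALLOC_START..VMALLOC_END
span.  One plausible consumer is an architecture's module_alloc(),
which wants the same page allocation and mapping logic but over the
module area (a sketch; the exact GFP and protection flags are per-arch
policy):

	void *module_alloc(unsigned long size)
	{
		if (PAGE_ALIGN(size) > MODULES_LEN)
			return NULL;
		return __vmalloc_node_range(size, 1, MODULES_VADDR,
					MODULES_END,
					GFP_KERNEL | __GFP_HIGHMEM,
					PAGE_KERNEL_EXEC, -1,
					__builtin_return_address(0));
	}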
@@ -2203,17 +2205,16 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  * @sizes: array containing size of each area
  * @nr_vms: the number of areas to allocate
  * @align: alignment, all entries in @offsets and @sizes must be aligned to this
- * @gfp_mask: allocation mask
  *
  * Returns: kmalloc'd vm_struct pointer array pointing to allocated
  *	    vm_structs on success, %NULL on failure
  *
  * Percpu allocator wants to use congruent vm areas so that it can
  * maintain the offsets among percpu areas.  This function allocates
- * congruent vmalloc areas for it.  These areas tend to be scattered
- * pretty far, distance between two areas easily going up to
- * gigabytes.  To avoid interacting with regular vmallocs, these areas
- * are allocated from top.
+ * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
+ * be scattered pretty far, distance between two areas easily going up
+ * to gigabytes.  To avoid interacting with regular vmallocs, these
+ * areas are allocated from top.
  *
  * Despite its complicated look, this allocator is rather simple.  It
  * does everything top-down and scans areas from the end looking for
@@ -2224,7 +2225,7 @@ static unsigned long pvm_determine_end(struct vmap_area **pnext,
  */
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
-				     size_t align, gfp_t gfp_mask)
+				     size_t align)
 {
 	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
 	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
@@ -2234,8 +2235,6 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	unsigned long base, start, end, last_end;
 	bool purged = false;
 
-	gfp_mask &= GFP_RECLAIM_MASK;
-
 	/* verify parameters and allocate data structures */
 	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
 	for (last_area = 0, area = 0; area < nr_vms; area++) {
@@ -2268,14 +2267,14 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 		return NULL;
 	}
 
-	vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
-	vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
+	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
+	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
 	if (!vas || !vms)
 		goto err_free;
 
 	for (area = 0; area < nr_vms; area++) {
-		vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
-		vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
+		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
+		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
 		if (!vas[area] || !vms[area])
 			goto err_free;
 	}
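
Hard-coding GFP_KERNEL here matches what the percpu allocator, the
function's user, was already passing; the gfp_mask parameter and the
GFP_RECLAIM_MASK filtering were dead weight.  The call site would
adjust accordingly (a sketch of the caller-side change, assuming the
percpu code of this era):

	/* before */
	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size, GFP_KERNEL);
	/* after */
	vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
				pcpu_nr_groups, pcpu_atom_size);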
@@ -2456,13 +2455,8 @@ static int s_show(struct seq_file *m, void *p)
 	seq_printf(m, "0x%p-0x%p %7ld",
 		v->addr, v->addr + v->size, v->size);
 
-	if (v->caller) {
-		char buff[KSYM_SYMBOL_LEN];
-
-		seq_putc(m, ' ');
-		sprint_symbol(buff, (unsigned long)v->caller);
-		seq_puts(m, buff);
-	}
+	if (v->caller)
+		seq_printf(m, " %pS", v->caller);
 
 	if (v->nr_pages)
 		seq_printf(m, " pages=%d", v->nr_pages);
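
The %pS pointer extension makes vsnprintf() perform the symbol lookup
that the removed block did by hand, so the KSYM_SYMBOL_LEN stack
buffer goes away.  The same format works anywhere printk-style
formatting is available, e.g. (illustrative):

	pr_debug("vmalloc'ed from %pS\n", v->caller);

which prints something like "vmalloc_user+0x32/0x70", falling back to
the raw address when no symbol is known.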