Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 48 +++++++++++++++++++++++++-----------------------
 1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 107454312d5e..0fdf96803c5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * Only scan the relevant parts containing pointers to other objects
+	 * to avoid false negatives.
+	 */
+	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
 retry:
 	spin_lock(&vmap_area_lock);
 	/*
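
For context: kmemleak already tracks the slab-allocated vmap_area, but by default it would scan the whole object for pointers. The added kmemleak_scan_area() call narrows scanning to the pointer-bearing part starting at rb_node (SIZE_MAX meaning "through the end of the object"), so stale bytes elsewhere in the structure cannot look like references and mask real leaks. A minimal sketch of the same pattern, assuming a hypothetical struct tracked whose only pointer-bearing field is 'link':

#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/slab.h>

/*
 * Illustrative object: only 'link' holds pointers that keep other
 * allocations reachable; 'scratch' may contain stale values that look
 * like pointers and would hide real leaks if kmemleak scanned them.
 */
struct tracked {
	unsigned long scratch[16];
	struct list_head link;
};

static struct tracked *tracked_alloc(gfp_t gfp)
{
	struct tracked *t = kmalloc(sizeof(*t), gfp);

	if (!t)
		return NULL;
	/* Scan only from 'link' to the end of the kmalloc'ed object. */
	kmemleak_scan_area(&t->link, SIZE_MAX, gfp);
	return t;
}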
@@ -1546,7 +1552,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node, const void *caller)
+				 pgprot_t prot, int node)
 {
 	const int order = 0;
 	struct page **pages;
@@ -1560,13 +1566,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
-				PAGE_KERNEL, node, caller);
+				PAGE_KERNEL, node, area->caller);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	area->pages = pages;
-	area->caller = caller;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
@@ -1577,7 +1582,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
-		if (node < 0)
+		if (node == NUMA_NO_NODE)
 			page = alloc_page(tmp_mask);
 		else
 			page = alloc_pages_node(node, tmp_mask, order);
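
The old 'node < 0' test relied on NUMA_NO_NODE happening to be -1; naming the constant documents the intent. A short sketch of the convention (alloc_one_page() is a hypothetical helper; the two allocator calls are the real APIs used above):

#include <linux/gfp.h>
#include <linux/numa.h>

static struct page *alloc_one_page(int node, gfp_t gfp)
{
	/* NUMA_NO_NODE means "no placement preference". */
	if (node == NUMA_NO_NODE)
		return alloc_page(gfp);
	/* Otherwise ask for an order-0 page on the given node. */
	return alloc_pages_node(node, gfp, 0);
}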
@@ -1634,9 +1639,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!area)
 		goto fail;
 
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
 	if (!addr)
-		goto fail;
+		return NULL;
 
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -1646,11 +1651,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	clear_vm_uninitialized_flag(area);
 
 	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
+	 * A ref_count = 2 is needed because vm_struct allocated in
+	 * __get_vm_area_node() contains a reference to the virtual address of
+	 * the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 2, gfp_mask);
 
 	return addr;
 
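
kmemleak_alloc()'s third argument is min_count: how many references the scanner must find before it stops reporting the block as leaked. With the vmap_area no longer holding a scanned reference, only the caller's pointer and area->addr in the vm_struct remain, hence 3 becomes 2. A hedged sketch of the API for an allocator kmemleak cannot see on its own (raw_alloc()/raw_free() are illustrative names, not kernel functions):

#include <linux/kmemleak.h>
#include <linux/mm.h>

static void *raw_alloc(size_t size, gfp_t gfp)
{
	void *p = (void *)__get_free_pages(gfp, get_order(size));

	/*
	 * Raw pages are invisible to kmemleak; register them by hand.
	 * min_count = 1: leaked unless at least one reference is found.
	 */
	if (p)
		kmemleak_alloc(p, size, 1, gfp);
	return p;
}

static void raw_free(void *p, size_t size)
{
	kmemleak_free(p);
	free_pages((unsigned long)p, get_order(size));
}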
@@ -2563,6 +2568,11 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 	if (!counters)
 		return;
 
+	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+	smp_rmb();
+	if (v->flags & VM_UNINITIALIZED)
+		return;
+
 	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 
 	for (nr = 0; nr < v->nr_pages; nr++)
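
The smp_rmb() is only half of the contract: the writer must publish all vm_struct fields before clearing the flag readers test. A sketch of the paired writer side, reconstructed from the pairing comment rather than quoted from the tree:

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Make sure the stores that initialize vm (nr_pages, pages,
	 * caller, ...) are visible before the flag is cleared; pairs
	 * with the smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}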
@@ -2579,23 +2589,15 @@ static int s_show(struct seq_file *m, void *p)
 	struct vmap_area *va = p;
 	struct vm_struct *v;
 
-	if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+	/*
+	 * s_show() can race with remove_vm_area(); !VM_VM_AREA means the
+	 * vmap area is being torn down or is a vm_map_ram allocation.
+	 */
+	if (!(va->flags & VM_VM_AREA))
 		return 0;
 
-	if (!(va->flags & VM_VM_AREA)) {
-		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
-			(void *)va->va_start, (void *)va->va_end,
-			va->va_end - va->va_start);
-		return 0;
-	}
-
 	v = va->vm;
 
-	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
-	smp_rmb();
-	if (v->flags & VM_UNINITIALIZED)
-		return 0;
-
 	seq_printf(m, "0x%pK-0x%pK %7ld",
 		   v->addr, v->addr + v->size, v->size);
 
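
Note the behavioural side effect: vm_map_ram ranges, previously printed with a "vm_map_ram" tag, are now skipped entirely. Under vmap_area_lock, VM_VM_AREA is what guarantees va->vm points at a live vm_struct; a sketch of that invariant (va_to_vm() is an illustrative helper, not a kernel function):

static struct vm_struct *va_to_vm(struct vmap_area *va)
{
	lockdep_assert_held(&vmap_area_lock);

	/*
	 * Areas being torn down by remove_vm_area() and vm_map_ram()
	 * allocations never carry VM_VM_AREA, so va->vm must not be
	 * dereferenced for them.
	 */
	if (!(va->flags & VM_VM_AREA))
		return NULL;
	return va->vm;
}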