author		Andrey Ryabinin <aryabinin@virtuozzo.com>	2016-12-12 19:44:10 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 21:55:08 -0500
commit		bf22e37a641327e34681b7b6959d9646e3886770 (patch)
tree		1b672d3f1579da486197e2abd4df6990c4f61158 /mm/vmalloc.c
parent		0574ecd141df28d573d4364adec59766ddf5f38d (diff)
mm: add vfree_atomic()
We are going to use a sleeping lock for freeing vmap. However, some
vfree() users want to free memory from atomic (but not from interrupt)
context. For this we add vfree_atomic() - a deferred variant of vfree()
which can be used in any atomic context (except NMIs).

[akpm@linux-foundation.org: tweak comment grammar]
[aryabinin@virtuozzo.com: use raw_cpu_ptr() instead of this_cpu_ptr()]
Link: http://lkml.kernel.org/r/1481553981-3856-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1479474236-4139-5-git-send-email-hch@lst.de
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Jisheng Zhang <jszhang@marvell.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
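For illustration, a minimal usage sketch follows. It is not part of this
patch; struct foo_dev, its spinlock, and foo_drop_buf() are invented for
the example. The point is that vfree_atomic() may be called with a
spinlock held, where plain vfree() becomes unsafe once freeing takes a
sleeping lock:

    #include <linux/spinlock.h>
    #include <linux/vmalloc.h>

    struct foo_dev {
    	spinlock_t lock;
    	void *buf;		/* allocated with vmalloc() */
    };

    /* Hypothetical caller: drops the buffer from atomic context. */
    static void foo_drop_buf(struct foo_dev *d)
    {
    	spin_lock(&d->lock);	/* atomic context: must not sleep */
    	vfree_atomic(d->buf);	/* defers the real unmap to a workqueue */
    	d->buf = NULL;
    	spin_unlock(&d->lock);
    }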
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	42
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1f5501b43026..4ac776f10ad1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1486,7 +1486,39 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
 
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
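A note on the deferral mechanism: llist_add() returns true only when it
installs the first node on a previously empty list, so schedule_work()
is raised once per batch of deferred frees, not once per pointer.
Storing the llist_node in the area being freed is safe because a vmap
area is at least one page. The worker that drains the list is not shown
in this diff; a sketch of such a drain loop (modelled on the existing
free_work() in mm/vmalloc.c, written from memory, so treat it as
illustrative rather than the exact upstream code) could look like:

    static void free_work(struct work_struct *w)
    {
    	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
    	struct llist_node *n = llist_del_all(&p->list);	/* take the whole batch */

    	while (n) {
    		void *addr = n;		/* the node lives in the area being freed */

    		n = llist_next(n);	/* advance before the memory goes away */
    		__vunmap(addr, 1);	/* the real, possibly sleeping, free */
    	}
    }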