 include/linux/vmalloc.h |  1
 mm/vmalloc.c            | 42
 2 files changed, 37 insertions(+), 6 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943c..d68edffbf142 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
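
The header change above only exports the new entry point. A minimal, hypothetical usage sketch (frob_dev, frob_reset and the locking scheme are illustrative and not part of this patch): a caller that has to drop a vmalloc()'ed buffer while holding a spinlock cannot use plain vfree(), which may sleep when invoked from process context, but it can hand the buffer to vfree_atomic(), which only queues it for deferred freeing:

	#include <linux/vmalloc.h>
	#include <linux/spinlock.h>

	struct frob_dev {			/* hypothetical driver state */
		spinlock_t lock;
		void *buf;			/* allocated with vmalloc() */
	};

	static void frob_reset(struct frob_dev *dev)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->lock, flags);
		/*
		 * Atomic context: plain vfree() might sleep here, while
		 * vfree_atomic() only pushes the address onto a lockless
		 * list and lets a workqueue do the real unmapping later.
		 */
		vfree_atomic(dev->buf);
		dev->buf = NULL;
		spin_unlock_irqrestore(&dev->lock, flags);
	}

As the kernel-doc added below spells out, the only context this must not be called from is NMI.
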
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1f5501b43026..4ac776f10ad1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1486,7 +1486,39 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
 
+static inline void __vfree_deferred(const void *addr)
+{
+	/*
+	 * Use raw_cpu_ptr() because this can be called from preemptible
+	 * context. Preemption is absolutely fine here, because the llist_add()
+	 * implementation is lockless, so it works even if we are adding to
+	 * another cpu's list. schedule_work() should be fine with this too.
+	 */
+	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+	if (llist_add((struct llist_node *)addr, &p->list))
+		schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+	BUG_ON(in_nmi());
+
+	kmemleak_free(addr);
+
+	if (!addr)
+		return;
+	__vfree_deferred(addr);
+}
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
 	if (!addr)
 		return;
-	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-		if (llist_add((struct llist_node *)addr, &p->list))
-			schedule_work(&p->wq);
-	} else
+	if (unlikely(in_interrupt()))
+		__vfree_deferred(addr);
+	else
 		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
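
For orientation, __vfree_deferred() reuses the per-CPU deferred-free state that the old in-interrupt branch of vfree() was already feeding; llist_add() returns true only when it installs the first node on an empty list, so the work item is scheduled at most once per batch. A rough sketch of that pre-existing machinery in mm/vmalloc.c, reconstructed from context and not part of this patch (details may differ slightly from the real file):

	/* Per-CPU queue of vmalloc ranges whose freeing has been deferred. */
	struct vfree_deferred {
		struct llist_head list;		/* lockless list of deferred addresses */
		struct work_struct wq;		/* worker that performs the real __vunmap() */
	};
	static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

	/* Workqueue callback: drain this CPU's list and unmap every entry. */
	static void free_work(struct work_struct *w)
	{
		struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
		struct llist_node *llnode = llist_del_all(&p->list);

		while (llnode) {
			void *addr = llnode;

			llnode = llist_next(llnode);
			__vunmap(addr, 1);
		}
	}
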