author    Al Viro <viro@zeniv.linux.org.uk>  2013-03-10 20:14:08 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2013-03-10 21:18:21 -0400
commit    32fcfd40715ed13f7a80cbde49d097ddae20c8e2
tree      e4c211c1135a48ee853b3ec4d00623686317293a /mm/vmalloc.c
parent    6dbe51c251a327e012439c4772097a13df43c5b8
make vfree() safe to call from interrupt contexts
A bunch of RCU callbacks want to be able to do vfree() and end up with
rather kludgy schemes.  Just let vfree() do the right thing - put the
victim on an llist and schedule the actual __vunmap() via schedule_work(),
so that it runs from non-interrupt context.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
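For illustration, the kind of caller this change helps looks roughly like
the following (hypothetical names throughout, not part of the patch; RCU
callbacks typically run from softirq context, which is exactly where the
old vfree() was forbidden):

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	struct foo {
		struct rcu_head rcu;
		void *big_buf;		/* vmalloc()ed payload */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		struct foo *f = container_of(head, struct foo, rcu);

		/*
		 * Runs in softirq context.  With this patch, vfree()
		 * notices in_interrupt() and defers the teardown to a
		 * workqueue instead of tripping the old BUG_ON().
		 */
		vfree(f->big_buf);
		kfree(f);
	}

	/* update side: call_rcu(&f->rcu, foo_free_rcu); */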
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 45
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f751f2068c3..ef9bdf742273 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,10 +27,30 @@
 #include <linux/pfn.h>
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
+#include <linux/llist.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
+struct vfree_deferred {
+	struct llist_head list;
+	struct work_struct wq;
+};
+static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
+
+static void __vunmap(const void *, int);
+
+static void free_work(struct work_struct *w)
+{
+	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
+	struct llist_node *llnode = llist_del_all(&p->list);
+	while (llnode) {
+		void *p = llnode;
+		llnode = llist_next(llnode);
+		__vunmap(p, 1);
+	}
+}
+
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
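One detail worth noting in free_work() above: the deferred list costs no
memory of its own, because each block waiting to be freed doubles as its
own list node.  Every vmalloc() area is page-aligned and at least a page
long, so its first pointer-sized bytes can safely carry the llist linkage
until __vunmap() runs.  The same loop as in the hunk, with comments added
for exposition:

	while (llnode) {
		void *p = llnode;		/* the dead block itself */

		llnode = llist_next(llnode);	/* read next before p is freed */
		__vunmap(p, 1);			/* 1 => free the backing pages too */
	}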
@@ -1184,10 +1204,14 @@ void __init vmalloc_init(void)
 
 	for_each_possible_cpu(i) {
 		struct vmap_block_queue *vbq;
+		struct vfree_deferred *p;
 
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
+		p = &per_cpu(vfree_deferred, i);
+		init_llist_head(&p->list);
+		INIT_WORK(&p->wq, free_work);
 	}
 
 	/* Import existing vmlist entries. */
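Making vfree_deferred a per-CPU structure rather than a single global list
keeps the interrupt-side push cheap: llist_add() is a lock-free cmpxchg on
a CPU-local list head, and each CPU owns its own work item, so deferred
frees raised on one CPU never contend with another CPU's list.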
@@ -1511,7 +1535,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	kfree(area);
 	return;
 }
- 
+
 /**
  * vfree - release memory allocated by vmalloc()
  * @addr: memory base address
@@ -1520,15 +1544,25 @@ static void __vunmap(const void *addr, int deallocate_pages)
  * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  * NULL, no operation is performed.
  *
- * Must not be called in interrupt context.
+ * Must not be called in NMI context (strictly speaking, only if we don't
+ * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
+ * conventions for vfree() arch-dependent would be a really bad idea)
+ *
  */
 void vfree(const void *addr)
 {
-	BUG_ON(in_interrupt());
+	BUG_ON(in_nmi());
 
 	kmemleak_free(addr);
 
-	__vunmap(addr, 1);
+	if (!addr)
+		return;
+	if (unlikely(in_interrupt())) {
+		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		llist_add((struct llist_node *)addr, &p->list);
+		schedule_work(&p->wq);
+	} else
+		__vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
 
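Note also that when the in_interrupt() branch is taken, preemption is
necessarily disabled already (we are in hard or soft interrupt context),
so the plain __get_cpu_var() access needs no get_cpu_var()/put_cpu_var()
pair.  For contrast, the kind of open-coded deferral each caller
previously had to carry - the "kludgy schemes" of the commit message -
looked roughly like this (hypothetical sketch; note it even needs a
GFP_ATOMIC allocation that can fail, which the llist trick avoids
entirely):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/workqueue.h>

	struct deferred_vfree {
		struct work_struct work;
		void *buf;
	};

	static void deferred_vfree_fn(struct work_struct *work)
	{
		struct deferred_vfree *dv =
			container_of(work, struct deferred_vfree, work);

		vfree(dv->buf);
		kfree(dv);
	}

	/* from interrupt context: */
	static void vfree_from_irq(void *buf)
	{
		struct deferred_vfree *dv = kmalloc(sizeof(*dv), GFP_ATOMIC);

		if (!dv)
			return;	/* allocation failure leaks buf - part of the kludge */
		dv->buf = buf;
		INIT_WORK(&dv->work, deferred_vfree_fn);
		schedule_work(&dv->work);
	}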
@@ -1545,7 +1579,8 @@ void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
 	might_sleep();
-	__vunmap(addr, 0);
+	if (addr)
+		__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
 
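vunmap(), by contrast, keeps the stricter BUG_ON(in_interrupt()) plus
might_sleep(): it only tears down the mapping, and the underlying pages
remain the caller's to release, a pattern that is process-context by
construction.  The usual pairing, for reference (hypothetical sketch, not
from the patch):

	/* map an existing page array into vmalloc space */
	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

	/* ... use va ... */

	vunmap(va);	/* process context only */
	/* the caller still owns 'pages' and must free them itself */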