about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/filemap_xip.c3
-rw-r--r--mm/vmalloc.c45
3 files changed, 40 insertions, 10 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index e989fb1eaa72..7905fe721aa8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2546,7 +2546,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2546 2546
2547 BUG_ON(iocb->ki_pos != pos); 2547 BUG_ON(iocb->ki_pos != pos);
2548 2548
2549 sb_start_write(inode->i_sb);
2550 mutex_lock(&inode->i_mutex); 2549 mutex_lock(&inode->i_mutex);
2551 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); 2550 ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
2552 mutex_unlock(&inode->i_mutex); 2551 mutex_unlock(&inode->i_mutex);
@@ -2558,7 +2557,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2558 if (err < 0 && ret > 0) 2557 if (err < 0 && ret > 0)
2559 ret = err; 2558 ret = err;
2560 } 2559 }
2561 sb_end_write(inode->i_sb);
2562 return ret; 2560 return ret;
2563} 2561}
2564EXPORT_SYMBOL(generic_file_aio_write); 2562EXPORT_SYMBOL(generic_file_aio_write);
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index a912da6ddfd4..28fe26b64f8a 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -404,8 +404,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
404 loff_t pos; 404 loff_t pos;
405 ssize_t ret; 405 ssize_t ret;
406 406
407 sb_start_write(inode->i_sb);
408
409 mutex_lock(&inode->i_mutex); 407 mutex_lock(&inode->i_mutex);
410 408
411 if (!access_ok(VERIFY_READ, buf, len)) { 409 if (!access_ok(VERIFY_READ, buf, len)) {
@@ -439,7 +437,6 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
439 current->backing_dev_info = NULL; 437 current->backing_dev_info = NULL;
440 out_up: 438 out_up:
441 mutex_unlock(&inode->i_mutex); 439 mutex_unlock(&inode->i_mutex);
442 sb_end_write(inode->i_sb);
443 return ret; 440 return ret;
444} 441}
445EXPORT_SYMBOL_GPL(xip_file_write); 442EXPORT_SYMBOL_GPL(xip_file_write);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 72043d6c88c0..b12fd8612604 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,10 +27,30 @@
27#include <linux/pfn.h> 27#include <linux/pfn.h>
28#include <linux/kmemleak.h> 28#include <linux/kmemleak.h>
29#include <linux/atomic.h> 29#include <linux/atomic.h>
30#include <linux/llist.h>
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/tlbflush.h> 32#include <asm/tlbflush.h>
32#include <asm/shmparam.h> 33#include <asm/shmparam.h>
33 34
35struct vfree_deferred {
36 struct llist_head list;
37 struct work_struct wq;
38};
39static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
40
41static void __vunmap(const void *, int);
42
43static void free_work(struct work_struct *w)
44{
45 struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
46 struct llist_node *llnode = llist_del_all(&p->list);
47 while (llnode) {
48 void *p = llnode;
49 llnode = llist_next(llnode);
50 __vunmap(p, 1);
51 }
52}
53
34/*** Page table manipulation functions ***/ 54/*** Page table manipulation functions ***/
35 55
36static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) 56static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -1175,10 +1195,14 @@ void __init vmalloc_init(void)
1175 1195
1176 for_each_possible_cpu(i) { 1196 for_each_possible_cpu(i) {
1177 struct vmap_block_queue *vbq; 1197 struct vmap_block_queue *vbq;
1198 struct vfree_deferred *p;
1178 1199
1179 vbq = &per_cpu(vmap_block_queue, i); 1200 vbq = &per_cpu(vmap_block_queue, i);
1180 spin_lock_init(&vbq->lock); 1201 spin_lock_init(&vbq->lock);
1181 INIT_LIST_HEAD(&vbq->free); 1202 INIT_LIST_HEAD(&vbq->free);
1203 p = &per_cpu(vfree_deferred, i);
1204 init_llist_head(&p->list);
1205 INIT_WORK(&p->wq, free_work);
1182 } 1206 }
1183 1207
1184 /* Import existing vmlist entries. */ 1208 /* Import existing vmlist entries. */
@@ -1486,7 +1510,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
1486 kfree(area); 1510 kfree(area);
1487 return; 1511 return;
1488} 1512}
1489 1513
1490/** 1514/**
1491 * vfree - release memory allocated by vmalloc() 1515 * vfree - release memory allocated by vmalloc()
1492 * @addr: memory base address 1516 * @addr: memory base address
@@ -1495,15 +1519,25 @@ static void __vunmap(const void *addr, int deallocate_pages)
1495 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is 1519 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1496 * NULL, no operation is performed. 1520 * NULL, no operation is performed.
1497 * 1521 *
1498 * Must not be called in interrupt context. 1522 * Must not be called in NMI context (strictly speaking, only if we don't
1523 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
1524 * conventions for vfree() arch-dependent would be a really bad idea)
1525 *
1499 */ 1526 */
1500void vfree(const void *addr) 1527void vfree(const void *addr)
1501{ 1528{
1502 BUG_ON(in_interrupt()); 1529 BUG_ON(in_nmi());
1503 1530
1504 kmemleak_free(addr); 1531 kmemleak_free(addr);
1505 1532
1506 __vunmap(addr, 1); 1533 if (!addr)
1534 return;
1535 if (unlikely(in_interrupt())) {
1536 struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
1537 llist_add((struct llist_node *)addr, &p->list);
1538 schedule_work(&p->wq);
1539 } else
1540 __vunmap(addr, 1);
1507} 1541}
1508EXPORT_SYMBOL(vfree); 1542EXPORT_SYMBOL(vfree);
1509 1543
@@ -1520,7 +1554,8 @@ void vunmap(const void *addr)
1520{ 1554{
1521 BUG_ON(in_interrupt()); 1555 BUG_ON(in_interrupt());
1522 might_sleep(); 1556 might_sleep();
1523 __vunmap(addr, 0); 1557 if (addr)
1558 __vunmap(addr, 0);
1524} 1559}
1525EXPORT_SYMBOL(vunmap); 1560EXPORT_SYMBOL(vunmap);
1526 1561