author     Michael S. Tsirkin <mst@redhat.com>            2009-09-21 20:03:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2009-09-22 10:17:42 -0400
commit     3d2d827f5ca5e32816194119d5c980c7e04474a6 (patch)
tree       fe0e84669f5f20e1dff8e3dc6b191b4d5dfc0145
parent     425fbf047cc70bb30dff368a6da02c8c2d229318 (diff)
mm: move use_mm/unuse_mm from aio.c to mm/
Anyone who wants to do copy to/from user from a kernel thread needs
use_mm (like what fs/aio has). Move that into mm/, to make reusing and
exporting easier down the line, and make aio use it. The next intended
user, besides aio, will be vhost-net.
Acked-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
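As the log message notes, a kernel thread that wants copy_to_user()/copy_from_user() to resolve a user process's addresses must first adopt that process's mm with use_mm(). Below is a minimal sketch of that pattern; it is not part of this patch. The helper name, the owner_mm parameter, and how the mm is obtained (e.g. get_task_mm() on the issuing task, released later by the caller with mmput()) are illustrative assumptions.

/* Hypothetical helper (not part of this commit): borrow a user
 * process's mm so copy_from_user() can resolve its user addresses
 * from a kernel thread.  owner_mm is assumed to have been taken with
 * get_task_mm() by the caller, which drops it with mmput() when done.
 */
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>

static int kthread_copy_from_user_buf(struct mm_struct *owner_mm,
				      void *dst, const void __user *src,
				      size_t len)
{
	int ret = 0;

	use_mm(owner_mm);		/* adopt the issuer's address space */
	if (copy_from_user(dst, src, len))
		ret = -EFAULT;
	unuse_mm(owner_mm);		/* detach from it again */

	return ret;
}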
-rw-r--r--   fs/aio.c                      47
-rw-r--r--   include/linux/mmu_context.h    9
-rw-r--r--   mm/Makefile                     2
-rw-r--r--   mm/mmu_context.c               55
4 files changed, 66 insertions, 47 deletions
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -24,6 +24,7 @@
 #include <linux/file.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
+#include <linux/mmu_context.h>
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <linux/aio.h>
@@ -34,7 +35,6 @@
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
-#include <asm/mmu_context.h>
 
 #if DEBUG > 1
 #define dprintk		printk
@@ -595,51 +595,6 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 }
 
 /*
- * use_mm
- *	Makes the calling kernel thread take on the specified
- *	mm context.
- *	Called by the retry thread execute retries within the
- *	iocb issuer's mm context, so that copy_from/to_user
- *	operations work seamlessly for aio.
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void use_mm(struct mm_struct *mm)
-{
-	struct mm_struct *active_mm;
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	active_mm = tsk->active_mm;
-	atomic_inc(&mm->mm_count);
-	tsk->mm = mm;
-	tsk->active_mm = mm;
-	switch_mm(active_mm, mm, tsk);
-	task_unlock(tsk);
-
-	mmdrop(active_mm);
-}
-
-/*
- * unuse_mm
- *	Reverses the effect of use_mm, i.e. releases the
- *	specified mm context which was earlier taken on
- *	by the calling kernel thread
- *	(Note: this routine is intended to be called only
- *	from a kernel thread context)
- */
-static void unuse_mm(struct mm_struct *mm)
-{
-	struct task_struct *tsk = current;
-
-	task_lock(tsk);
-	tsk->mm = NULL;
-	/* active_mm is still 'mm' */
-	enter_lazy_tlb(mm, tsk);
-	task_unlock(tsk);
-}
-
-/*
  * Queue up a kiocb to be retried. Assumes that the kiocb
  * has already been marked as kicked, and places it on
  * the retry run list for the corresponding ioctx, if it
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
new file mode 100644
index 000000000000..70fffeba7495
--- /dev/null
+++ b/include/linux/mmu_context.h
@@ -0,0 +1,9 @@
+#ifndef _LINUX_MMU_CONTEXT_H
+#define _LINUX_MMU_CONTEXT_H
+
+struct mm_struct;
+
+void use_mm(struct mm_struct *mm);
+void unuse_mm(struct mm_struct *mm);
+
+#endif
diff --git a/mm/Makefile b/mm/Makefile
index a63bf59a0c77..728a9fde49d1 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
-			   page_isolation.o mm_init.o $(mmu-y)
+			   page_isolation.o mm_init.o mmu_context.o $(mmu-y)
 obj-y += init-mm.o
 
 obj-$(CONFIG_PROC_PAGE_MONITOR) += pagewalk.o
diff --git a/mm/mmu_context.c b/mm/mmu_context.c
new file mode 100644
index 000000000000..fd473b51c903
--- /dev/null
+++ b/mm/mmu_context.c
@@ -0,0 +1,55 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ *
+ * See ../COPYING for licensing terms.
+ */
+
+#include <linux/mm.h>
+#include <linux/mmu_context.h>
+#include <linux/sched.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * use_mm
+ *	Makes the calling kernel thread take on the specified
+ *	mm context.
+ *	Called by the retry thread execute retries within the
+ *	iocb issuer's mm context, so that copy_from/to_user
+ *	operations work seamlessly for aio.
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void use_mm(struct mm_struct *mm)
+{
+	struct mm_struct *active_mm;
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	active_mm = tsk->active_mm;
+	atomic_inc(&mm->mm_count);
+	tsk->mm = mm;
+	tsk->active_mm = mm;
+	switch_mm(active_mm, mm, tsk);
+	task_unlock(tsk);
+
+	mmdrop(active_mm);
+}
+
+/*
+ * unuse_mm
+ *	Reverses the effect of use_mm, i.e. releases the
+ *	specified mm context which was earlier taken on
+ *	by the calling kernel thread
+ *	(Note: this routine is intended to be called only
+ *	from a kernel thread context)
+ */
+void unuse_mm(struct mm_struct *mm)
+{
+	struct task_struct *tsk = current;
+
+	task_lock(tsk);
+	tsk->mm = NULL;
+	/* active_mm is still 'mm' */
+	enter_lazy_tlb(mm, tsk);
+	task_unlock(tsk);
+}
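For a long-running kernel thread (the vhost-net-style worker the log message anticipates), the expected pattern would be to call use_mm() once when the worker starts servicing a device and unuse_mm() when it stops. The skeleton below is an assumption about that pattern, not code from this commit; struct my_dev, my_worker() and my_service_pending_work() are invented names. It also reflects the design choice visible in unuse_mm() above: tsk->mm is cleared but active_mm keeps pointing at the borrowed mm in lazy-TLB mode, so the mm_count reference taken by use_mm() is only dropped later, when the thread switches to another mm or exits.

/* Hypothetical worker skeleton (not from this patch) showing how a
 * future user such as vhost-net could drive use_mm()/unuse_mm().
 * struct my_dev, my_worker() and my_service_pending_work() are
 * made-up names for illustration only.
 */
#include <linux/kthread.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <linux/sched.h>

struct my_dev {
	struct mm_struct *mm;	/* mm of the process that opened the device */
};

static void my_service_pending_work(struct my_dev *dev)
{
	/* would copy_to_user()/copy_from_user() against dev->mm here */
}

static int my_worker(void *data)
{
	struct my_dev *dev = data;

	use_mm(dev->mm);		/* adopt the owner's address space once */

	while (!kthread_should_stop()) {
		my_service_pending_work(dev);
		schedule();		/* real code would sleep on a waitqueue */
	}

	unuse_mm(dev->mm);		/* detach; mm stays as lazy active_mm */
	return 0;
}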