author     Steve Capper <steve.capper@linaro.org>          2014-10-09 18:29:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 22:26:01 -0400
commit     a0ad5496b2b3accf09ab9485ad0170e3b4b1cb27
tree       abb991c729c6ff57357f5e5d358b9d2967efe42f /arch
parent     bd951303be5b4df578c7f30ef78839f1a9d6658c
arm: mm: enable HAVE_RCU_TABLE_FREE logic
In order to implement fast_get_user_pages we need to ensure that the page
table walker is protected from page table pages being freed from under it.

This patch enables HAVE_RCU_TABLE_FREE: any page table pages belonging to
address spaces with multiple users will be freed via call_rcu_sched. This
means that disabling interrupts will block the free and protect the fast
gup page walker (sketched below).
Signed-off-by: Steve Capper <steve.capper@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dann Frazier <dann.frazier@canonical.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Christoffer Dall <christoffer.dall@linaro.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
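
For context, the protection scheme can be pictured with a minimal walker
sketch. This is not part of the patch; the function name and the
single-level walk are illustrative only. The key point is that an
RCU-sched grace period cannot complete while any CPU runs with interrupts
disabled, so a table page queued with call_rcu_sched cannot be freed while
the walk below is in progress:

#include <linux/mm.h>
#include <linux/irqflags.h>

/*
 * Illustrative sketch only (not part of this patch): a lockless walk
 * protected purely by disabled interrupts.
 */
static int walk_one_pgd_sketch(struct mm_struct *mm, unsigned long addr)
{
	unsigned long flags;
	pgd_t pgd;

	local_irq_save(flags);		/* blocks the RCU-deferred free */
	pgd = *pgd_offset(mm, addr);	/* table page cannot vanish here */
	/* ... a real fast gup walk descends to pud/pmd/pte the same way ... */
	local_irq_restore(flags);

	return !pgd_none(pgd);
}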
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig           |  1
-rw-r--r--  arch/arm/include/asm/tlb.h | 38
2 files changed, 37 insertions, 2 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 36d47987a9e0..eafe6aea64ff 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -62,6 +62,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f1a0dace3efe..3cadb726ec88 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -35,12 +35,39 @@
 
 #define MMU_GATHER_BUNDLE	8
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+	unsigned int		need_flush;
+#endif
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
 	unsigned long		start, end;
@@ -101,6 +128,9 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -129,6 +159,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
 	__tlb_alloc_page(tlb);
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 }
 
 static inline void
@@ -205,7 +239,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_add_flush(tlb, addr + SZ_1M);
 #endif
 
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
@@ -213,7 +247,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 {
 #ifdef CONFIG_ARM_LPAE
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 #endif
 }
 
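
For reference, the generic helpers this patch opts into live in
mm/memory.c, not in this diff. The condensed sketch below paraphrases the
kernel implementation of that era (helper names match the kernel's, but
details may differ slightly from the exact sources): tables from an mm
with multiple users are batched and freed after an RCU-sched grace period,
while a single-user mm frees immediately, since no concurrent walker can
exist there.

/* Condensed, paraphrased sketch of the generic mm/memory.c helpers. */

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch =
		container_of(head, struct mmu_table_batch, rcu);
	int i;

	/* Grace period has elapsed: no walker can still see these tables. */
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);	/* arch hook, above */

	free_page((unsigned long)batch);
}

static void tlb_remove_table_sync(void *arg)
{
	/* IPI delivery alone is the synchronization. */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * Batch allocation failed: interrupt every CPU (walkers run with
	 * IRQs off, so completion of the IPI acts as the grace period),
	 * then free the table page directly.
	 */
	smp_call_function(tlb_remove_table_sync, NULL, 1);
	__tlb_remove_table(table);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		/* Free the whole batch after an RCU-sched grace period. */
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->need_flush = 1;

	/* Sole user of the mm: no concurrent walker, free immediately. */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		__tlb_remove_table(table);
		return;
	}

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}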