diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-05-24 20:11:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-25 11:39:14 -0400 |
commit | 9e14f6741062b6e6a71de75b4375e14c3e92c213 (patch) | |
tree | fd4eced90101a1cda6e501bd022a35c384851590 /arch/arm | |
parent | 68f03921235c59507578a0165f87100d1120ec62 (diff) |
arm: mmu_gather rework
Fix up the arm mmu_gather code to conform to the new API.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/arm')
-rw-r--r-- | arch/arm/include/asm/tlb.h | 53 |
1 file changed, 37 insertions, 16 deletions
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 82dfe5d0c41e..265f908c4a6e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -41,12 +41,12 @@ | |||
41 | */ | 41 | */ |
42 | #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) | 42 | #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) |
43 | #define tlb_fast_mode(tlb) 0 | 43 | #define tlb_fast_mode(tlb) 0 |
44 | #define FREE_PTE_NR 500 | ||
45 | #else | 44 | #else |
46 | #define tlb_fast_mode(tlb) 1 | 45 | #define tlb_fast_mode(tlb) 1 |
47 | #define FREE_PTE_NR 0 | ||
48 | #endif | 46 | #endif |
49 | 47 | ||
48 | #define MMU_GATHER_BUNDLE 8 | ||
49 | |||
50 | /* | 50 | /* |
51 | * TLB handling. This allows us to remove pages from the page | 51 | * TLB handling. This allows us to remove pages from the page |
52 | * tables, and efficiently handle the TLB issues. | 52 | * tables, and efficiently handle the TLB issues. |
@@ -58,7 +58,9 @@ struct mmu_gather { | |||
58 | unsigned long range_start; | 58 | unsigned long range_start; |
59 | unsigned long range_end; | 59 | unsigned long range_end; |
60 | unsigned int nr; | 60 | unsigned int nr; |
61 | struct page *pages[FREE_PTE_NR]; | 61 | unsigned int max; |
62 | struct page **pages; | ||
63 | struct page *local[MMU_GATHER_BUNDLE]; | ||
62 | }; | 64 | }; |
63 | 65 | ||
64 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 66 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
@@ -97,26 +99,37 @@ static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) | |||
97 | } | 99 | } |
98 | } | 100 | } |
99 | 101 | ||
102 | static inline void __tlb_alloc_page(struct mmu_gather *tlb) | ||
103 | { | ||
104 | unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); | ||
105 | |||
106 | if (addr) { | ||
107 | tlb->pages = (void *)addr; | ||
108 | tlb->max = PAGE_SIZE / sizeof(struct page *); | ||
109 | } | ||
110 | } | ||
111 | |||
100 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | 112 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) |
101 | { | 113 | { |
102 | tlb_flush(tlb); | 114 | tlb_flush(tlb); |
103 | if (!tlb_fast_mode(tlb)) { | 115 | if (!tlb_fast_mode(tlb)) { |
104 | free_pages_and_swap_cache(tlb->pages, tlb->nr); | 116 | free_pages_and_swap_cache(tlb->pages, tlb->nr); |
105 | tlb->nr = 0; | 117 | tlb->nr = 0; |
118 | if (tlb->pages == tlb->local) | ||
119 | __tlb_alloc_page(tlb); | ||
106 | } | 120 | } |
107 | } | 121 | } |
108 | 122 | ||
109 | static inline struct mmu_gather * | 123 | static inline void |
110 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | 124 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) |
111 | { | 125 | { |
112 | struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); | ||
113 | |||
114 | tlb->mm = mm; | 126 | tlb->mm = mm; |
115 | tlb->fullmm = full_mm_flush; | 127 | tlb->fullmm = fullmm; |
116 | tlb->vma = NULL; | 128 | tlb->vma = NULL; |
129 | tlb->max = ARRAY_SIZE(tlb->local); | ||
130 | tlb->pages = tlb->local; | ||
117 | tlb->nr = 0; | 131 | tlb->nr = 0; |
118 | 132 | __tlb_alloc_page(tlb); | |
119 | return tlb; | ||
120 | } | 133 | } |
121 | 134 | ||
122 | static inline void | 135 | static inline void |
@@ -127,7 +140,8 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
127 | /* keep the page table cache within bounds */ | 140 | /* keep the page table cache within bounds */ |
128 | check_pgt_cache(); | 141 | check_pgt_cache(); |
129 | 142 | ||
130 | put_cpu_var(mmu_gathers); | 143 | if (tlb->pages != tlb->local) |
144 | free_pages((unsigned long)tlb->pages, 0); | ||
131 | } | 145 | } |
132 | 146 | ||
133 | /* | 147 | /* |
@@ -162,15 +176,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
162 | tlb_flush(tlb); | 176 | tlb_flush(tlb); |
163 | } | 177 | } |
164 | 178 | ||
165 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | 179 | static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) |
166 | { | 180 | { |
167 | if (tlb_fast_mode(tlb)) { | 181 | if (tlb_fast_mode(tlb)) { |
168 | free_page_and_swap_cache(page); | 182 | free_page_and_swap_cache(page); |
169 | } else { | 183 | return 1; /* avoid calling tlb_flush_mmu */ |
170 | tlb->pages[tlb->nr++] = page; | ||
171 | if (tlb->nr >= FREE_PTE_NR) | ||
172 | tlb_flush_mmu(tlb); | ||
173 | } | 184 | } |
185 | |||
186 | tlb->pages[tlb->nr++] = page; | ||
187 | VM_BUG_ON(tlb->nr > tlb->max); | ||
188 | return tlb->max - tlb->nr; | ||
189 | } | ||
190 | |||
191 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
192 | { | ||
193 | if (!__tlb_remove_page(tlb, page)) | ||
194 | tlb_flush_mmu(tlb); | ||
174 | } | 195 | } |
175 | 196 | ||
176 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | 197 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, |