author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:20 -0400
commit	9547d01bfb9c351dc19067f8a4cea9d3955f4125 (patch)
tree	3c32521dbbf380471e1eef3e11ae656b24164255 /include
parent	88c22088bf235f50b09a10bd9f022b0472bcb6b5 (diff)
mm: uninline large generic tlb.h functions
Some of these functions have grown beyond inline sanity, move them out-of-line.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Requested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
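To make the change concrete, here is a minimal sketch of what "uninlining" means for one of these functions: the header keeps only a prototype, while the body (taken verbatim from the inline version removed below) moves into a C file compiled once. The file placement is illustrative only; the out-of-line half of the patch lies outside include/ and is therefore not part of the diffstat below.

/* include/asm-generic/tlb.h: after the patch, only a declaration remains */
void tlb_flush_mmu(struct mmu_gather *tlb);

/* In some mm/ source file (illustrative placement): the old inline body,
 * now a single out-of-line definition. */
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;
	tlb_flush(tlb);			/* architecture-provided TLB invalidation */
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	tlb_table_flush(tlb);
#endif

	if (tlb_fast_mode(tlb))
		return;			/* fast mode frees pages immediately, nothing was gathered */

	/* free every gathered page (and its swap-cache entry), batch by batch */
	for (batch = &tlb->local; batch; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}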
Diffstat (limited to 'include')
-rw-r--r--	include/asm-generic/tlb.h	135
1 file changed, 13 insertions(+), 122 deletions(-)
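Before the diff itself, a caller-side sketch of how the gather API declared in this header is used, assembled from the prototypes and comments visible below. zap_range_example() and example_next_unmapped_page() are hypothetical names for illustration only; the real callers are the kernel's unmap and address-space teardown paths.

#include <asm/tlb.h>	/* provides struct mmu_gather and the declarations shown below */

/* Hypothetical page-walk helper, standing in for real PTE unmapping code. */
struct page *example_next_unmapped_page(struct mm_struct *mm,
					unsigned long *addr, unsigned long end);

static void zap_range_example(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	unsigned long addr = start;
	struct page *page;

	tlb_gather_mmu(&tlb, mm, false);	/* on-stack gather; false = not a full-mm teardown */

	while ((page = example_next_unmapped_page(mm, &addr, end)) != NULL) {
		/*
		 * __tlb_remove_page() queues the page and returns the number of
		 * free batch slots left; 0 means the batch is full and the
		 * pending TLB flush must happen before gathering more pages.
		 */
		if (!__tlb_remove_page(&tlb, page))
			tlb_flush_mmu(&tlb);
	}

	tlb_finish_mmu(&tlb, start, end);	/* final flush, then release the batches */
}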
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 5a946a08ff9..e58fa777fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -96,134 +96,25 @@ struct mmu_gather {
 	struct page *__pages[MMU_GATHER_BUNDLE];
 };
 
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
-  #define tlb_fast_mode(tlb) (tlb->fast_mode)
-#else
-  #define tlb_fast_mode(tlb) 1
-#endif
+#define HAVE_GENERIC_MMU_GATHER
 
-static inline int tlb_next_batch(struct mmu_gather *tlb)
+static inline int tlb_fast_mode(struct mmu_gather *tlb)
 {
-	struct mmu_gather_batch *batch;
-
-	batch = tlb->active;
-	if (batch->next) {
-		tlb->active = batch->next;
-		return 1;
-	}
-
-	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
-	if (!batch)
-		return 0;
-
-	batch->next = NULL;
-	batch->nr = 0;
-	batch->max = MAX_GATHER_BATCH;
-
-	tlb->active->next = batch;
-	tlb->active = batch;
-
+#ifdef CONFIG_SMP
+	return tlb->fast_mode;
+#else
+	/*
+	 * For UP we don't need to worry about TLB flush
+	 * and page free order so much..
+	 */
 	return 1;
-}
-
-/* tlb_gather_mmu
- *	Called to initialize an (on-stack) mmu_gather structure for page-table
- *	tear-down from @mm. The @fullmm argument is used when @mm is without
- *	users and we're going to destroy the full address space (exit/execve).
- */
-static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
-{
-	tlb->mm = mm;
-
-	tlb->fullmm = fullmm;
-	tlb->need_flush = 0;
-	tlb->fast_mode = (num_possible_cpus() == 1);
-	tlb->local.next = NULL;
-	tlb->local.nr = 0;
-	tlb->local.max = ARRAY_SIZE(tlb->__pages);
-	tlb->active = &tlb->local;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb->batch = NULL;
 #endif
 }
 
-static inline void
-tlb_flush_mmu(struct mmu_gather *tlb)
-{
-	struct mmu_gather_batch *batch;
-
-	if (!tlb->need_flush)
-		return;
-	tlb->need_flush = 0;
-	tlb_flush(tlb);
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-	tlb_table_flush(tlb);
-#endif
-
-	if (tlb_fast_mode(tlb))
-		return;
-
-	for (batch = &tlb->local; batch; batch = batch->next) {
-		free_pages_and_swap_cache(batch->pages, batch->nr);
-		batch->nr = 0;
-	}
-	tlb->active = &tlb->local;
-}
-
-/* tlb_finish_mmu
- *	Called at the end of the shootdown operation to free up any resources
- *	that were required.
- */
-static inline void
-tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
-{
-	struct mmu_gather_batch *batch, *next;
-
-	tlb_flush_mmu(tlb);
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	for (batch = tlb->local.next; batch; batch = next) {
-		next = batch->next;
-		free_pages((unsigned long)batch, 0);
-	}
-	tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- *	handling the additional races in SMP caused by other CPUs caching valid
- *	mappings in their TLBs. Returns the number of free page slots left.
- *	When out of page slots we must call tlb_flush_mmu().
- */
-static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
-{
-	struct mmu_gather_batch *batch;
-
-	tlb->need_flush = 1;
-
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu() */
-	}
-
-	batch = tlb->active;
-	batch->pages[batch->nr++] = page;
-	VM_BUG_ON(batch->nr > batch->max);
-	if (batch->nr == batch->max) {
-		if (!tlb_next_batch(tlb))
-			return 0;
-	}
-
-	return batch->max - batch->nr;
-}
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_flush_mmu(struct mmu_gather *tlb);
+void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
+int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
 
 /* tlb_remove_page
  *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when