author	Nick Piggin <npiggin@suse.de>	2009-01-05 22:40:44 -0500
committer	Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>	2009-01-05 22:40:44 -0500
commit	d2859751cd0bf586941ffa7308635a293f943c17 (patch)
tree	24f5f4ba78bf3722609e20a9346976226b95878a /fs/xfs
parent	195ec037ff8f6fa800616e0dad8d57a98b6fb37e (diff)
[XFS] remove old vmap cache
XFS's vmap batching simply defers a number (up to 64) of vunmaps, and keeps track of them in a list. To purge the batch, it just goes through the list and calls vunmap on each one. This is pretty poor: a global TLB flush is generally still performed on each vunmap, with the most expensive parts of the operation being the broadcast IPIs and locking involved in the SMP callouts, and the locking involved in the vmap management -- none of these are avoided by just batching up the calls. I'm actually surprised it ever made much difference.

(Now that the lazy vmap allocator is upstream, this description is not quite right, but the vunmap batching still doesn't seem to do much.)

Rip all this logic out of XFS completely. I will improve vmap performance and scalability directly in a subsequent patch.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reviewed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
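For readers who stop at the message, the batching being removed boils down to the pattern sketched below. This is a condensed, renamed paraphrase of the free_address()/purge_addresses() pair that the diff deletes, not new code; the deferred_* names are illustrative only. The comments mark where the cost actually sits: every queued address still ends in its own vunmap(), hence its own global TLB flush and cross-CPU IPIs, which is why deferring the calls buys almost nothing.

/*
 * Condensed sketch of the vunmap batching this patch removes.
 * Illustrative names; the real code is in the diff below.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

struct deferred_unmap {
	void			*vm_addr;
	struct deferred_unmap	*next;
};

static struct deferred_unmap	*deferred_head;
static int			deferred_len;
static DEFINE_SPINLOCK(deferred_lock);

/* Queue an address instead of unmapping it right away. */
static void defer_vunmap(void *addr)
{
	struct deferred_unmap *e = kmalloc(sizeof(*e), GFP_NOWAIT);

	if (!e) {
		vunmap(addr);		/* allocation failed: unmap now */
		return;
	}
	spin_lock(&deferred_lock);
	e->vm_addr = addr;
	e->next = deferred_head;
	deferred_head = e;
	deferred_len++;
	spin_unlock(&deferred_lock);
}

/* Replay the queue; each entry still pays for a full vunmap(). */
static void purge_deferred(void)
{
	struct deferred_unmap *e, *old;

	spin_lock(&deferred_lock);
	e = deferred_head;
	deferred_head = NULL;
	deferred_len = 0;
	spin_unlock(&deferred_lock);

	while ((old = e) != NULL) {
		vunmap(e->vm_addr);	/* global TLB flush + IPIs per call */
		e = e->next;
		kfree(old);
	}
}

Callers checked the list length (the removed code purged once it exceeded 64 entries) and drained the queue in the purge function; after this patch they simply call vunmap() directly.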
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	75
1 file changed, 1 insertion, 74 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index cb329edc925b..0b2177a9fbdc 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,75 +166,6 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-	void		*vm_addr;
-	struct a_list	*next;
-} a_list_t;
-
-static a_list_t		*as_free_head;
-static int		as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-	void		*addr)
-{
-	a_list_t	*aentry;
-
-#ifdef CONFIG_XEN
-	/*
-	 * Xen needs to be able to make sure it can get an exclusive
-	 * RO mapping of pages it wants to turn into a pagetable. If
-	 * a newly allocated page is also still being vmap()ed by xfs,
-	 * it will cause pagetable construction to fail. This is a
-	 * quick workaround to always eagerly unmap pages so that Xen
-	 * is happy.
-	 */
-	vunmap(addr);
-	return;
-#endif
-
-	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-	if (likely(aentry)) {
-		spin_lock(&as_lock);
-		aentry->next = as_free_head;
-		aentry->vm_addr = addr;
-		as_free_head = aentry;
-		as_list_len++;
-		spin_unlock(&as_lock);
-	} else {
-		vunmap(addr);
-	}
-}
-
-STATIC void
-purge_addresses(void)
-{
-	a_list_t	*aentry, *old;
-
-	if (as_free_head == NULL)
-		return;
-
-	spin_lock(&as_lock);
-	aentry = as_free_head;
-	as_free_head = NULL;
-	as_list_len = 0;
-	spin_unlock(&as_lock);
-
-	while ((old = aentry) != NULL) {
-		vunmap(aentry->vm_addr);
-		aentry = aentry->next;
-		kfree(old);
-	}
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -333,7 +264,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		free_address(bp->b_addr - bp->b_offset);
+		vunmap(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -455,8 +386,6 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		if (as_list_len > 64)
-			purge_addresses();
 		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
 					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
@@ -1743,8 +1672,6 @@ xfsbufd(
 			count++;
 		}
 
-		if (as_list_len > 0)
-			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 