author    Alex Elder <aelder@sgi.com>    2010-03-16 14:55:54 -0400
committer Alex Elder <aelder@sgi.com>    2010-03-16 16:40:19 -0400
commit    cd9640a70d542ca026a812ac34733799da0a39c9 (patch)
tree      497c3ab912f25994d7adc4c7a860cf01bb78c95c /fs/xfs
parent    57d54889cd00db2752994b389ba714138652e60c (diff)
xfs: remove old vmap cache
Re-apply a commit that had been reverted due to regressions that have
since been fixed.

    Original commit: d2859751cd0bf586941ffa7308635a293f943c17
    Author: Nick Piggin <npiggin@suse.de>
    Date: Tue, 6 Jan 2009 14:40:44 +1100

    XFS's vmap batching simply defers a number (up to 64) of vunmaps,
    and keeps track of them in a list. To purge the batch, it just goes
    through the list and calls vunmap on each one.

    This is pretty poor: a global TLB flush is generally still performed
    on each vunmap, with the most expensive parts of the operation being
    the broadcast IPIs and locking involved in the SMP callouts, and the
    locking involved in the vmap management -- none of these are avoided
    by just batching up the calls. I'm actually surprised it ever made
    much difference.

    (Now that the lazy vmap allocator is upstream, this description is
    not quite right, but the vunmap batching still doesn't seem to do
    much.)

    Rip all this logic out of XFS completely. I will improve vmap
    performance and scalability directly in a subsequent patch.

    Signed-off-by: Nick Piggin <npiggin@suse.de>
    Reviewed-by: Christoph Hellwig <hch@infradead.org>
    Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>

The only change I made was to use the "new" xfs_buf_is_vmapped()
function in a place it had been open-coded in the original.

Modified-by: Alex Elder <aelder@sgi.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
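[Editor's note: the "lazy vmap allocator" referred to above is the
vm_map_ram()/vm_unmap_ram() interface, which batches and defers the global
TLB flush inside the vmalloc layer itself rather than in each caller. Below
is a minimal sketch of mapping a multi-page buffer on that interface,
assuming the 2.6.33-era signatures; the helper names are hypothetical and
this is not the actual follow-up XFS patch.]

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /*
     * Map 'count' pages into contiguous kernel virtual space.
     * Unlike vmap(), small vm_map_ram() mappings come from a
     * per-CPU block allocator, and vm_unmap_ram() defers the
     * expensive global TLB flush so its cost is amortized
     * across many unmaps.
     */
    static void *buf_map_pages(struct page **pages, unsigned int count)
    {
            /* -1: no NUMA node preference (assumed era-correct signature) */
            return vm_map_ram(pages, count, -1, PAGE_KERNEL);
    }

    static void buf_unmap_pages(void *addr, unsigned int count)
    {
            vm_unmap_ram(addr, count);      /* flush is lazy, not immediate */
    }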
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  |  76
1 file changed, 1 insertion(+), 75 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 6f76ba85f193..81f4ef27de3e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -168,75 +168,6 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
- */
-
-typedef struct a_list {
-        void            *vm_addr;
-        struct a_list   *next;
-} a_list_t;
-
-static a_list_t         *as_free_head;
-static int              as_list_len;
-static DEFINE_SPINLOCK(as_lock);
-
-/*
- * Try to batch vunmaps because they are costly.
- */
-STATIC void
-free_address(
-        void            *addr)
-{
-        a_list_t        *aentry;
-
-#ifdef CONFIG_XEN
-        /*
-         * Xen needs to be able to make sure it can get an exclusive
-         * RO mapping of pages it wants to turn into a pagetable.  If
-         * a newly allocated page is also still being vmap()ed by xfs,
-         * it will cause pagetable construction to fail.  This is a
-         * quick workaround to always eagerly unmap pages so that Xen
-         * is happy.
-         */
-        vunmap(addr);
-        return;
-#endif
-
-        aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
-        if (likely(aentry)) {
-                spin_lock(&as_lock);
-                aentry->next = as_free_head;
-                aentry->vm_addr = addr;
-                as_free_head = aentry;
-                as_list_len++;
-                spin_unlock(&as_lock);
-        } else {
-                vunmap(addr);
-        }
-}
-
-STATIC void
-purge_addresses(void)
-{
-        a_list_t        *aentry, *old;
-
-        if (as_free_head == NULL)
-                return;
-
-        spin_lock(&as_lock);
-        aentry = as_free_head;
-        as_free_head = NULL;
-        as_list_len = 0;
-        spin_unlock(&as_lock);
-
-        while ((old = aentry) != NULL) {
-                vunmap(aentry->vm_addr);
-                aentry = aentry->next;
-                kfree(old);
-        }
-}
-
-/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -337,7 +268,7 @@ xfs_buf_free(
         uint            i;
 
         if (xfs_buf_is_vmapped(bp))
-                free_address(bp->b_addr - bp->b_offset);
+                vunmap(bp->b_addr - bp->b_offset);
 
         for (i = 0; i < bp->b_page_count; i++) {
                 struct page     *page = bp->b_pages[i];
@@ -457,8 +388,6 @@ _xfs_buf_map_pages(
                 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
                 bp->b_flags |= XBF_MAPPED;
         } else if (flags & XBF_MAPPED) {
-                if (as_list_len > 64)
-                        purge_addresses();
                 bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
                                   VM_MAP, PAGE_KERNEL);
                 if (unlikely(bp->b_addr == NULL))
@@ -1955,9 +1884,6 @@ xfsbufd(
                         xfs_buf_iostrategy(bp);
                         count++;
                 }
-
-                if (as_list_len > 0)
-                        purge_addresses();
                 if (count)
                         blk_run_address_space(target->bt_mapping);
 