Diffstat (limited to 'fs')
 fs/xfs/linux-2.6/xfs_buf.c | 79 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 76 insertions(+), 3 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d71dc44e21ed..cb329edc925b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,6 +166,75 @@ test_page_region(
 }
 
 /*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+	void		*vm_addr;
+	struct a_list	*next;
+} a_list_t;
+
+static a_list_t	*as_free_head;
+static int	as_list_len;
+static DEFINE_SPINLOCK(as_lock);
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+	void		*addr)
+{
+	a_list_t	*aentry;
+
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
+	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+	if (likely(aentry)) {
+		spin_lock(&as_lock);
+		aentry->next = as_free_head;
+		aentry->vm_addr = addr;
+		as_free_head = aentry;
+		as_list_len++;
+		spin_unlock(&as_lock);
+	} else {
+		vunmap(addr);
+	}
+}
+
+STATIC void
+purge_addresses(void)
+{
+	a_list_t	*aentry, *old;
+
+	if (as_free_head == NULL)
+		return;
+
+	spin_lock(&as_lock);
+	aentry = as_free_head;
+	as_free_head = NULL;
+	as_list_len = 0;
+	spin_unlock(&as_lock);
+
+	while ((old = aentry) != NULL) {
+		vunmap(aentry->vm_addr);
+		aentry = aentry->next;
+		kfree(old);
+	}
+}
+
+/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -264,7 +333,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+		free_address(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,8 +455,10 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-					-1, PAGE_KERNEL);
+		if (as_list_len > 64)
+			purge_addresses();
+		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1672,6 +1743,8 @@ xfsbufd(
 			count++;
 		}
 
+		if (as_list_len > 0)
+			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
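
Illustrative note (not part of the patch): the core idea above is to defer costly vunmap() calls onto a spinlock-protected free list and release them in batches, purging either when the list grows past a threshold (in _xfs_buf_map_pages) or periodically (in xfsbufd). Below is a minimal userspace C sketch of the same pattern, assuming munmap() stands in for vunmap() and a pthread mutex for the spinlock; the names and the extra length field are hypothetical, needed only because munmap(), unlike vunmap(), does not track mapping sizes itself.

#include <pthread.h>
#include <stdlib.h>
#include <sys/mman.h>

struct a_entry {
	void		*addr;		/* mapping queued for release */
	size_t		len;		/* munmap() needs the length */
	struct a_entry	*next;
};

static struct a_entry	*free_head;
static int		free_len;
static pthread_mutex_t	free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Queue a mapping for deferred release; unmap eagerly if out of memory. */
static void free_address(void *addr, size_t len)
{
	struct a_entry *e = malloc(sizeof(*e));

	if (!e) {
		munmap(addr, len);
		return;
	}
	pthread_mutex_lock(&free_lock);
	e->addr = addr;
	e->len = len;
	e->next = free_head;
	free_head = e;
	free_len++;
	pthread_mutex_unlock(&free_lock);
}

/* Detach the whole list under the lock, then unmap outside it. */
static void purge_addresses(void)
{
	struct a_entry *e, *old;

	if (free_head == NULL)		/* unlocked peek, as in the patch */
		return;

	pthread_mutex_lock(&free_lock);
	e = free_head;
	free_head = NULL;
	free_len = 0;
	pthread_mutex_unlock(&free_lock);

	while ((old = e) != NULL) {
		munmap(e->addr, e->len);
		e = e->next;
		free(old);
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++) {
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED)
			return 1;
		/* threshold-driven purge, as in _xfs_buf_map_pages() */
		if (free_len > 64)
			purge_addresses();
		free_address(p, 4096);
	}
	purge_addresses();	/* periodic purge, as in xfsbufd() */
	return 0;
}

As in the patch, the purge detaches the whole list inside the critical section and performs the expensive unmap calls outside it, keeping the lock hold time short; the threshold test reads the counter without the lock, which is safe as a heuristic because a stale value merely delays or repeats a purge.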