author     Felix Blyakher <felixb@sgi.com>    2009-02-18 16:56:51 -0500
committer  Felix Blyakher <felixb@sgi.com>    2009-02-18 16:57:51 -0500
commit     3a011a171906a3a51a43bb860fb7c66a64cab140 (patch)
tree       e4f78a447196f19e1a94d9c8b2fc36bc0e0af249 /fs/xfs/linux-2.6
parent     cf7dab801796b9ee52a6dc99888a66bf476538ec (diff)
Revert "[XFS] remove old vmap cache"
This reverts commit d2859751cd0bf586941ffa7308635a293f943c17. That commit
caused a regression. We'll try to fix the use of the new vmap API for the
next release.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Felix Blyakher <felixb@sgi.com>
Diffstat (limited to 'fs/xfs/linux-2.6')
-rw-r--r--   fs/xfs/linux-2.6/xfs_buf.c   75
1 file changed, 74 insertions(+), 1 deletion(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 0b2177a9fbdc..cb329edc925b 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,6 +166,75 @@ test_page_region(
 }
 
 /*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+	void		*vm_addr;
+	struct a_list	*next;
+} a_list_t;
+
+static a_list_t	*as_free_head;
+static int	as_list_len;
+static DEFINE_SPINLOCK(as_lock);
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+	void		*addr)
+{
+	a_list_t	*aentry;
+
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
+	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+	if (likely(aentry)) {
+		spin_lock(&as_lock);
+		aentry->next = as_free_head;
+		aentry->vm_addr = addr;
+		as_free_head = aentry;
+		as_list_len++;
+		spin_unlock(&as_lock);
+	} else {
+		vunmap(addr);
+	}
+}
+
+STATIC void
+purge_addresses(void)
+{
+	a_list_t	*aentry, *old;
+
+	if (as_free_head == NULL)
+		return;
+
+	spin_lock(&as_lock);
+	aentry = as_free_head;
+	as_free_head = NULL;
+	as_list_len = 0;
+	spin_unlock(&as_lock);
+
+	while ((old = aentry) != NULL) {
+		vunmap(aentry->vm_addr);
+		aentry = aentry->next;
+		kfree(old);
+	}
+}
+
+/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -264,7 +333,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vunmap(bp->b_addr - bp->b_offset);
+		free_address(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,6 +455,8 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
+		if (as_list_len > 64)
+			purge_addresses();
 		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
 					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
@@ -1672,6 +1743,8 @@ xfsbufd(
 			count++;
 		}
 
+		if (as_list_len > 0)
+			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
 
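
Note on what this revert restores: the old vmap cache batches the costly vunmap() calls. xfs_buf_free() hands the mapped address to free_address(), which pushes it onto a spinlock-protected free list instead of unmapping immediately, and purge_addresses() drains that list either when it grows past 64 entries in _xfs_buf_map_pages() or at the end of each xfsbufd() pass. Below is a minimal user-space sketch of the same deferred-free pattern, assuming a pthread mutex in place of the spinlock and plain free() in place of vunmap(); the names deferred_free() and drain_deferred() are illustrative and not part of the kernel code.

#include <pthread.h>
#include <stdlib.h>

/* Node on the deferred-free list; mirrors a_list_t in the patch above. */
struct deferred {
	void		*addr;
	struct deferred	*next;
};

static struct deferred *free_head;
static int free_len;
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Queue an address for later release; stands in for free_address(). */
static void deferred_free(void *addr)
{
	struct deferred *entry = malloc(sizeof(*entry));

	if (!entry) {
		/* No memory for a list node: release eagerly instead. */
		free(addr);
		return;
	}
	pthread_mutex_lock(&free_lock);
	entry->addr = addr;
	entry->next = free_head;
	free_head = entry;
	free_len++;
	pthread_mutex_unlock(&free_lock);
}

/* Release everything batched so far; stands in for purge_addresses(). */
static void drain_deferred(void)
{
	struct deferred *entry, *old;

	if (free_head == NULL)
		return;

	/* Detach the whole list under the lock, then free it unlocked. */
	pthread_mutex_lock(&free_lock);
	entry = free_head;
	free_head = NULL;
	free_len = 0;
	pthread_mutex_unlock(&free_lock);

	while ((old = entry) != NULL) {
		free(old->addr);
		entry = entry->next;
		free(old);
	}
}

The design point the revert goes back to is the same as in the kernel code: the producer path stays cheap (one allocation and a few pointer writes under the lock), and the expensive cleanup is amortized by draining in batches from a periodic worker or when the list grows too long.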