author    Jeremy Fitzhardinge <jeremy@xensource.com>    2007-10-16 14:51:31 -0400
committer Jeremy Fitzhardinge <jeremy@goop.org>         2007-10-16 14:51:31 -0400
commit    ace2e92e193126711cb3a83a3752b2c5b8396950 (patch)
tree      203e4ceb8456aec58bd27b07d78b12c90012470d
parent    a122d6230e8d8ac7cffdf0bc9cc4b256b928fe49 (diff)
xfs: eagerly remove vmap mappings to avoid upsetting Xen
XFS leaves stray mappings around when it vmaps memory to make it
virtually contiguous.  This upsets Xen if one of those pages is being
recycled into a pagetable, since it finds an extra writable mapping of
the page.

This patch solves the problem in a brute force way, by making XFS
always eagerly unmap its mappings.  David Chinner says this shouldn't
have any performance impact on filesystems with default block sizes;
it will only affect filesystems with large block sizes.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Acked-by: David Chinner <dgc@sgi.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: XFS masters <xfs-masters@oss.sgi.com>
Cc: Stable kernel <stable@kernel.org>
Cc: Morten Bøgeskov <xen-users@morten.bogeskov.dk>
Cc: Mark Williamson <mark.williamson@cl.cam.ac.uk>
-rw-r--r--    fs/xfs/linux-2.6/xfs_buf.c    13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 39f44ee572e8..455e042e0c10 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
 	a_list_t	*aentry;
 
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable.  If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail.  This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
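For context, here is a sketch of how free_address() reads with this hunk
applied.  It is a minimal reconstruction rather than the verbatim file: the
deferred-unmap bookkeeping (a_list_t, as_free_head, as_list_len and the
aentry field names) is assumed from the surrounding xfs_buf.c code that the
diff only shows in part; only the #ifdef CONFIG_XEN block is what this patch
actually adds.

/*
 * Sketch of free_address() after this change.  The list fields and
 * globals below (as_free_head, as_list_len, aentry->vm_addr,
 * aentry->next) are assumed from the surrounding xfs_buf.c machinery.
 */
static void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

#ifdef CONFIG_XEN
	/*
	 * Unmap immediately so Xen never sees a stray writable alias
	 * when it turns the underlying page into a pagetable.
	 */
	vunmap(addr);
	return;
#endif

	/* Otherwise queue the address for a later, batched vunmap(). */
	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		/* Allocation failed: fall back to unmapping right away. */
		vunmap(addr);
	}
}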