 -rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 13 +++++++++++++
 1 file changed, 13 insertions(+), 0 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 8d9298c99763..d5b2d2bbf5ff 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -187,6 +187,19 @@ free_address(
 {
 	a_list_t	*aentry;
 
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable. If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail. This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
 	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
 	if (likely(aentry)) {
 		spin_lock(&as_lock);
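
For context, the path that the new CONFIG_XEN branch short-circuits defers unmapping: free_address() normally queues the address on a list under a spinlock and a work item vunmap()s the queued addresses later in a batch (amortising the cost of the TLB flushes that vunmap() triggers). The sketch below only illustrates that deferred-unmap pattern; the identifiers (struct deferred_unmap, pending_unmaps, unmap_lock, unmap_work, defer_unmap) are hypothetical stand-ins, not the a_list_t/as_lock/as_free_head names used in xfs_buf.c, and this is not the actual XFS implementation.

	/*
	 * Sketch of a deferred-vunmap scheme. All names here are
	 * illustrative, not the ones used in fs/xfs/linux-2.6/xfs_buf.c.
	 */
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/vmalloc.h>
	#include <linux/workqueue.h>

	struct deferred_unmap {
		struct list_head	list;
		void			*vm_addr;
	};

	static LIST_HEAD(pending_unmaps);
	static DEFINE_SPINLOCK(unmap_lock);

	/* Batch worker: vunmap() everything queued so far. */
	static void unmap_worker(struct work_struct *work)
	{
		struct deferred_unmap *entry, *tmp;
		LIST_HEAD(batch);

		spin_lock(&unmap_lock);
		list_splice_init(&pending_unmaps, &batch);
		spin_unlock(&unmap_lock);

		list_for_each_entry_safe(entry, tmp, &batch, list) {
			vunmap(entry->vm_addr);	/* mapping torn down some time later */
			kfree(entry);
		}
	}
	static DECLARE_WORK(unmap_work, unmap_worker);

	/* Queue an address for later unmapping; fall back to eager vunmap(). */
	static void defer_unmap(void *addr)
	{
		struct deferred_unmap *entry = kmalloc(sizeof(*entry), GFP_NOWAIT);

		if (likely(entry)) {
			entry->vm_addr = addr;
			spin_lock(&unmap_lock);
			list_add(&entry->list, &pending_unmaps);
			spin_unlock(&unmap_lock);
			schedule_work(&unmap_work);
		} else {
			vunmap(addr);	/* allocation failed: unmap immediately */
		}
	}

With such a scheme a page can remain vmap()ed for a while after XFS is done with it, which is exactly what upsets Xen when it tries to pin that page as a pagetable; the patch trades away the batching under CONFIG_XEN so the kernel mapping is already gone by then.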
