author      Jan Kara <jack@suse.cz>            2016-12-12 21:34:12 -0500
committer   Theodore Ts'o <tytso@mit.edu>      2016-12-12 21:34:12 -0500
commit      0cb80b4847553582830a59da2c022c37a1f4a119 (patch)
tree        e16efd7948740c035aef5fad3af596089e6e03da
parent      73b92a2a5e97d17cc4d5c4fe9d724d3273fb6fd2 (diff)
dax: Fix sleep in atomic context in grab_mapping_entry()
Commit 642261ac995e: "dax: add struct iomap based DAX PMD support" introduced unmapping of page tables when a huge page needs to be split in grab_mapping_entry(). However, the unmapping happens after the radix_tree_preload() call, which disables preemption, and thus unmap_mapping_range() tries to acquire the i_mmap lock in atomic context, which is a bug. Fix the problem by moving the unmapping before the radix_tree_preload() call.

Fixes: 642261ac995e01d7837db1f4b90181496f7e6835
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
-rw-r--r--   fs/dax.c | 15 +++++++--------
1 file changed, 7 insertions(+), 8 deletions(-)
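The locking contract behind the bug, as a condensed C sketch (illustrative only, not code lifted from fs/dax.c; gfp and start stand in for mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM and (index << PAGE_SHIFT) & PMD_MASK, and error handling is elided):

        /* Ordering before this patch: sleep in atomic context. */
        err = radix_tree_preload(gfp);  /* on success, returns with preemption disabled */
        if (pmd_downgrade)
                /* takes the i_mmap lock, a sleeping lock: bug in atomic context */
                unmap_mapping_range(mapping, start, PMD_SIZE, 0);

        /* Ordering after this patch: do the possibly-sleeping unmap first. */
        if (pmd_downgrade)
                /* safe: preemption is still enabled here */
                unmap_mapping_range(mapping, start, PMD_SIZE, 0);
        err = radix_tree_preload(gfp);  /* the atomic section only starts here */

Preemption stays disabled from a successful radix_tree_preload() until the matching radix_tree_preload_end(), so anything that may sleep has to run before the preload, which is exactly what the diff below does.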
diff --git a/fs/dax.c b/fs/dax.c
index ad131cd2605d..5bfd27b4a69c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -333,14 +333,6 @@ restart:
 		}
 
 		spin_unlock_irq(&mapping->tree_lock);
-		err = radix_tree_preload(
-				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
-		if (err) {
-			if (pmd_downgrade)
-				put_locked_mapping_entry(mapping, index, entry);
-			return ERR_PTR(err);
-		}
-
 		/*
 		 * Besides huge zero pages the only other thing that gets
 		 * downgraded are empty entries which don't need to be
@@ -350,6 +342,13 @@ restart:
 			unmap_mapping_range(mapping,
 				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
 
+		err = radix_tree_preload(
+				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+		if (err) {
+			if (pmd_downgrade)
+				put_locked_mapping_entry(mapping, index, entry);
+			return ERR_PTR(err);
+		}
 		spin_lock_irq(&mapping->tree_lock);
 
 		if (pmd_downgrade) {