author		Matthew Wilcox <willy@infradead.org>	2018-11-16 15:19:13 -0500
committer	Matthew Wilcox <willy@infradead.org>	2018-11-17 12:07:53 -0500
commit		0e40de0338d005f73d46898a21544cd26f01b4ce
tree		219460210db66a66308167398ddc320252e30047
parent		fda490d39fc0668d92e170d95c11e35a010019aa
dax: Fix huge page faults
Using xas_load() with a PMD-sized xa_state would work if either a
PMD-sized entry was present or a PTE-sized entry was present in the
first 64 entries (of the 512 PTEs in a PMD on x86). If there was no
PTE in the first 64 entries, grab_mapping_entry() would believe there
were no entries present, allocate a PMD-sized entry and overwrite the
PTE in the page cache.
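To make the failure mode concrete, here is a deliberately flattened
userspace model. It is an illustration only, not the kernel's XArray
(which is a radix tree, not a flat array); the constants merely mirror
the 64-slot chunks and 512-PTE PMDs described above, and the two lookup
functions are hypothetical stand-ins for the buggy and fixed behaviour:

#include <stdio.h>
#include <stddef.h>

#define PTES_PER_PMD	512	/* PTEs covered by one PMD on x86 */
#define XA_CHUNK_SIZE	64	/* slots the buggy lookup examined */

static void *slots[PTES_PER_PMD];

/* Models the xas_load() bug: only the first chunk is inspected. */
static void *buggy_pmd_lookup(void)
{
	for (size_t i = 0; i < XA_CHUNK_SIZE; i++)
		if (slots[i])
			return slots[i];
	return NULL;
}

/* Models xas_find_conflict(): every slot in the PMD range is checked. */
static void *pmd_find_conflict(void)
{
	for (size_t i = 0; i < PTES_PER_PMD; i++)
		if (slots[i])
			return slots[i];
	return NULL;
}

int main(void)
{
	static int pte_entry;

	slots[100] = &pte_entry;	/* a PTE beyond the first 64 slots */

	printf("buggy lookup:  %p\n", buggy_pmd_lookup());	/* (nil) */
	printf("find conflict: %p\n", pmd_find_conflict());	/* found */
	return 0;
}

With the entry at slot 100, the narrow lookup reports the range as
empty, which is exactly the state in which grab_mapping_entry() would
go on to allocate a PMD-sized entry on top of the live PTE.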
Use xas_find_conflict() instead, which turns out to simplify both
get_unlocked_entry() and grab_mapping_entry(): unlike xas_load(),
xas_find_conflict() never returns an internal node entry, so the
xa_is_internal() checks can be dropped as well. Also remove a
WARN_ON_ONCE from grab_mapping_entry(), as it will have already
triggered in get_unlocked_entry().
Fixes: cfc93c6c6c96 ("dax: Convert dax_insert_pfn_mkwrite to XArray")
Signed-off-by: Matthew Wilcox <willy@infradead.org>
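For reference, a minimal runnable sketch of the three-way dispatch that
grab_mapping_entry() performs on the result of xas_find_conflict()
after this patch. Every kernel symbol here is a stub invented for
illustration; only the bit-0 tagging of value entries matches the real
XArray:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* In the real XArray, value entries are tagged by setting bit 0. */
static bool xa_is_value(const void *entry)
{
	return (uintptr_t)entry & 1;
}

/* The three cases grab_mapping_entry() must distinguish. */
static void classify(const void *entry)
{
	if (!entry)
		puts("no conflict: safe to allocate a fresh DAX entry");
	else if (!xa_is_value(entry))
		puts("non-value entry: corrupt state, fail with EIO");
	else
		puts("existing DAX entry: lock it and reuse it");
}

int main(void)
{
	static int not_a_value;		/* stands in for a page pointer */

	classify(NULL);			/* range genuinely empty */
	classify((void *)0x21);		/* odd address: tagged value entry */
	classify(&not_a_value);		/* aligned address: bit 0 clear */
	return 0;
}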
-rw-r--r--	fs/dax.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
@@ -216,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
 	ewait.wait.func = wake_exceptional_entry_func;
 
 	for (;;) {
-		entry = xas_load(xas);
-		if (!entry || xa_is_internal(entry) ||
-				WARN_ON_ONCE(!xa_is_value(entry)) ||
+		entry = xas_find_conflict(xas);
+		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
 				!dax_is_locked(entry))
 			return entry;
 
@@ -458,11 +457,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
 retry:
 	xas_lock_irq(xas);
 	entry = get_unlocked_entry(xas);
-	if (xa_is_internal(entry))
-		goto fallback;
 
 	if (entry) {
-		if (WARN_ON_ONCE(!xa_is_value(entry))) {
+		if (!xa_is_value(entry)) {
 			xas_set_err(xas, EIO);
 			goto out_unlock;
 		}
@@ -1641,8 +1638,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
 	/* Did we race with someone splitting entry or so? */
 	if (!entry ||
 	    (order == 0 && !dax_is_pte_entry(entry)) ||
-	    (order == PMD_ORDER && (xa_is_internal(entry) ||
-				    !dax_is_pmd_entry(entry)))) {
+	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
 		put_unlocked_entry(&xas, entry);
 		xas_unlock_irq(&xas);
 		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,