path: root/mm/memory.c
author      Christoph Hellwig <hch@lst.de>          2019-06-26 08:27:11 -0400
committer   Jason Gunthorpe <jgg@mellanox.com>      2019-07-02 13:32:44 -0400
commit      897e6365cda6ba6356e83a3aaa68dec82ef4c548 (patch)
tree        91a2776f8a994aa78d9eb1f91fce9debc51b1727 /mm/memory.c
parent      f6a55e1a3fe6b3bb294a80a05437fcf86488d819 (diff)
memremap: add a migrate_to_ram method to struct dev_pagemap_ops
This replaces the hacky ->fault callback, which is currently directly
called from common code through a hmm specific data structure as an
exercise in layering violations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
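In other words, instead of reaching the driver through the old
device_private_entry_fault() / hmm ->fault path, the core fault code now
calls a migrate_to_ram method on struct dev_pagemap_ops. A minimal sketch
of the new hook's shape, inferred from the subject line and the call added
in the hunk below (other members of the struct and the surrounding includes
are elided here):

	struct dev_pagemap_ops {
		/* ... other callbacks elided ... */

		/*
		 * Called from the CPU fault path when a process touches a
		 * page that lives in un-addressable device memory; the
		 * implementation must migrate the data back to a
		 * CPU-accessible page.
		 */
		vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
	};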
Diffstat (limited to 'mm/memory.c')
-rw-r--r--    mm/memory.c    9
1 file changed, 2 insertions, 7 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 2d14f4c7e152..d437ccdb210c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2748,13 +2748,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
 		} else if (is_device_private_entry(entry)) {
-			/*
-			 * For un-addressable device memory we call the pgmap
-			 * fault handler callback. The callback must migrate
-			 * the page back to some CPU accessible page.
-			 */
-			ret = device_private_entry_fault(vma, vmf->address, entry,
-						 vmf->flags, vmf->pmd);
+			vmf->page = device_private_entry_to_page(entry);
+			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
 		} else {
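On the driver side, wiring up the new callback would look roughly like the
sketch below. This is not part of the patch: the names
my_devmem_migrate_to_ram and my_devmem_pagemap_ops are hypothetical, and
the actual migration work is elided.

	/* Hypothetical driver-side wiring, not taken from this patch. */
	static vm_fault_t my_devmem_migrate_to_ram(struct vm_fault *vmf)
	{
		/*
		 * do_swap_page() has already set vmf->page from the
		 * device-private swap entry (see the hunk above).  Migrate
		 * that page's data back to system RAM here, then let the
		 * fault be retried.
		 */
		/* ... device-specific migration elided ... */
		return 0;		/* or e.g. VM_FAULT_SIGBUS on failure */
	}

	static const struct dev_pagemap_ops my_devmem_pagemap_ops = {
		.migrate_to_ram = my_devmem_migrate_to_ram,
	};

	/*
	 * When registering its device-private memory (e.g. via
	 * devm_memremap_pages()), the driver points its dev_pagemap at
	 * these ops:  pgmap->ops = &my_devmem_pagemap_ops;
	 */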