summaryrefslogtreecommitdiffstats
path: root/mm/memory.c
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-12-14 18:07:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-14 19:04:09 -0500
commit3917048d4572b9cabf6f8f5ad395eb693717367c (patch)
tree7eac8368243d0e23e4afdd8faed09ea3fcadff90 /mm/memory.c
parent2994302bc8a17180788fac66a47102d338d5d0ec (diff)
mm: allow full handling of COW faults in ->fault handlers
Patch series "dax: Clear dirty bits after flushing caches", v5. Patchset to clear dirty bits from radix tree of DAX inodes when caches for corresponding pfns have been flushed. In principle, these patches enable handlers to easily update PTEs and do other work necessary to finish the fault without duplicating the functionality present in the generic code. I'd like to thank Kirill and Ross for reviews of the series! This patch (of 20): To allow full handling of COW faults add memcg field to struct vm_fault and a return value of ->fault() handler meaning that COW fault is fully handled and memcg charge must not be canceled. This will allow us to remove knowledge about special DAX locking from the generic fault code. Link: http://lkml.kernel.org/r/1479460644-25076-9-git-send-email-jack@suse.cz Signed-off-by: Jan Kara <jack@suse.cz> Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--mm/memory.c14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index cf74f7ca911b..02504cd4ca0e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,9 +2844,8 @@ static int __do_fault(struct vm_fault *vmf)
 	int ret;
 
 	ret = vma->vm_ops->fault(vma, vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED)
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
+			    VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
 		return ret;
 
 	if (unlikely(PageHWPoison(vmf->page))) {
@@ -3226,7 +3225,6 @@ static int do_read_fault(struct vm_fault *vmf)
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
@@ -3237,7 +3235,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 
 	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
-				&memcg, false)) {
+				&vmf->memcg, false)) {
 		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
@@ -3245,12 +3243,14 @@ static int do_cow_fault(struct vm_fault *vmf)
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
+	if (ret & VM_FAULT_DONE_COW)
+		return ret;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
+	ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
@@ -3263,7 +3263,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(vmf->cow_page, memcg, false);
+	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
 	put_page(vmf->cow_page);
 	return ret;
 }