 include/linux/mm.h |  4 +++-
 mm/memory.c        | 14 +++++++-------
 2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 39c17a2efcea..6e25f4916d6f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,7 +301,8 @@ struct vm_fault {
 					 * the 'address' */
 	pte_t orig_pte;			/* Value of PTE at the time of fault */
 
-	struct page *cow_page;		/* Handler may choose to COW */
+	struct page *cow_page;		/* Page handler may use for COW fault */
+	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
@@ -1103,6 +1104,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
 #define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW   0x2000	/* ->fault has fully handled COW */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
diff --git a/mm/memory.c b/mm/memory.c
index cf74f7ca911b..02504cd4ca0e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2844,9 +2844,8 @@ static int __do_fault(struct vm_fault *vmf)
 	int ret;
 
 	ret = vma->vm_ops->fault(vma, vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED)
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
+			    VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
 		return ret;
 
 	if (unlikely(PageHWPoison(vmf->page))) {
@@ -3226,7 +3225,6 @@ static int do_read_fault(struct vm_fault *vmf)
 static int do_cow_fault(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct mem_cgroup *memcg;
 	int ret;
 
 	if (unlikely(anon_vma_prepare(vma)))
@@ -3237,7 +3235,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 		return VM_FAULT_OOM;
 
 	if (mem_cgroup_try_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL,
-				&memcg, false)) {
+				&vmf->memcg, false)) {
 		put_page(vmf->cow_page);
 		return VM_FAULT_OOM;
 	}
@@ -3245,12 +3243,14 @@ static int do_cow_fault(struct vm_fault *vmf)
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
+	if (ret & VM_FAULT_DONE_COW)
+		return ret;
 
 	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(vmf->cow_page);
 
-	ret |= alloc_set_pte(vmf, memcg, vmf->cow_page);
+	ret |= alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	if (!(ret & VM_FAULT_DAX_LOCKED)) {
@@ -3263,7 +3263,7 @@ static int do_cow_fault(struct vm_fault *vmf)
 		goto uncharge_out;
 	return ret;
 uncharge_out:
-	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
+	mem_cgroup_cancel_charge(vmf->cow_page, vmf->memcg, false);
 	put_page(vmf->cow_page);
 	return ret;
 }
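For reference, a hypothetical ->fault handler that uses the new field and flag might look as follows. This is only a sketch of the intended protocol, not code from this series: my_fault(), my_fill_page() and my_read_fault() are made-up names, while alloc_set_pte(), pte_unmap_unlock(), __SetPageUptodate() and the vm_fault fields are used exactly as do_cow_fault() above uses them. A handler that maps vmf->cow_page itself returns VM_FAULT_DONE_COW so do_cow_fault() skips the copy, the PTE setup and the cleanup; on failure it returns plain error bits without the flag so the existing uncharge_out path still cancels the memcg charge and frees cow_page.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Hypothetical helpers, not part of the kernel: my_fill_page() reads the
 * backing data for @pgoff into @page, my_read_fault() is the handler's
 * ordinary "return a page in vmf->page" path.
 */
static int my_fill_page(struct file *file, pgoff_t pgoff, struct page *page);
static int my_read_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;

	/* Shared mapping or read fault: keep using the usual protocol. */
	if (!vmf->cow_page)
		return my_read_fault(vma, vmf);

	/*
	 * Write fault on a private mapping: do_cow_fault() has already
	 * allocated vmf->cow_page and charged it to vmf->memcg before
	 * calling ->fault.  Fill the COW page directly instead of
	 * returning a page for the core to copy from.
	 */
	if (my_fill_page(vma->vm_file, vmf->pgoff, vmf->cow_page))
		return VM_FAULT_SIGBUS;	/* uncharge_out in do_cow_fault() cleans up */
	__SetPageUptodate(vmf->cow_page);

	/* Map the COW page ourselves, consuming the charge in vmf->memcg. */
	ret = alloc_set_pte(vmf, vmf->memcg, vmf->cow_page);
	if (vmf->pte)
		pte_unmap_unlock(vmf->pte, vmf->ptl);
	if (ret)
		return ret;	/* no DONE_COW: do_cow_fault() uncharges and frees */

	/* Nothing left for do_cow_fault() to do. */
	return VM_FAULT_DONE_COW;
}

Moving memcg into struct vm_fault is what makes this workable: the charge taken in do_cow_fault() has to remain visible to the handler (and to alloc_set_pte()) across the ->fault call, whether the core or the handler ends up installing the PTE.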