author		David S. Miller <davem@davemloft.net>	2016-06-30 05:03:36 -0400
committer	David S. Miller <davem@davemloft.net>	2016-06-30 05:03:36 -0400
commit		ee58b57100ca953da7320c285315a95db2f7053d (patch)
tree		77b815a31240adc4d6326346908137fc6c2c3a96 /mm/memory.c
parent		6f30e8b022c8e3a722928ddb1a2ae0be852fcc0e (diff)
parent		e7bdea7750eb2a64aea4a08fa5c0a31719c8155d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of overlapping changes, except the packet scheduler
conflicts which deal with the addition of the free list parameter
to qdisc_enqueue().
Signed-off-by: David S. Miller <davem@davemloft.net>
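[Context for the conflict note above: the free-list parameter added on the net-next side reworks the qdisc enqueue path so that packets dropped inside ->enqueue() are freed only after the root qdisc lock is released. A minimal sketch of the reworked helper, reconstructed from the net-next side of the merge (it is not part of the mm/memory.c diff shown below):

	/* Sketch: qdisc_enqueue() with the new to_free list. skbs that an
	 * enqueue handler decides to drop are chained onto *to_free and
	 * freed by the caller once the root qdisc lock has been dropped.
	 */
	static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
					struct sk_buff **to_free)
	{
		qdisc_calculate_pkt_len(skb, sch);
		return sch->enqueue(skb, sch, to_free);
	}
]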
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	31
1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 15322b73636b..cd1f29e4897e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-		struct page *page, pte_t *pte, bool write, bool anon, bool old)
+		struct page *page, pte_t *pte, bool write, bool anon)
 {
 	pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	entry = mk_pte(page, vma->vm_page_prot);
 	if (write)
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-	if (old)
-		entry = pte_mkold(entry);
 	if (anon) {
 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 		page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-	PAGE_SIZE;
-#else
 	rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-		if (!pte_same(*pte, orig_pte))
-			goto unlock_out;
 		do_fault_around(vma, address, pte, pgoff, flags);
-		/* Check if the fault is handled by faultaround */
-		if (!pte_same(*pte, orig_pte)) {
-			/*
-			 * Faultaround produce old pte, but the pte we've
-			 * handler fault for should be young.
-			 */
-			pte_t entry = pte_mkyoung(*pte);
-			if (ptep_set_access_flags(vma, address, pte, entry, 0))
-				update_mmu_cache(vma, address, pte);
-			goto unlock_out;
-		}
+		if (!pte_same(*pte, orig_pte))
+			goto unlock_out;
 		pte_unmap_unlock(pte, ptl);
 	}
 
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, false, false, false);
+	do_set_pte(vma, address, fault_page, pte, false, false);
 	unlock_page(fault_page);
 unlock_out:
 	pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 		goto uncharge_out;
 	}
-	do_set_pte(vma, address, new_page, pte, true, true, false);
+	do_set_pte(vma, address, new_page, pte, true, true);
 	mem_cgroup_commit_charge(new_page, memcg, false, false);
 	lru_cache_add_active_or_unevictable(new_page, vma);
 	pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		put_page(fault_page);
 		return ret;
 	}
-	do_set_pte(vma, address, fault_page, pte, true, false, false);
+	do_set_pte(vma, address, fault_page, pte, true, false);
 	pte_unmap_unlock(pte, ptl);
 
 	if (set_page_dirty(fault_page))
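[The mm/memory.c hunks above match the upstream revert of "mm: make faultaround produce old ptes": do_set_pte() loses its old parameter, and faultaround-installed ptes are marked young again. For reference, a condensed sketch of the fix-up that do_read_fault() consequently no longer needs, taken from the deleted lines above:

	/* Removed above: while faultaround installed pte_mkold() entries,
	 * the pte for the address that actually faulted had to be promoted
	 * back to young by hand after do_fault_around() ran.
	 */
	pte_t entry = pte_mkyoung(*pte);
	if (ptep_set_access_flags(vma, address, pte, entry, 0))
		update_mmu_cache(vma, address, pte);
]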