Diffstat (limited to 'mm')

 mm/hugetlb.c    |  2 +-
 mm/memcontrol.c |  1 -
 mm/memory.c     | 36 +++++++++++++++++++++++++---------------
 mm/mmap.c       |  4 ++--
 mm/mremap.c     | 21 ++++++++++++++++++++-
 mm/shmem.c      |  8 +-------
 mm/slab.h       |  2 ++
 7 files changed, 47 insertions(+), 27 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d093..b60f33080a28 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 
 	mm = vma->vm_mm;
 
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
 	tlb_finish_mmu(&tlb, start, end);
 }
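
Note: this call site follows the interface change made in mm/memory.c below, where tlb_gather_mmu() gains a virtual-address range in place of the old fullmm flag. The two calling conventions used throughout this series, both taken verbatim from the hunks in this diff:

	/* Ranged teardown: pass the VA span actually being unmapped. */
	tlb_gather_mmu(&tlb, mm, start, end);

	/* Full address-space teardown (exit/execve): pass 0..-1. */
	tlb_gather_mmu(&tlb, mm, 0, -1);
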
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c5792a5d87ce..0878ff7c26a9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6969,7 +6969,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
 #ifdef CONFIG_MEMCG_SWAP
 static int __init enable_swap_account(char *s)
 {
-	/* consider enabled if no parameter or 1 is given */
 	if (!strcmp(s, "1"))
 		really_do_swap_account = 1;
 	else if (!strcmp(s, "0"))
diff --git a/mm/memory.c b/mm/memory.c
index 40268410732a..af84bc0ec17c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
  * tear-down from @mm. The @fullmm argument is used when @mm is without
  * users and we're going to destroy the full address space (exit/execve).
  */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
 
-	tlb->fullmm = fullmm;
+	/* Is it from 0 to ~0? */
+	tlb->fullmm = !(start | (end+1));
 	tlb->need_flush_all = 0;
-	tlb->start = -1UL;
-	tlb->end = 0;
+	tlb->start = start;
+	tlb->end = end;
 	tlb->need_flush = 0;
 	tlb->local.next = NULL;
 	tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
 {
 	struct mmu_gather_batch *batch, *next;
 
-	tlb->start = start;
-	tlb->end = end;
 	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	spinlock_t *ptl;
 	pte_t *start_pte;
 	pte_t *pte;
-	unsigned long range_start = addr;
 
 again:
 	init_rss_vec(rss);
@@ -1205,17 +1203,25 @@ again:
 	 * and page-free while holding it.
 	 */
 	if (force_flush) {
+		unsigned long old_end;
+
 		force_flush = 0;
 
-#ifdef HAVE_GENERIC_MMU_GATHER
-		tlb->start = range_start;
+		/*
+		 * Flush the TLB just for the previous segment,
+		 * then update the range to be the remaining
+		 * TLB range.
+		 */
+		old_end = tlb->end;
 		tlb->end = addr;
-#endif
+
 		tlb_flush_mmu(tlb);
-		if (addr != end) {
-			range_start = addr;
+
+		tlb->start = addr;
+		tlb->end = old_end;
+
+		if (addr != end)
 			goto again;
-		}
 	}
 
 	return addr;
@@ -1400,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
 	unsigned long end = start + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, start, end);
 	for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1426,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 	unsigned long end = address + size;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, address, end);
 	update_hiwater_rss(mm);
 	mmu_notifier_invalidate_range_start(mm, address, end);
 	unmap_single_vma(&tlb, vma, address, end, details);
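
Note: the zap_pte_range() change above keeps tlb->start/tlb->end accurate across a forced mid-loop flush: it flushes only the segment walked so far, then restarts the gather on the remainder. A minimal userspace sketch of that bookkeeping (mock types and a printf standing in for the real flush; not kernel code):

	#include <stdio.h>

	struct mock_gather {
		unsigned long start, end;
	};

	/* Stand-in for tlb_flush_mmu(): just report the range it would flush. */
	static void mock_flush(struct mock_gather *tlb)
	{
		printf("flush [%#lx, %#lx)\n", tlb->start, tlb->end);
	}

	/* Mirrors the force_flush path: flush the walked segment, resume the rest. */
	static void zap_range(struct mock_gather *tlb, unsigned long start,
			      unsigned long end, unsigned long force_flush_at)
	{
		unsigned long addr = start;

		tlb->start = start;
		tlb->end = end;
	again:
		/* walk ptes; pretend we hit force_flush partway through */
		addr = (addr < force_flush_at && force_flush_at < end)
			? force_flush_at : end;
		if (addr != end) {
			unsigned long old_end = tlb->end;

			tlb->end = addr;	/* flush only what we walked */
			mock_flush(tlb);
			tlb->start = addr;	/* resume with the remainder */
			tlb->end = old_end;
			goto again;
		}
		mock_flush(tlb);
	}

	int main(void)
	{
		struct mock_gather tlb;

		/* Expect: flush [0x1000, 0x3000) then flush [0x3000, 0x8000) */
		zap_range(&tlb, 0x1000, 0x8000, 0x3000);
		return 0;
	}

Every address in the original range is covered by exactly one flush, which is the invariant the old range_start bookkeeping got wrong.
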
diff --git a/mm/mmap.c b/mm/mmap.c
index 1edbaa3136c3..f9c97d10b873 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
 	struct mmu_gather tlb;
 
 	lru_add_drain();
-	tlb_gather_mmu(&tlb, mm, 0);
+	tlb_gather_mmu(&tlb, mm, start, end);
 	update_hiwater_rss(mm);
 	unmap_vmas(&tlb, vma, start, end);
 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
 
 	lru_add_drain();
 	flush_cache_mm(mm);
-	tlb_gather_mmu(&tlb, mm, 1);
+	tlb_gather_mmu(&tlb, mm, 0, -1);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
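
Note: exit_mmap() passes (0, -1); with unsigned long arithmetic that is the range 0..~0UL, which the new tlb_gather_mmu() recognizes as a full-mm teardown via !(start | (end + 1)). A quick userspace check of that predicate (plain C, illustrative values):

	#include <stdio.h>

	/* The fullmm test from the new tlb_gather_mmu(): true only for 0..~0UL. */
	static int is_fullmm(unsigned long start, unsigned long end)
	{
		return !(start | (end + 1));
	}

	int main(void)
	{
		printf("%d\n", is_fullmm(0, -1UL));		/* 1: exit_mmap() case */
		printf("%d\n", is_fullmm(0x1000, 0x8000));	/* 0: ranged unmap */
		printf("%d\n", is_fullmm(0, 0x8000));		/* 0: starts at 0 but bounded */
		return 0;
	}

end + 1 wraps to 0 only when end is ~0UL, so the OR is zero exactly when the range spans the whole address space.
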
diff --git a/mm/mremap.c b/mm/mremap.c
index 457d34ef3bf2..0843feb66f3d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
+#include <linux/swapops.h>
 #include <linux/highmem.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 	return pmd;
 }
 
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+	/*
+	 * Set soft dirty bit so we can notice
+	 * in userspace the ptes were moved.
+	 */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+	if (pte_present(pte))
+		pte = pte_mksoft_dirty(pte);
+	else if (is_swap_pte(pte))
+		pte = pte_swp_mksoft_dirty(pte);
+	else if (pte_file(pte))
+		pte = pte_file_mksoft_dirty(pte);
+#endif
+	return pte;
+}
+
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long old_addr, unsigned long old_end,
 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 			continue;
 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
-		set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte));
+		pte = move_soft_dirty_pte(pte);
+		set_pte_at(mm, new_addr, new_pte, pte);
 	}
 
 	arch_leave_lazy_mmu_mode();
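
Note: move_soft_dirty_pte() marks every moved PTE soft-dirty, whether it is present, swapped out, or a file PTE, so a userspace tracker rescanning after mremap() sees the relocated pages as touched (per the in-function comment). Userspace observes the flag in /proc/pid/pagemap, where bit 55 of each 64-bit entry is soft-dirty. A rough sketch of reading it, assuming a kernel built with CONFIG_MEM_SOFT_DIRTY; error handling trimmed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	/* Bit 55 of a pagemap entry is the soft-dirty flag. */
	#define PM_SOFT_DIRTY	(1ULL << 55)

	static int page_soft_dirty(void *addr)
	{
		uint64_t entry = 0;
		long psize = sysconf(_SC_PAGESIZE);
		int fd = open("/proc/self/pagemap", O_RDONLY);

		pread(fd, &entry, sizeof(entry),
		      ((uintptr_t)addr / psize) * sizeof(entry));
		close(fd);
		return !!(entry & PM_SOFT_DIRTY);
	}

Before this fix, only the pte_mksoft_dirty() case was handled, which corrupted swap and file PTEs by treating them as present ones.
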
diff --git a/mm/shmem.c b/mm/shmem.c
index 8335dbd3fc35..e43dc555069d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2909,14 +2909,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
 
 /* common code */
 
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-			dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-	.d_dname = shmem_dname
+	.d_dname = simple_dname
 };
 
 /**
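
Note: the removed shmem_dname() was a thin wrapper around dynamic_dname(), which formats through a small intermediate buffer and can fail for long names; the shared simple_dname() helper in fs/dcache.c produces the same "/name (deleted)" output by prepending directly into the caller's buffer. A hedged userspace approximation of that prepend technique (simplified illustration, not the kernel source):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	/* Prepend src to the string being built at end, growing downwards. */
	static char *prepend(char *end, char *buf, const char *src, size_t len)
	{
		if (!end || end - buf < (ptrdiff_t)len)
			return NULL;	/* would overflow: d_dname reports ENAMETOOLONG */
		end -= len;
		memcpy(end, src, len);
		return end;
	}

	/* Roughly the output simple_dname() builds: "/<name> (deleted)". */
	static char *fake_dname(const char *name, char *buf, size_t buflen)
	{
		char *end = buf + buflen;

		end = prepend(end, buf, "", 1);			/* NUL terminator */
		end = prepend(end, buf, " (deleted)", 10);
		end = prepend(end, buf, name, strlen(name));
		end = prepend(end, buf, "/", 1);
		return end;
	}

	int main(void)
	{
		char buf[64];
		char *s = fake_dname("dev/zero", buf, sizeof(buf));

		if (s)
			printf("%s\n", s);	/* "/dev/zero (deleted)" */
		return 0;
	}
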
diff --git a/mm/slab.h b/mm/slab.h
index 620ceeddbe1a..a535033f7e9a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s)
 
 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
 {
+	if (!s->memcg_params)
+		return NULL;
 	return s->memcg_params->memcg_caches[idx];
 }
 
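
Note: the added guard matters because not every kmem_cache has memcg bookkeeping attached; dereferencing s->memcg_params unconditionally would oops on such caches. Callers must now tolerate a NULL result. A hypothetical caller pattern, sketched with mock types (not kernel source):

	/* Mock types for illustration only. */
	struct memcg_cache_params {
		struct kmem_cache *memcg_caches[1];
	};

	struct kmem_cache {
		struct memcg_cache_params *memcg_params;	/* NULL if no memcg data */
	};

	static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
	{
		if (!s->memcg_params)
			return NULL;	/* cache has no per-memcg children */
		return s->memcg_params->memcg_caches[idx];
	}

	/* Hypothetical caller: fall back to the root cache on NULL. */
	static struct kmem_cache *pick_cache(struct kmem_cache *root, int idx)
	{
		struct kmem_cache *c = cache_from_memcg(root, idx);

		return c ? c : root;
	}
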