Diffstat (limited to 'mm')
-rw-r--r--  mm/frontswap.c   |  4
-rw-r--r--  mm/memory.c      | 26
-rw-r--r--  mm/mmap.c        | 10
-rw-r--r--  mm/rmap.c        |  6
-rw-r--r--  mm/slab.c        |  2
-rw-r--r--  mm/vmpressure.c  |  8
6 files changed, 33 insertions(+), 23 deletions(-)
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec536f03..f2a3571c6e22 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
                   the (older) page from frontswap
                  */
                 inc_frontswap_failed_stores();
-                if (dup)
+                if (dup) {
                         __frontswap_clear(sis, offset);
+                        frontswap_ops->invalidate_page(type, offset);
+                }
         }
         if (frontswap_writethrough_enabled)
                 /* report failure so swap also writes to swap device */
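
The frontswap hunk above makes a failed duplicate store drop the older copy from the backend as well, instead of only clearing the frontswap map bit. As a rough standalone illustration of that pattern (plain userspace C with made-up names such as cache_store/cache_drop, not the kernel's frontswap API):

#include <stdbool.h>
#include <stdio.h>

struct cache_entry {
        int key;
        int value;
        bool valid;
};

/* One cached slot that already holds key 42. */
static struct cache_entry slot = { .key = 42, .value = 1, .valid = true };

/* Stand-in backend that rejects the new store. */
static int backend_store(int key, int value)
{
        (void)key;
        (void)value;
        return -1;
}

static void cache_drop(struct cache_entry *e)
{
        e->valid = false;       /* clear the bookkeeping bit ... */
        e->value = 0;           /* ... and forget the stale payload too */
}

static int cache_store(struct cache_entry *e, int key, int value)
{
        bool dup = e->valid && e->key == key;

        if (backend_store(key, value) < 0) {
                if (dup)
                        cache_drop(e);  /* failed dup invalidates the older copy */
                return -1;
        }
        e->key = key;
        e->value = value;
        e->valid = true;
        return 0;
}

int main(void)
{
        cache_store(&slot, 42, 2);
        printf("slot valid after failed duplicate store: %d\n", slot.valid);
        return 0;
}

The extra invalidate matters because a stale copy left behind after a failed overwrite could otherwise be handed back by a later lookup.
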
diff --git a/mm/memory.c b/mm/memory.c
index 3e503831e042..d5f2ae9c4a23 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -815,20 +815,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                 if (!pte_file(pte)) {
                         swp_entry_t entry = pte_to_swp_entry(pte);
 
-                        if (swap_duplicate(entry) < 0)
-                                return entry.val;
-
-                        /* make sure dst_mm is on swapoff's mmlist. */
-                        if (unlikely(list_empty(&dst_mm->mmlist))) {
-                                spin_lock(&mmlist_lock);
-                                if (list_empty(&dst_mm->mmlist))
-                                        list_add(&dst_mm->mmlist,
-                                                 &src_mm->mmlist);
-                                spin_unlock(&mmlist_lock);
-                        }
-                        if (likely(!non_swap_entry(entry)))
+                        if (likely(!non_swap_entry(entry))) {
+                                if (swap_duplicate(entry) < 0)
+                                        return entry.val;
+
+                                /* make sure dst_mm is on swapoff's mmlist. */
+                                if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                        spin_lock(&mmlist_lock);
+                                        if (list_empty(&dst_mm->mmlist))
+                                                list_add(&dst_mm->mmlist,
+                                                         &src_mm->mmlist);
+                                        spin_unlock(&mmlist_lock);
+                                }
                                 rss[MM_SWAPENTS]++;
-                        else if (is_migration_entry(entry)) {
+                        } else if (is_migration_entry(entry)) {
                                 page = migration_entry_to_page(entry);
 
                                 if (PageAnon(page))
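
The memory.c hunk hoists the !non_swap_entry() test so that swap_duplicate(), the mmlist bookkeeping and the MM_SWAPENTS accounting run only for genuine swap entries, while migration entries go straight to their own branch. A minimal sketch of that kind of restructuring, using invented types (enum entry_type, copy_entry) rather than the kernel's swp_entry_t machinery:

#include <stdio.h>

enum entry_type { ENTRY_SWAP, ENTRY_MIGRATION };

struct entry {
        enum entry_type type;
        long val;
};

static int swap_refs, swap_count, migration_count;

static int copy_entry(const struct entry *e)
{
        /*
         * Before the change, the duplicate/bookkeeping steps ran for every
         * entry and only the final counter was guarded; now the whole block
         * sits under the type test.
         */
        if (e->type == ENTRY_SWAP) {
                swap_refs++;            /* stands in for swap_duplicate() */
                swap_count++;           /* stands in for rss[MM_SWAPENTS]++ */
        } else if (e->type == ENTRY_MIGRATION) {
                migration_count++;
        }
        return 0;
}

int main(void)
{
        struct entry a = { ENTRY_SWAP, 1 };
        struct entry b = { ENTRY_MIGRATION, 2 };

        copy_entry(&a);
        copy_entry(&b);
        printf("swap refs=%d swap count=%d migration count=%d\n",
               swap_refs, swap_count, migration_count);
        return 0;
}
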
diff --git a/mm/mmap.c b/mm/mmap.c
index 87e82b38453c..ae919891a087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end);
                  * shrinking vma had, to cover any anon pages imported.
                  */
                 if (exporter && exporter->anon_vma && !importer->anon_vma) {
-                        if (anon_vma_clone(importer, exporter))
-                                return -ENOMEM;
+                        int error;
+
+                        error = anon_vma_clone(importer, exporter);
+                        if (error)
+                                return error;
                         importer->anon_vma = exporter->anon_vma;
                 }
         }
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
         if (err)
                 goto out_free_vma;
 
-        if (anon_vma_clone(new, vma))
+        err = anon_vma_clone(new, vma);
+        if (err)
                 goto out_free_mpol;
 
         if (new->vm_file)
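
Both mmap.c hunks, and the rmap.c hunks below, switch from collapsing every anon_vma_clone() failure into a hard-coded -ENOMEM to forwarding whatever error the helper actually returned. A minimal sketch of that error-propagation pattern, with illustrative names (clone_metadata, split_region) rather than kernel APIs:

#include <errno.h>
#include <stdio.h>

/* Stand-in for a helper that can fail for more reasons than ENOMEM. */
static int clone_metadata(int simulate_error)
{
        return simulate_error ? -EINTR : 0;
}

static int split_region(int simulate_error)
{
        int error;

        error = clone_metadata(simulate_error);
        if (error)
                return error;   /* forward the real reason, not a fixed -ENOMEM */
        return 0;
}

int main(void)
{
        printf("split_region() -> %d (expected %d)\n", split_region(1), -EINTR);
        return 0;
}
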
diff --git a/mm/rmap.c b/mm/rmap.c
index 19886fb2f13a..3e4c7213210c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
         struct anon_vma_chain *avc;
         struct anon_vma *anon_vma;
+        int error;
 
         /* Don't bother if the parent process has no anon_vma here. */
         if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
          * First, attach the new VMA to the parent VMA's anon_vmas,
          * so rmap can find non-COWed pages in child processes.
          */
-        if (anon_vma_clone(vma, pvma))
-                return -ENOMEM;
+        error = anon_vma_clone(vma, pvma);
+        if (error)
+                return error;
 
         /* Then add our own anon_vma. */
         anon_vma = anon_vma_alloc();
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea30130..f34e053ec46e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
         void *obj;
         int x;
 
-        VM_BUG_ON(nodeid > num_online_nodes());
+        VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
         n = get_node(cachep, nodeid);
         BUG_ON(!n);
 
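
The slab.c hunk replaces a bound based on the number of online nodes with one based on the size of the node ID space, since node IDs need not be contiguous. A small userspace sketch of why counting online items is not a valid upper bound for a sparse ID space (MAX_NODES and the online[] map are made up for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 8                             /* stands in for MAX_NUMNODES */

static const bool online[MAX_NODES] = {
        [0] = true, [4] = true,                 /* two online nodes, IDs 0 and 4 */
};

static int num_online(void)
{
        int i, n = 0;

        for (i = 0; i < MAX_NODES; i++)
                n += online[i];
        return n;
}

int main(void)
{
        int nodeid = 4;                         /* a perfectly valid node ID */

        /* old-style check: rejects a valid sparse ID, because 4 > 2 */
        printf("old check fires: %d\n", nodeid > num_online());

        /* new-style check: bound by the size of the ID space instead */
        assert(!(nodeid < 0 || nodeid >= MAX_NODES));
        return 0;
}
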
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e75f7c7..c5afd573d7da 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
         unsigned long scanned;
         unsigned long reclaimed;
 
+        spin_lock(&vmpr->sr_lock);
         /*
          * Several contexts might be calling vmpressure(), so it is
          * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
          * here. No need for any locks here since we don't care if
          * vmpr->reclaimed is in sync.
          */
-        if (!vmpr->scanned)
+        scanned = vmpr->scanned;
+        if (!scanned) {
+                spin_unlock(&vmpr->sr_lock);
                 return;
+        }
 
-        spin_lock(&vmpr->sr_lock);
-        scanned = vmpr->scanned;
         reclaimed = vmpr->reclaimed;
         vmpr->scanned = 0;
         vmpr->reclaimed = 0;
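
The vmpressure.c hunks move the sr_lock acquisition ahead of the vmpr->scanned check, so the counter is read under the same lock that protects its reset instead of being checked first and locked afterwards. A sketch of the resulting structure transplanted to pthreads, with hypothetical names (work_fn, scanned_total); build with cc -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long scanned_total, reclaimed_total;

static void work_fn(void)
{
        unsigned long scanned, reclaimed;

        pthread_mutex_lock(&sr_lock);
        scanned = scanned_total;
        if (!scanned) {
                pthread_mutex_unlock(&sr_lock);
                return;                 /* nothing accumulated since the last run */
        }
        reclaimed = reclaimed_total;
        scanned_total = 0;
        reclaimed_total = 0;
        pthread_mutex_unlock(&sr_lock);

        printf("consumed scanned=%lu reclaimed=%lu\n", scanned, reclaimed);
}

int main(void)
{
        pthread_mutex_lock(&sr_lock);
        scanned_total = 3;
        reclaimed_total = 1;
        pthread_mutex_unlock(&sr_lock);

        work_fn();
        return 0;
}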