about summary refs log tree commit diff stats
path: root/mm/rmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  40
1 files changed, 33 insertions, 7 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 0feeef860a8f..38a336e2eea1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -250,7 +250,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
250 list_del(&anon_vma_chain->same_anon_vma); 250 list_del(&anon_vma_chain->same_anon_vma);
251 251
252 /* We must garbage collect the anon_vma if it's empty */ 252 /* We must garbage collect the anon_vma if it's empty */
253 empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma); 253 empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
254 spin_unlock(&anon_vma->lock); 254 spin_unlock(&anon_vma->lock);
255 255
256 if (empty) 256 if (empty)
@@ -274,7 +274,7 @@ static void anon_vma_ctor(void *data)
274 struct anon_vma *anon_vma = data; 274 struct anon_vma *anon_vma = data;
275 275
276 spin_lock_init(&anon_vma->lock); 276 spin_lock_init(&anon_vma->lock);
277 ksm_refcount_init(anon_vma); 277 anonvma_external_refcount_init(anon_vma);
278 INIT_LIST_HEAD(&anon_vma->head); 278 INIT_LIST_HEAD(&anon_vma->head);
279} 279}
280 280
@@ -1131,6 +1131,20 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1131 return ret; 1131 return ret;
1132} 1132}
1133 1133
1134static bool is_vma_temporary_stack(struct vm_area_struct *vma)
1135{
1136 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1137
1138 if (!maybe_stack)
1139 return false;
1140
1141 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1142 VM_STACK_INCOMPLETE_SETUP)
1143 return true;
1144
1145 return false;
1146}
1147
1134/** 1148/**
1135 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1149 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
1136 * rmap method 1150 * rmap method
@@ -1159,7 +1173,21 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
1159 1173
1160 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) { 1174 list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
1161 struct vm_area_struct *vma = avc->vma; 1175 struct vm_area_struct *vma = avc->vma;
1162 unsigned long address = vma_address(page, vma); 1176 unsigned long address;
1177
1178 /*
1179 * During exec, a temporary VMA is setup and later moved.
1180 * The VMA is moved under the anon_vma lock but not the
1181 * page tables leading to a race where migration cannot
1182 * find the migration ptes. Rather than increasing the
1183 * locking requirements of exec(), migration skips
1184 * temporary VMAs until after exec() completes.
1185 */
1186 if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
1187 is_vma_temporary_stack(vma))
1188 continue;
1189
1190 address = vma_address(page, vma);
1163 if (address == -EFAULT) 1191 if (address == -EFAULT)
1164 continue; 1192 continue;
1165 ret = try_to_unmap_one(page, vma, address, flags); 1193 ret = try_to_unmap_one(page, vma, address, flags);
@@ -1355,10 +1383,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1355 /* 1383 /*
1356 * Note: remove_migration_ptes() cannot use page_lock_anon_vma() 1384 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
1357 * because that depends on page_mapped(); but not all its usages 1385 * because that depends on page_mapped(); but not all its usages
1358 * are holding mmap_sem, which also gave the necessary guarantee 1386 * are holding mmap_sem. Users without mmap_sem are required to
1359 * (that this anon_vma's slab has not already been destroyed). 1387 * take a reference count to prevent the anon_vma disappearing
1360 * This needs to be reviewed later: avoiding page_lock_anon_vma()
1361 * is risky, and currently limits the usefulness of rmap_walk().
1362 */ 1388 */
1363 anon_vma = page_anon_vma(page); 1389 anon_vma = page_anon_vma(page);
1364 if (!anon_vma) 1390 if (!anon_vma)