Diffstat (limited to 'mm/rmap.c')
 mm/rmap.c | 49 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 37 insertions(+), 12 deletions(-)
@@ -250,7 +250,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !ksm_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
 	spin_unlock(&anon_vma->lock);
 
 	if (empty)
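
The rename from ksm_refcount() says that KSM is no longer the only external user pinning an anon_vma; page migration now takes such references too. The include/linux/rmap.h side of this series is not part of this file's diff; a hedged sketch of what the renamed helpers presumably look like (the external_refcount field name and the config guard are assumptions):

static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
{
	/* starts at zero: no KSM or migration references outstanding */
	atomic_set(&anon_vma->external_refcount, 0);
}

static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
{
	/* non-zero means the anon_vma must not be freed yet */
	return atomic_read(&anon_vma->external_refcount);
}

When neither CONFIG_KSM nor CONFIG_MIGRATION is set, the second helper presumably compiles to a constant 0, so the garbage-collection test above reduces to the plain list_empty() check.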
@@ -274,7 +274,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
-	ksm_refcount_init(anon_vma);
+	anonvma_external_refcount_init(anon_vma);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
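For context: anon_vma_ctor() runs when slab objects for the anon_vma cache are constructed, so every anon_vma starts life with its lock, external refcount and VMA list initialised. A minimal sketch of the cache registration as it looked in rmap.c of this era (the exact slab flags are recalled from memory and should be treated as an assumption):

void __init anon_vma_init(void)
{
	/* SLAB_DESTROY_BY_RCU lets lockless readers touch a recycled
	 * anon_vma safely, which is what rmap_walk_anon() relies on */
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}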
@@ -336,14 +336,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma?
- * checking that the page matches the vma.
+ * Caller should check the page is actually part of the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-	if (PageAnon(page)) {
-		if (vma->anon_vma != page_anon_vma(page))
-			return -EFAULT;
-	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+	if (PageAnon(page))
+		;
+	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
 		if (!vma->vm_file ||
 		    vma->vm_file->f_mapping != page->mapping)
 			return -EFAULT;
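
The dropped vma->anon_vma != page_anon_vma(page) test means an anonymous page is now accepted without verification, matching the new comment: the caller must already know the page belongs to this vma. For readability, the complete function after this hunk, with the unshown tail reconstructed from the surrounding mainline code of the period (treat the tail as an assumption), reads roughly:

unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page))
		;	/* anon: caller guarantees page and vma match */
	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		/* file-backed: the vma must map the same address_space */
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;

	/* returns -EFAULT if the linear address falls outside the vma */
	return vma_address(page, vma);
}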
@@ -1132,6 +1131,20 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 	return ret;
 }
 
+static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+{
+	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+	if (!maybe_stack)
+		return false;
+
+	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+						VM_STACK_INCOMPLETE_SETUP)
+		return true;
+
+	return false;
+}
+
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
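
is_vma_temporary_stack() depends on exec() marking the stack VMA while it is still at its temporary location. That marking lives in include/linux/mm.h and fs/exec.c, outside this diff; a hedged sketch of the presumed lifecycle (macro value and call sites recalled from the same patch series, treat as assumptions):

/* include/linux/mm.h: two flags that are meaningless on a stack are
 * overloaded as the marker, so no new VM_* bit needs to be allocated */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

/* fs/exec.c, __bprm_mm_init(): flag the stack VMA at creation */
vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;

/* fs/exec.c, setup_arg_pages(): clear the marker once shift_arg_pages()
 * has moved the stack to its final address, making it migratable again */
vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

Because the marker is two overloaded bits, the function compares against the full mask rather than testing a single flag.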
@@ -1160,7 +1173,21 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 
 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_address(page, vma);
+		unsigned long address;
+
+		/*
+		 * During exec, a temporary VMA is setup and later moved.
+		 * The VMA is moved under the anon_vma lock but not the
+		 * page tables leading to a race where migration cannot
+		 * find the migration ptes. Rather than increasing the
+		 * locking requirements of exec(), migration skips
+		 * temporary VMAs until after exec() completes.
+		 */
+		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+				is_vma_temporary_stack(vma))
+			continue;
+
+		address = vma_address(page, vma);
 		if (address == -EFAULT)
 			continue;
 		ret = try_to_unmap_one(page, vma, address, flags);
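
Only migration callers take the new early-continue path; regular reclaim still unmaps temporary stacks. In mm/migrate.c of this era the call that ends up here is made from unmap_and_move(); the exact flag combination below is recalled from memory and should be treated as an assumption:

/* replace every mapping of the page with a migration pte; with the
 * hunk above, a temporary exec() stack VMA is silently skipped */
try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);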
@@ -1356,10 +1383,8 @@ static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
 	/*
 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
 	 * because that depends on page_mapped(); but not all its usages
-	 * are holding mmap_sem, which also gave the necessary guarantee
-	 * (that this anon_vma's slab has not already been destroyed).
-	 * This needs to be reviewed later: avoiding page_lock_anon_vma()
-	 * is risky, and currently limits the usefulness of rmap_walk().
+	 * are holding mmap_sem. Users without mmap_sem are required to
+	 * take a reference count to prevent the anon_vma disappearing
 	 */
 	anon_vma = page_anon_vma(page);
 	if (!anon_vma)
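
The rewritten comment states the new contract: rmap_walk() callers that do not hold mmap_sem must pin the anon_vma through the external refcount introduced above. A hedged sketch of the pinning pair that migration presumably uses (helper names and bodies are assumptions based on the rest of this series):

static void get_anon_vma(struct anon_vma *anon_vma)
{
	/* pin: anon_vma_unlink() will see a non-zero external refcount
	 * and leave freeing to the last drop_anon_vma() */
	atomic_inc(&anon_vma->external_refcount);
}

static void drop_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->lock)) {
		/* last external reference: free the anon_vma ourselves
		 * if no VMAs remain on its list */
		int empty = list_empty(&anon_vma->head);
		spin_unlock(&anon_vma->lock);
		if (empty)
			anon_vma_free(anon_vma);
	}
}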