diff options
Diffstat (limited to 'mm/migrate.c')
| -rw-r--r-- | mm/migrate.c | 29 |
1 file changed, 22 insertions, 7 deletions
diff --git a/mm/migrate.c b/mm/migrate.c index d8c65a65c61d..153572fb60b8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -285,7 +285,15 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, | |||
| 285 | 285 | ||
| 286 | page = migration_entry_to_page(entry); | 286 | page = migration_entry_to_page(entry); |
| 287 | 287 | ||
| 288 | get_page(page); | 288 | /* |
| 289 | * Once radix-tree replacement of page migration started, page_count | ||
| 290 | * *must* be zero. And, we don't want to call wait_on_page_locked() | ||
| 291 | * against a page without get_page(). | ||
| 292 | * So, we use get_page_unless_zero(), here. Even failed, page fault | ||
| 293 | * will occur again. | ||
| 294 | */ | ||
| 295 | if (!get_page_unless_zero(page)) | ||
| 296 | goto out; | ||
| 289 | pte_unmap_unlock(ptep, ptl); | 297 | pte_unmap_unlock(ptep, ptl); |
| 290 | wait_on_page_locked(page); | 298 | wait_on_page_locked(page); |
| 291 | put_page(page); | 299 | put_page(page); |
| @@ -305,6 +313,7 @@ out: | |||
| 305 | static int migrate_page_move_mapping(struct address_space *mapping, | 313 | static int migrate_page_move_mapping(struct address_space *mapping, |
| 306 | struct page *newpage, struct page *page) | 314 | struct page *newpage, struct page *page) |
| 307 | { | 315 | { |
| 316 | int expected_count; | ||
| 308 | void **pslot; | 317 | void **pslot; |
| 309 | 318 | ||
| 310 | if (!mapping) { | 319 | if (!mapping) { |
| @@ -314,14 +323,20 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
| 314 | return 0; | 323 | return 0; |
| 315 | } | 324 | } |
| 316 | 325 | ||
| 317 | write_lock_irq(&mapping->tree_lock); | 326 | spin_lock_irq(&mapping->tree_lock); |
| 318 | 327 | ||
| 319 | pslot = radix_tree_lookup_slot(&mapping->page_tree, | 328 | pslot = radix_tree_lookup_slot(&mapping->page_tree, |
| 320 | page_index(page)); | 329 | page_index(page)); |
| 321 | 330 | ||
| 322 | if (page_count(page) != 2 + !!PagePrivate(page) || | 331 | expected_count = 2 + !!PagePrivate(page); |
| 332 | if (page_count(page) != expected_count || | ||
| 323 | (struct page *)radix_tree_deref_slot(pslot) != page) { | 333 | (struct page *)radix_tree_deref_slot(pslot) != page) { |
| 324 | write_unlock_irq(&mapping->tree_lock); | 334 | spin_unlock_irq(&mapping->tree_lock); |
| 335 | return -EAGAIN; | ||
| 336 | } | ||
| 337 | |||
| 338 | if (!page_freeze_refs(page, expected_count)) { | ||
| 339 | spin_unlock_irq(&mapping->tree_lock); | ||
| 325 | return -EAGAIN; | 340 | return -EAGAIN; |
| 326 | } | 341 | } |
| 327 | 342 | ||
| @@ -338,6 +353,7 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
| 338 | 353 | ||
| 339 | radix_tree_replace_slot(pslot, newpage); | 354 | radix_tree_replace_slot(pslot, newpage); |
| 340 | 355 | ||
| 356 | page_unfreeze_refs(page, expected_count); | ||
| 341 | /* | 357 | /* |
| 342 | * Drop cache reference from old page. | 358 | * Drop cache reference from old page. |
| 343 | * We know this isn't the last reference. | 359 | * We know this isn't the last reference. |
| @@ -357,10 +373,9 @@ static int migrate_page_move_mapping(struct address_space *mapping, | |||
| 357 | __dec_zone_page_state(page, NR_FILE_PAGES); | 373 | __dec_zone_page_state(page, NR_FILE_PAGES); |
| 358 | __inc_zone_page_state(newpage, NR_FILE_PAGES); | 374 | __inc_zone_page_state(newpage, NR_FILE_PAGES); |
| 359 | 375 | ||
| 360 | write_unlock_irq(&mapping->tree_lock); | 376 | spin_unlock_irq(&mapping->tree_lock); |
| 361 | if (!PageSwapCache(newpage)) { | 377 | if (!PageSwapCache(newpage)) |
| 362 | mem_cgroup_uncharge_cache_page(page); | 378 | mem_cgroup_uncharge_cache_page(page); |
| 363 | } | ||
| 364 | 379 | ||
| 365 | return 0; | 380 | return 0; |
| 366 | } | 381 | } |
