aboutsummaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-06-23 05:03:37 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-23 10:42:50 -0400
commit6c5240ae7f48c83fcaa8e24fa63e7eb09aba5651 (patch)
treefede2324f4348701e60758d7f894aae4b09cdc9a /mm/migrate.c
parentd75a0fcda2cfc71b50e16dc89e0c32c57d427e85 (diff)
[PATCH] Swapless page migration: modify core logic
Use the migration entries for page migration. This modifies the migration code to use the new migration entries. It now becomes possible to migrate anonymous pages without having to add a swap entry. We add a couple of new functions to replace migration entries with the proper ptes. We cannot take the tree_lock for migrating anonymous pages anymore. However, we know that we hold the only remaining reference to the page when the page count reaches 1. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--mm/migrate.c53
1 files changed, 21 insertions, 32 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 81721a061d50..8f91463eab4e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -254,14 +254,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
254{ 254{
255 struct page **radix_pointer; 255 struct page **radix_pointer;
256 256
257 if (!mapping) {
258 /* Anonymous page */
259 if (page_count(page) != 1)
260 return -EAGAIN;
261 return 0;
262 }
263
257 write_lock_irq(&mapping->tree_lock); 264 write_lock_irq(&mapping->tree_lock);
258 265
259 radix_pointer = (struct page **)radix_tree_lookup_slot( 266 radix_pointer = (struct page **)radix_tree_lookup_slot(
260 &mapping->page_tree, 267 &mapping->page_tree,
261 page_index(page)); 268 page_index(page));
262 269
263 if (!page_mapping(page) || 270 if (page_count(page) != 2 + !!PagePrivate(page) ||
264 page_count(page) != 2 + !!PagePrivate(page) ||
265 *radix_pointer != page) { 271 *radix_pointer != page) {
266 write_unlock_irq(&mapping->tree_lock); 272 write_unlock_irq(&mapping->tree_lock);
267 return -EAGAIN; 273 return -EAGAIN;
@@ -271,10 +277,12 @@ static int migrate_page_move_mapping(struct address_space *mapping,
271 * Now we know that no one else is looking at the page. 277 * Now we know that no one else is looking at the page.
272 */ 278 */
273 get_page(newpage); 279 get_page(newpage);
280#ifdef CONFIG_SWAP
274 if (PageSwapCache(page)) { 281 if (PageSwapCache(page)) {
275 SetPageSwapCache(newpage); 282 SetPageSwapCache(newpage);
276 set_page_private(newpage, page_private(page)); 283 set_page_private(newpage, page_private(page));
277 } 284 }
285#endif
278 286
279 *radix_pointer = newpage; 287 *radix_pointer = newpage;
280 __put_page(page); 288 __put_page(page);
@@ -308,7 +316,9 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
308 set_page_dirty(newpage); 316 set_page_dirty(newpage);
309 } 317 }
310 318
319#ifdef CONFIG_SWAP
311 ClearPageSwapCache(page); 320 ClearPageSwapCache(page);
321#endif
312 ClearPageActive(page); 322 ClearPageActive(page);
313 ClearPagePrivate(page); 323 ClearPagePrivate(page);
314 set_page_private(page, 0); 324 set_page_private(page, 0);
@@ -353,16 +363,6 @@ int migrate_page(struct address_space *mapping,
353 return rc; 363 return rc;
354 364
355 migrate_page_copy(newpage, page); 365 migrate_page_copy(newpage, page);
356
357 /*
358 * Remove auxiliary swap entries and replace
359 * them with real ptes.
360 *
361 * Note that a real pte entry will allow processes that are not
362 * waiting on the page lock to use the new page via the page tables
363 * before the new page is unlocked.
364 */
365 remove_from_swap(newpage);
366 return 0; 366 return 0;
367} 367}
368EXPORT_SYMBOL(migrate_page); 368EXPORT_SYMBOL(migrate_page);
@@ -530,23 +530,7 @@ redo:
530 goto unlock_page; 530 goto unlock_page;
531 531
532 /* 532 /*
533 * Establish swap ptes for anonymous pages or destroy pte 533 * Establish migration ptes or remove ptes
534 * maps for files.
535 *
536 * In order to reestablish file backed mappings the fault handlers
537 * will take the radix tree_lock which may then be used to stop
538 * processses from accessing this page until the new page is ready.
539 *
540 * A process accessing via a swap pte (an anonymous page) will take a
541 * page_lock on the old page which will block the process until the
542 * migration attempt is complete. At that time the PageSwapCache bit
543 * will be examined. If the page was migrated then the PageSwapCache
544 * bit will be clear and the operation to retrieve the page will be
545 * retried which will find the new page in the radix tree. Then a new
546 * direct mapping may be generated based on the radix tree contents.
547 *
548 * If the page was not migrated then the PageSwapCache bit
549 * is still set and the operation may continue.
550 */ 534 */
551 rc = -EPERM; 535 rc = -EPERM;
552 if (try_to_unmap(page, 1) == SWAP_FAIL) 536 if (try_to_unmap(page, 1) == SWAP_FAIL)
@@ -569,9 +553,9 @@ redo:
569 */ 553 */
570 mapping = page_mapping(page); 554 mapping = page_mapping(page);
571 if (!mapping) 555 if (!mapping)
572 goto unlock_both; 556 rc = migrate_page(mapping, newpage, page);
573 557
574 if (mapping->a_ops->migratepage) 558 else if (mapping->a_ops->migratepage)
575 /* 559 /*
576 * Most pages have a mapping and most filesystems 560 * Most pages have a mapping and most filesystems
577 * should provide a migration function. Anonymous 561 * should provide a migration function. Anonymous
@@ -584,10 +568,15 @@ redo:
584 else 568 else
585 rc = fallback_migrate_page(mapping, newpage, page); 569 rc = fallback_migrate_page(mapping, newpage, page);
586 570
587unlock_both: 571 if (!rc)
572 remove_migration_ptes(page, newpage);
573
588 unlock_page(newpage); 574 unlock_page(newpage);
589 575
590unlock_page: 576unlock_page:
577 if (rc)
578 remove_migration_ptes(page, page);
579
591 unlock_page(page); 580 unlock_page(page);
592 581
593next: 582next: