author    Christoph Lameter <clameter@sgi.com>    2006-06-23 05:03:33 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-06-23 10:42:50 -0400
commit    2d1db3b1170db4e8bf0531dd636742269c2cf579 (patch)
tree      1161ed614a55869c278234d7472673fd1a577887
parent    c3fcf8a5daacf350f0632e1379414c01f34eeea3 (diff)
[PATCH] page migration cleanup: pass "mapping" to migration functions
Change handling of address spaces.

Pass a pointer to the address space in which the page is migrated to all
migration functions.  This avoids repeatedly having to retrieve the address
space pointer from the page and checking it for validity.  The old page
mapping will change once migration has gone to a certain step, so it is
less confusing to have the pointer always available.

Move the setting of the mapping and index for the new page into
migrate_pages().

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
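To illustrate the new calling convention (the filesystem name and callback
below are hypothetical, not part of this patch), a filesystem wiring up its
own ->migratepage now receives the address space directly instead of
deriving it from the page:

	/* Sketch of a ->migratepage callback under the new signature;
	 * "examplefs" is a made-up filesystem used only for illustration. */
	static int examplefs_migratepage(struct address_space *mapping,
					 struct page *newpage, struct page *page)
	{
		/*
		 * The mapping is handed in by migrate_pages(); the old
		 * page_mapping(page) lookup and NULL check are no longer
		 * needed here.
		 */
		return migrate_page(mapping, newpage, page);
	}

	static struct address_space_operations examplefs_aops = {
		.migratepage	= examplefs_migratepage,
	};

Filesystems backed by buffer heads would point .migratepage at
buffer_migrate_page instead, which takes the same three arguments after
this change.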
-rw-r--r--  include/linux/fs.h       |  6
-rw-r--r--  include/linux/migrate.h  |  6
-rw-r--r--  mm/migrate.c             | 70
3 files changed, 42 insertions(+), 40 deletions(-)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c823a3815e24..e917403f4d58 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -376,7 +376,8 @@ struct address_space_operations {
 	struct page* (*get_xip_page)(struct address_space *, sector_t,
 			int);
 	/* migrate the contents of a page to the specified target */
-	int (*migratepage) (struct page *, struct page *);
+	int (*migratepage) (struct address_space *,
+			struct page *, struct page *);
 };
 
 struct backing_dev_info;
@@ -1772,7 +1773,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
 extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
 
 #ifdef CONFIG_MIGRATION
-extern int buffer_migrate_page(struct page *, struct page *);
+extern int buffer_migrate_page(struct address_space *,
+				struct page *, struct page *);
 #else
 #define buffer_migrate_page NULL
 #endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e8d3b08cc354..287c47b5e5df 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,12 +6,14 @@
 #ifdef CONFIG_MIGRATION
 extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
+extern int migrate_page(struct address_space *,
+			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, struct list_head *t,
 		struct list_head *moved, struct list_head *failed);
 extern int migrate_pages_to(struct list_head *pagelist,
 			struct vm_area_struct *vma, int dest);
-extern int fail_migrate_page(struct page *, struct page *);
+extern int fail_migrate_page(struct address_space *,
+			struct page *, struct page *);
 
 extern int migrate_prep(void);
 
diff --git a/mm/migrate.c b/mm/migrate.c
index 8095c607a494..f65e69d94527 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -173,15 +173,11 @@ retry:
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate set.
  */
-static int migrate_page_move_mapping(struct page *newpage,
-		struct page *page)
+static int migrate_page_move_mapping(struct address_space *mapping,
+		struct page *newpage, struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
 	struct page **radix_pointer;
 
-	if (!mapping)
-		return -EAGAIN;
-
 	write_lock_irq(&mapping->tree_lock);
 
 	radix_pointer = (struct page **)radix_tree_lookup_slot(
@@ -197,15 +193,8 @@ static int migrate_page_move_mapping(struct page *newpage,
 
 	/*
 	 * Now we know that no one else is looking at the page.
-	 *
-	 * Certain minimal information about a page must be available
-	 * in order for other subsystems to properly handle the page if they
-	 * find it through the radix tree update before we are finished
-	 * copying the page.
 	 */
 	get_page(newpage);
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
 		set_page_private(newpage, page_private(page));
@@ -262,7 +251,8 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 ***********************************************************/
 
 /* Always fail migration. Used for mappings that are not movable */
-int fail_migrate_page(struct page *newpage, struct page *page)
+int fail_migrate_page(struct address_space *mapping,
+			struct page *newpage, struct page *page)
 {
 	return -EIO;
 }
@@ -274,13 +264,14 @@ EXPORT_SYMBOL(fail_migrate_page);
 *
 * Pages are locked upon entry and exit.
 */
-int migrate_page(struct page *newpage, struct page *page)
+int migrate_page(struct address_space *mapping,
+		struct page *newpage, struct page *page)
 {
 	int rc;
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_move_mapping(newpage, page);
+	rc = migrate_page_move_mapping(mapping, newpage, page);
 
 	if (rc)
 		return rc;
@@ -305,21 +296,18 @@ EXPORT_SYMBOL(migrate_page);
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
-int buffer_migrate_page(struct page *newpage, struct page *page)
+int buffer_migrate_page(struct address_space *mapping,
+		struct page *newpage, struct page *page)
 {
-	struct address_space *mapping = page->mapping;
 	struct buffer_head *bh, *head;
 	int rc;
 
-	if (!mapping)
-		return -EAGAIN;
-
 	if (!page_has_buffers(page))
-		return migrate_page(newpage, page);
+		return migrate_page(mapping, newpage, page);
 
 	head = page_buffers(page);
 
-	rc = migrate_page_move_mapping(newpage, page);
+	rc = migrate_page_move_mapping(mapping, newpage, page);
 
 	if (rc)
 		return rc;
@@ -448,9 +436,6 @@ redo:
 			goto next;
 		}
 
-		newpage = lru_to_page(to);
-		lock_page(newpage);
-
 		/*
 		 * Establish swap ptes for anonymous pages or destroy pte
 		 * maps for files.
@@ -473,11 +458,18 @@ redo:
 		rc = -EPERM;
 		if (try_to_unmap(page, 1) == SWAP_FAIL)
 			/* A vma has VM_LOCKED set -> permanent failure */
-			goto unlock_both;
+			goto unlock_page;
 
 		rc = -EAGAIN;
 		if (page_mapped(page))
-			goto unlock_both;
+			goto unlock_page;
+
+		newpage = lru_to_page(to);
+		lock_page(newpage);
+		/* Prepare mapping for the new page.*/
+		newpage->index = page->index;
+		newpage->mapping = page->mapping;
 
 		/*
 		 * Pages are properly locked and writeback is complete.
 		 * Try to migrate the page.
@@ -494,7 +486,8 @@ redo:
 			 * own migration function. This is the most common
 			 * path for page migration.
 			 */
-			rc = mapping->a_ops->migratepage(newpage, page);
+			rc = mapping->a_ops->migratepage(mapping,
+							newpage, page);
 			goto unlock_both;
 		}
 
@@ -524,7 +517,7 @@ redo:
 		 */
 		if (!page_has_buffers(page) ||
 				try_to_release_page(page, GFP_KERNEL)) {
-			rc = migrate_page(newpage, page);
+			rc = migrate_page(mapping, newpage, page);
 			goto unlock_both;
 		}
 
@@ -553,12 +546,17 @@ unlock_page:
 		unlock_page(page);
 
 next:
-		if (rc == -EAGAIN) {
-			retry++;
-		} else if (rc) {
-			/* Permanent failure */
-			list_move(&page->lru, failed);
-			nr_failed++;
+		if (rc) {
+			if (newpage)
+				newpage->mapping = NULL;
+
+			if (rc == -EAGAIN)
+				retry++;
+			else {
+				/* Permanent failure */
+				list_move(&page->lru, failed);
+				nr_failed++;
+			}
 		} else {
 			if (newpage) {
 				/* Successful migration. Return page to LRU */