author     Christoph Lameter <clameter@sgi.com>     2006-06-23 05:03:32 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-06-23 10:42:50 -0400
commit     c3fcf8a5daacf350f0632e1379414c01f34eeea3
tree       ec7a4cd5d7a2b60ec4539479bb5b24c46b5cf72f
parent     5b5c7120e2154239837fad5e3c7b7b781092b19c
[PATCH] page migration cleanup: extract try_to_unmap from migration functions
Extract try_to_unmap and rename remove_references -> move_mapping
try_to_unmap() may significantly change the page state, for example by setting
the dirty bit. It is therefore best to unmap in migrate_pages() before
calling any migration functions.
migrate_page_remove_references() will then only move the new page in place of
the old page in the mapping. Rename the function to
migrate_page_move_mapping().
This allows us to get rid of the special unmapping for the fallback path.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--   mm/migrate.c | 76
1 file changed, 31 insertions(+), 45 deletions(-)
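For orientation before the diff: the ordering the patch establishes in the migrate_pages() fallback path can be sketched roughly as below. This is a condensed, illustrative fragment only, not code from the patch; the helper name is hypothetical, the headers are approximate for a 2.6.17-era tree, and the locking, writeback checks and retry bookkeeping of the real loop are omitted.

```c
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

/*
 * Hypothetical, heavily condensed sketch of the new sequence:
 * unmap the old page first, then let the migration function
 * move the page in the mapping.
 */
static int sketch_migrate_one_page(struct page *newpage, struct page *page)
{
	int rc;

	/*
	 * Unmapping now happens in the caller. try_to_unmap() may still
	 * change page state (e.g. set the dirty bit), which is why it
	 * must run before any migration function is called.
	 */
	if (try_to_unmap(page, 1) == SWAP_FAIL)
		return -EPERM;		/* a vma has VM_LOCKED set */

	if (page_mapped(page))
		return -EAGAIN;		/* not all ptes removed, try again */

	/*
	 * migrate_page() starts with migrate_page_move_mapping(), which
	 * only replaces the old page with the new one in the mapping's
	 * radix tree; it no longer unmaps anything itself.
	 */
	rc = migrate_page(newpage, page);

	return rc;
}
```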
diff --git a/mm/migrate.c b/mm/migrate.c
index 2803a6698dd6..8095c607a494 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -166,15 +166,14 @@ retry:
 }
 
 /*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
+ * Replace the page in the mapping.
  *
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate set.
  */
-static int migrate_page_remove_references(struct page *newpage,
+static int migrate_page_move_mapping(struct page *newpage,
 				struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
@@ -183,35 +182,6 @@ static int migrate_page_remove_references(struct page *newpage,
 	if (!mapping)
 		return -EAGAIN;
 
-	/*
-	 * Establish swap ptes for anonymous pages or destroy pte
-	 * maps for files.
-	 *
-	 * In order to reestablish file backed mappings the fault handlers
-	 * will take the radix tree_lock which may then be used to stop
-	 * processses from accessing this page until the new page is ready.
-	 *
-	 * A process accessing via a swap pte (an anonymous page) will take a
-	 * page_lock on the old page which will block the process until the
-	 * migration attempt is complete. At that time the PageSwapCache bit
-	 * will be examined. If the page was migrated then the PageSwapCache
-	 * bit will be clear and the operation to retrieve the page will be
-	 * retried which will find the new page in the radix tree. Then a new
-	 * direct mapping may be generated based on the radix tree contents.
-	 *
-	 * If the page was not migrated then the PageSwapCache bit
-	 * is still set and the operation may continue.
-	 */
-	if (try_to_unmap(page, 1) == SWAP_FAIL)
-		/* A vma has VM_LOCKED set -> permanent failure */
-		return -EPERM;
-
-	/*
-	 * Give up if we were unable to remove all mappings.
-	 */
-	if (page_mapcount(page))
-		return -EAGAIN;
-
 	write_lock_irq(&mapping->tree_lock);
 
 	radix_pointer = (struct page **)radix_tree_lookup_slot(
@@ -310,7 +280,7 @@ int migrate_page(struct page *newpage, struct page *page)
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
-	rc = migrate_page_remove_references(newpage, page);
+	rc = migrate_page_move_mapping(newpage, page);
 
 	if (rc)
 		return rc;
@@ -349,7 +319,7 @@ int buffer_migrate_page(struct page *newpage, struct page *page)
 
 	head = page_buffers(page);
 
-	rc = migrate_page_remove_references(newpage, page);
+	rc = migrate_page_move_mapping(newpage, page);
 
 	if (rc)
 		return rc;
@@ -482,6 +452,33 @@ redo:
 		lock_page(newpage);
 
 		/*
+		 * Establish swap ptes for anonymous pages or destroy pte
+		 * maps for files.
+		 *
+		 * In order to reestablish file backed mappings the fault handlers
+		 * will take the radix tree_lock which may then be used to stop
+		 * processses from accessing this page until the new page is ready.
+		 *
+		 * A process accessing via a swap pte (an anonymous page) will take a
+		 * page_lock on the old page which will block the process until the
+		 * migration attempt is complete. At that time the PageSwapCache bit
+		 * will be examined. If the page was migrated then the PageSwapCache
+		 * bit will be clear and the operation to retrieve the page will be
+		 * retried which will find the new page in the radix tree. Then a new
+		 * direct mapping may be generated based on the radix tree contents.
+		 *
+		 * If the page was not migrated then the PageSwapCache bit
+		 * is still set and the operation may continue.
+		 */
+		rc = -EPERM;
+		if (try_to_unmap(page, 1) == SWAP_FAIL)
+			/* A vma has VM_LOCKED set -> permanent failure */
+			goto unlock_both;
+
+		rc = -EAGAIN;
+		if (page_mapped(page))
+			goto unlock_both;
+		/*
 		 * Pages are properly locked and writeback is complete.
 		 * Try to migrate the page.
 		 */
@@ -501,17 +498,6 @@ redo:
 			goto unlock_both;
 		}
 
-		/* Make sure the dirty bit is up to date */
-		if (try_to_unmap(page, 1) == SWAP_FAIL) {
-			rc = -EPERM;
-			goto unlock_both;
-		}
-
-		if (page_mapcount(page)) {
-			rc = -EAGAIN;
-			goto unlock_both;
-		}
-
 		/*
 		 * Default handling if a filesystem does not provide
 		 * a migration function. We can only migrate clean