diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-06-23 05:03:38 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-23 10:42:51 -0400 |
commit | 04e62a29bf157ce1edd168f2b71b533c80d13628 (patch) | |
tree | 7f0d5a58eeef2c2e08da86dc7141a1ccd050a37d /mm/vmscan.c | |
parent | 442c9137de8d769053e81d325709dca72f0b5e44 (diff) |
[PATCH] More page migration: use migration entries for file pages
This implements the use of migration entries to preserve ptes of file backed
pages during migration. Processes can therefore be migrated back and forth
without losing their connection to pagecache pages.
Note that we implement the migration entries only for linear mappings.
Nonlinear mappings still require the unmapping of the ptes for migration.
And another writepage() ugliness shows up. writepage() can drop the page
lock. Therefore we have to remove migration ptes before calling writepages()
in order to avoid having migration entries point to unlocked pages.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 14 |
1 files changed, 13 insertions, 1 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index bc5d4f43036c..71a02e295037 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -290,11 +290,23 @@ static void handle_write_error(struct address_space *mapping, | |||
290 | unlock_page(page); | 290 | unlock_page(page); |
291 | } | 291 | } |
292 | 292 | ||
293 | /* possible outcome of pageout() */ | ||
294 | typedef enum { | ||
295 | /* failed to write page out, page is locked */ | ||
296 | PAGE_KEEP, | ||
297 | /* move page to the active list, page is locked */ | ||
298 | PAGE_ACTIVATE, | ||
299 | /* page has been sent to the disk successfully, page is unlocked */ | ||
300 | PAGE_SUCCESS, | ||
301 | /* page is clean and locked */ | ||
302 | PAGE_CLEAN, | ||
303 | } pageout_t; | ||
304 | |||
293 | /* | 305 | /* |
294 | * pageout is called by shrink_page_list() for each dirty page. | 306 | * pageout is called by shrink_page_list() for each dirty page. |
295 | * Calls ->writepage(). | 307 | * Calls ->writepage(). |
296 | */ | 308 | */ |
297 | pageout_t pageout(struct page *page, struct address_space *mapping) | 309 | static pageout_t pageout(struct page *page, struct address_space *mapping) |
298 | { | 310 | { |
299 | /* | 311 | /* |
300 | * If the page is dirty, only perform writeback if that write | 312 | * If the page is dirty, only perform writeback if that write |