aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-06-25 08:46:48 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-25 13:00:55 -0400
commit7b2259b3e53f128c10a9fded0965e69d4a949847 (patch)
treec1827144c22dd49775190e05de791531e9fd21fd
parent68402ddc677005ed1b1359bbc1f279548cfc0928 (diff)
[PATCH] page migration: Support a vma migration function
Hooks for calling vma specific migration functions. With this patch a vma may define a vma->vm_ops->migrate function. That function may perform page migration on its own (some vmas may not contain page structs and therefore cannot be handled by regular page migration. Pages in a vma may require special preparatory treatment before migration is possible, etc.). Only mmap_sem is held when the migration function is called. The migrate() function gets passed two sets of nodemasks describing the source and the target of the migration. The flags parameter either contains MPOL_MF_MOVE which means that only pages used exclusively by the specified mm should be moved or MPOL_MF_MOVE_ALL which means that pages shared with other processes should also be moved. The migration function returns 0 on success or an error condition. An error condition will prevent regular page migration from occurring. On its own this patch cannot be included since there are no users for this functionality. But it seems that the uncached allocator will need this functionality at some point. Signed-off-by: Christoph Lameter <clameter@sgi.com> Cc: Hugh Dickins <hugh@veritas.com> Cc: Andi Kleen <ak@muc.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--include/linux/migrate.h11
-rw-r--r--include/linux/mm.h2
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/migrate.c20
4 files changed, 37 insertions, 2 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 5dba23a1c0d0..48148e0cdbd1 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -16,7 +16,9 @@ extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
 
 extern int migrate_prep(void);
-
+extern int migrate_vmas(struct mm_struct *mm,
+		const nodemask_t *from, const nodemask_t *to,
+		unsigned long flags);
 #else
 
 static inline int isolate_lru_page(struct page *p, struct list_head *list)
@@ -30,6 +32,13 @@ static inline int migrate_pages_to(struct list_head *pagelist,
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 
+static inline int migrate_vmas(struct mm_struct *mm,
+		const nodemask_t *from, const nodemask_t *to,
+		unsigned long flags)
+{
+	return -ENOSYS;
+}
+
 /* Possible settings for the migrate_page() method in address_operations */
 #define migrate_page NULL
 #define fail_migrate_page NULL
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 71c5d2f667ed..a929ea197e48 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -206,6 +206,8 @@ struct vm_operations_struct {
 	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 					unsigned long addr);
+	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+		const nodemask_t *to, unsigned long flags);
 #endif
 };
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ec4a1a950df9..73e0f23b7f51 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -632,6 +632,10 @@ int do_migrate_pages(struct mm_struct *mm,
 
 	down_read(&mm->mmap_sem);
 
+	err = migrate_vmas(mm, from_nodes, to_nodes, flags);
+	if (err)
+		goto out;
+
 /*
  * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
  * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
@@ -691,7 +695,7 @@ int do_migrate_pages(struct mm_struct *mm,
 		if (err < 0)
 			break;
 	}
-
+out:
 	up_read(&mm->mmap_sem);
 	if (err < 0)
 		return err;
diff --git a/mm/migrate.c b/mm/migrate.c
index 1c2a71aa05cd..0576c0535988 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -976,3 +976,23 @@ out2:
 }
 #endif
 
979/*
980 * Call migration functions in the vma_ops that may prepare
981 * memory in a vm for migration. migration functions may perform
982 * the migration for vmas that do not have an underlying page struct.
983 */
984int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
985 const nodemask_t *from, unsigned long flags)
986{
987 struct vm_area_struct *vma;
988 int err = 0;
989
990 for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
991 if (vma->vm_ops && vma->vm_ops->migrate) {
992 err = vma->vm_ops->migrate(vma, to, from, flags);
993 if (err)
994 break;
995 }
996 }
997 return err;
998}