 include/linux/mempolicy.h | 19 +++++++++++++++++++
 include/linux/migrate.h   | 21 ---------------------
 mm/Kconfig                |  2 +-
 mm/migrate.c              |  2 +-
 4 files changed, 21 insertions(+), 23 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3a39570b81b8..085c903fe0f1 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -59,6 +59,7 @@ enum {
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/nodemask.h>
+#include <linux/pagemap.h>
 
 struct mm_struct;
 
@@ -220,6 +221,24 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
 extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
 			int no_context);
 #endif
+
+/* Check if a vma is migratable */
+static inline int vma_migratable(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
+		return 0;
+	/*
+	 * Migration allocates pages in the highest zone. If we cannot
+	 * do so then migration (at least from node to node) is not
+	 * possible.
+	 */
+	if (vma->vm_file &&
+		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
+								< policy_zone)
+		return 0;
+	return 1;
+}
+
 #else
 
 struct mempolicy {};
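
Semantics of the moved helper, for context: a VMA is refused if it has VM_IO, VM_HUGETLB, VM_PFNMAP or VM_RESERVED set, or if it maps a file whose page cache cannot allocate from a zone at least as high as policy_zone. A minimal caller sketch, not part of this patch and with a hypothetical function name, using only vma_migratable() and the isolate_lru_page() interface declared in include/linux/migrate.h:

/* Hypothetical sketch: skip VMAs that vma_migratable() rejects before
 * trying to collect their pages for migration. */
static int collect_migratable_pages(struct mm_struct *mm,
					struct list_head *pagelist)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;	/* VM_IO/VM_PFNMAP etc., or a low-zone mapping */
		/* ...look up each mapped page here and hand it to
		 * isolate_lru_page(page, pagelist)... */
	}
	return 0;
}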
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e10a90a93b5d..03aea612d284 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -3,28 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
-#include <linux/pagemap.h>
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
-/* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
-{
-	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
-		return 0;
-	/*
-	 * Migration allocates pages in the highest zone. If we cannot
-	 * do so then migration (at least from node to node) is not
-	 * possible.
-	 */
-	if (vma->vm_file &&
-		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
-								< policy_zone)
-		return 0;
-	return 1;
-}
-
 extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
@@ -39,9 +21,6 @@ extern int migrate_vmas(struct mm_struct *mm,
 		const nodemask_t *from, const nodemask_t *to,
 		unsigned long flags);
 #else
-static inline int vma_migratable(struct vm_area_struct *vma)
-					{ return 0; }
-
 static inline int isolate_lru_page(struct page *p, struct list_head *list)
 					{ return -ENOSYS; }
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
diff --git a/mm/Kconfig b/mm/Kconfig
index c4de85285bb4..aa799007a11b 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -174,7 +174,7 @@ config SPLIT_PTLOCK_CPUS
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful for
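
The relaxed dependency lets CONFIG_MIGRATION be enabled on a non-NUMA kernel whose architecture provides ARCH_ENABLE_MEMORY_HOTREMOVE, since removing a memory section means its pages have to be moved elsewhere. Callers need no #ifdef of their own either way: when migration is compiled out, the inline stubs in include/linux/migrate.h return -ENOSYS and 0. A rough usage sketch with a hypothetical function name, using only the interfaces declared above:

/* Hypothetical sketch: isolate one page onto a private list and, since
 * nothing further is done with it here, put it straight back on the LRU.
 * With CONFIG_MIGRATION=n the stubs report -ENOSYS / 0 instead. */
static int try_isolate(struct page *page)
{
	LIST_HEAD(pagelist);
	int err;

	err = isolate_lru_page(page, &pagelist);
	if (!err)
		putback_lru_pages(&pagelist);
	return err;
}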
diff --git a/mm/migrate.c b/mm/migrate.c
index e7d13a708da0..376cceba82f9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1071,7 +1071,6 @@ out2:
 	mmput(mm);
 	return err;
 }
-#endif
 
 /*
  * Call migration functions in the vma_ops that may prepare
@@ -1093,3 +1092,4 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
 	}
 	return err;
 }
+#endif
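
Moving the #endif down pulls migrate_vmas() inside the conditional block that previously ended after the out2: code above (the matching #ifdef sits outside both hunks), so the function is only built where its callers exist. As the comment in the first hunk says, migrate_vmas() calls the migration functions in each VMA's vma_ops to prepare for a node-to-node move. A hedged caller sketch, with a hypothetical wrapper name and an assumed flags value of 0:

/* Hypothetical sketch: let every mapping in mm that provides a vm_ops
 * migration hook prepare for pages moving between the two node sets;
 * the return value is whatever migrate_vmas() propagates back. */
static int prepare_mm_for_node_move(struct mm_struct *mm,
					const nodemask_t *from,
					const nodemask_t *to)
{
	return migrate_vmas(mm, from, to, 0);	/* flags == 0 is an assumption */
}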