diff options
author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /include/linux/migrate.h | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'include/linux/migrate.h')
-rw-r--r-- | include/linux/migrate.h | 78 |
1 files changed, 9 insertions, 69 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 1e9f627967a..e39aeecfe9a 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -3,48 +3,21 @@ | |||
3 | 3 | ||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <linux/mempolicy.h> | 5 | #include <linux/mempolicy.h> |
6 | #include <linux/migrate_mode.h> | ||
7 | 6 | ||
8 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); | 7 | typedef struct page *new_page_t(struct page *, unsigned long private, int **); |
9 | 8 | ||
10 | /* | ||
11 | * Return values from address_space_operations.migratepage(): | ||
12 | * - negative errno on page migration failure; | ||
13 | * - zero on page migration success; | ||
14 | * | ||
15 | * The balloon page migration introduces this special case where a 'distinct' | ||
16 | * return code is used to flag a successful page migration to unmap_and_move(). | ||
17 | * This approach is necessary because page migration can race against balloon | ||
18 | * deflation procedure, and for such case we could introduce a nasty page leak | ||
19 | * if a successfully migrated balloon page gets released concurrently with | ||
20 | * migration's unmap_and_move() wrap-up steps. | ||
21 | */ | ||
22 | #define MIGRATEPAGE_SUCCESS 0 | ||
23 | #define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page | ||
24 | * successful migration case. | ||
25 | */ | ||
26 | enum migrate_reason { | ||
27 | MR_COMPACTION, | ||
28 | MR_MEMORY_FAILURE, | ||
29 | MR_MEMORY_HOTPLUG, | ||
30 | MR_SYSCALL, /* also applies to cpusets */ | ||
31 | MR_MEMPOLICY_MBIND, | ||
32 | MR_NUMA_MISPLACED, | ||
33 | MR_CMA | ||
34 | }; | ||
35 | |||
36 | #ifdef CONFIG_MIGRATION | 9 | #ifdef CONFIG_MIGRATION |
10 | #define PAGE_MIGRATION 1 | ||
37 | 11 | ||
38 | extern void putback_lru_pages(struct list_head *l); | 12 | extern void putback_lru_pages(struct list_head *l); |
39 | extern void putback_movable_pages(struct list_head *l); | ||
40 | extern int migrate_page(struct address_space *, | 13 | extern int migrate_page(struct address_space *, |
41 | struct page *, struct page *, enum migrate_mode); | 14 | struct page *, struct page *); |
42 | extern int migrate_pages(struct list_head *l, new_page_t x, | 15 | extern int migrate_pages(struct list_head *l, new_page_t x, |
43 | unsigned long private, bool offlining, | 16 | unsigned long private, bool offlining, |
44 | enum migrate_mode mode, int reason); | 17 | bool sync); |
45 | extern int migrate_huge_page(struct page *, new_page_t x, | 18 | extern int migrate_huge_pages(struct list_head *l, new_page_t x, |
46 | unsigned long private, bool offlining, | 19 | unsigned long private, bool offlining, |
47 | enum migrate_mode mode); | 20 | bool sync); |
48 | 21 | ||
49 | extern int fail_migrate_page(struct address_space *, | 22 | extern int fail_migrate_page(struct address_space *, |
50 | struct page *, struct page *); | 23 | struct page *, struct page *); |
@@ -58,15 +31,15 @@ extern void migrate_page_copy(struct page *newpage, struct page *page); | |||
58 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, | 31 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, |
59 | struct page *newpage, struct page *page); | 32 | struct page *newpage, struct page *page); |
60 | #else | 33 | #else |
34 | #define PAGE_MIGRATION 0 | ||
61 | 35 | ||
62 | static inline void putback_lru_pages(struct list_head *l) {} | 36 | static inline void putback_lru_pages(struct list_head *l) {} |
63 | static inline void putback_movable_pages(struct list_head *l) {} | ||
64 | static inline int migrate_pages(struct list_head *l, new_page_t x, | 37 | static inline int migrate_pages(struct list_head *l, new_page_t x, |
65 | unsigned long private, bool offlining, | 38 | unsigned long private, bool offlining, |
66 | enum migrate_mode mode, int reason) { return -ENOSYS; } | 39 | bool sync) { return -ENOSYS; } |
67 | static inline int migrate_huge_page(struct page *page, new_page_t x, | 40 | static inline int migrate_huge_pages(struct list_head *l, new_page_t x, |
68 | unsigned long private, bool offlining, | 41 | unsigned long private, bool offlining, |
69 | enum migrate_mode mode) { return -ENOSYS; } | 42 | bool sync) { return -ENOSYS; } |
70 | 43 | ||
71 | static inline int migrate_prep(void) { return -ENOSYS; } | 44 | static inline int migrate_prep(void) { return -ENOSYS; } |
72 | static inline int migrate_prep_local(void) { return -ENOSYS; } | 45 | static inline int migrate_prep_local(void) { return -ENOSYS; } |
@@ -92,37 +65,4 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, | |||
92 | #define fail_migrate_page NULL | 65 | #define fail_migrate_page NULL |
93 | 66 | ||
94 | #endif /* CONFIG_MIGRATION */ | 67 | #endif /* CONFIG_MIGRATION */ |
95 | |||
96 | #ifdef CONFIG_NUMA_BALANCING | ||
97 | extern int migrate_misplaced_page(struct page *page, int node); | ||
98 | extern int migrate_misplaced_page(struct page *page, int node); | ||
99 | extern bool migrate_ratelimited(int node); | ||
100 | #else | ||
101 | static inline int migrate_misplaced_page(struct page *page, int node) | ||
102 | { | ||
103 | return -EAGAIN; /* can't migrate now */ | ||
104 | } | ||
105 | static inline bool migrate_ratelimited(int node) | ||
106 | { | ||
107 | return false; | ||
108 | } | ||
109 | #endif /* CONFIG_NUMA_BALANCING */ | ||
110 | |||
111 | #if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE) | ||
112 | extern int migrate_misplaced_transhuge_page(struct mm_struct *mm, | ||
113 | struct vm_area_struct *vma, | ||
114 | pmd_t *pmd, pmd_t entry, | ||
115 | unsigned long address, | ||
116 | struct page *page, int node); | ||
117 | #else | ||
118 | static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm, | ||
119 | struct vm_area_struct *vma, | ||
120 | pmd_t *pmd, pmd_t entry, | ||
121 | unsigned long address, | ||
122 | struct page *page, int node) | ||
123 | { | ||
124 | return -EAGAIN; | ||
125 | } | ||
126 | #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/ | ||
127 | |||
128 | #endif /* _LINUX_MIGRATE_H */ | 68 | #endif /* _LINUX_MIGRATE_H */ |