Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/migrate.h | 104
1 file changed, 104 insertions, 0 deletions
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 7db4c812a2a6..8f73cebfc3f5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -156,4 +156,108 @@ static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 }
 #endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE*/

+
+#ifdef CONFIG_MIGRATION
+
+#define MIGRATE_PFN_VALID	(1UL << 0)
+#define MIGRATE_PFN_MIGRATE	(1UL << 1)
+#define MIGRATE_PFN_LOCKED	(1UL << 2)
+#define MIGRATE_PFN_WRITE	(1UL << 3)
+#define MIGRATE_PFN_ERROR	(1UL << 4)
+#define MIGRATE_PFN_SHIFT	5
+
+static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
+{
+	if (!(mpfn & MIGRATE_PFN_VALID))
+		return NULL;
+	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
+}
+
+static inline unsigned long migrate_pfn(unsigned long pfn)
+{
+	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
+}
+
+/*
+ * struct migrate_vma_ops - migrate operation callback
+ *
+ * @alloc_and_copy: allocate destination memory and copy source memory to it
+ * @finalize_and_map: allow caller to map the successfully migrated pages
+ *
+ *
+ * The alloc_and_copy() callback happens once all source pages have been locked,
+ * unmapped and checked (whether they are pinned or not). All pages that can be
+ * migrated will have an entry in the src array set with the pfn value of the
+ * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set (other
+ * flags might be set but should be ignored by the callback).
+ *
+ * The alloc_and_copy() callback can then allocate destination memory and copy
+ * source memory to it for all those entries (i.e. with the MIGRATE_PFN_VALID
+ * and MIGRATE_PFN_MIGRATE flags set). Once these are allocated and copied, the
+ * callback must update each corresponding entry in the dst array with the pfn
+ * value of the destination page and with the MIGRATE_PFN_VALID and
+ * MIGRATE_PFN_LOCKED flags set (destination pages must have their struct pages
+ * locked, via lock_page()).
+ *
+ * At this point the alloc_and_copy() callback is done and returns.
+ *
+ * Note that the callback does not have to migrate all the pages that are
+ * marked with the MIGRATE_PFN_MIGRATE flag in the src array unless this is a
+ * migration from device memory to system memory (i.e. the MIGRATE_PFN_DEVICE
+ * flag is also set in the src array entry). If the device driver cannot
+ * migrate a device page back to system memory, then it must set the
+ * corresponding dst array entry to MIGRATE_PFN_ERROR. This will trigger a
+ * SIGBUS if the CPU tries to access any of the virtual addresses originally
+ * backed by this page. Because a SIGBUS is such a severe result for the
+ * userspace process, the device driver should avoid setting MIGRATE_PFN_ERROR
+ * unless it is really in an unrecoverable state.
+ *
+ * THE alloc_and_copy() CALLBACK MUST NOT CHANGE ANY OF THE SRC ARRAY ENTRIES
+ * OR BAD THINGS WILL HAPPEN!
+ *
+ *
+ * The finalize_and_map() callback happens after struct page migration from
+ * source to destination (destination struct pages are the struct pages for the
+ * memory allocated by the alloc_and_copy() callback). Migration can fail, and
+ * thus the finalize_and_map() callback allows the driver to inspect which
+ * pages were successfully migrated, and which were not. Successfully migrated
+ * pages will have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
+ *
+ * It is safe to update the device page table from within the
+ * finalize_and_map() callback because both destination and source page are
+ * still locked, and the mmap_sem is held in read mode (hence no one can unmap
+ * the range being migrated).
+ *
+ * Once the callback is done cleaning up and updating its page table (if it
+ * chose to do so, this is not an obligation) then it returns. At this point,
+ * the HMM core will finish up the final steps, and the migration is complete.
+ *
+ * THE finalize_and_map() CALLBACK MUST NOT CHANGE ANY OF THE SRC OR DST ARRAY
+ * ENTRIES OR BAD THINGS WILL HAPPEN!
+ */
+struct migrate_vma_ops {
+	void (*alloc_and_copy)(struct vm_area_struct *vma,
+			       const unsigned long *src,
+			       unsigned long *dst,
+			       unsigned long start,
+			       unsigned long end,
+			       void *private);
+	void (*finalize_and_map)(struct vm_area_struct *vma,
+				 const unsigned long *src,
+				 const unsigned long *dst,
+				 unsigned long start,
+				 unsigned long end,
+				 void *private);
+};
+
+int migrate_vma(const struct migrate_vma_ops *ops,
+		struct vm_area_struct *vma,
+		unsigned long start,
+		unsigned long end,
+		unsigned long *src,
+		unsigned long *dst,
+		void *private);
+
+#endif /* CONFIG_MIGRATION */
+
 #endif /* _LINUX_MIGRATE_H */
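
Example: the mpfn values above pack a page frame number together with status
bits in the low MIGRATE_PFN_SHIFT bits. A minimal sketch of decoding collected
src entries, assuming only the helpers defined in this patch; demo_walk_src()
and its npages parameter are hypothetical names used for illustration:

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical helper: inspect each collected src array entry. */
static void demo_walk_src(const unsigned long *src, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; i++) {
		/* Returns NULL unless MIGRATE_PFN_VALID is set. */
		struct page *page = migrate_pfn_to_page(src[i]);

		/* Only entries with both VALID and MIGRATE are candidates. */
		if (!page || !(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* The pfn itself lives above the flag bits. */
		WARN_ON(page_to_pfn(page) != src[i] >> MIGRATE_PFN_SHIFT);
	}
}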
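
To make the callback contract concrete, here is a hedged sketch of the two
operations for a plain system-to-system copy, not a definitive driver
implementation. The demo_* names are invented, error handling is minimal, and
the device-memory case (the MIGRATE_PFN_DEVICE flag, which the comment above
references but this patch does not define) is ignored:

#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Sketch of alloc_and_copy(): allocate a dst page and copy, per entry. */
static void demo_alloc_and_copy(struct vm_area_struct *vma,
				const unsigned long *src,
				unsigned long *dst,
				unsigned long start,
				unsigned long end,
				void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		/* Skip entries the core did not mark as migratable. */
		if (!spage || !(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
		if (!dpage)
			continue;	/* leaving dst[i] empty skips this page */

		lock_page(dpage);	/* destination pages must be locked */
		copy_highpage(dpage, spage);

		/* Destination entries must carry VALID and LOCKED. */
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}
}

/* Sketch of finalize_and_map(): check which pages actually migrated. */
static void demo_finalize_and_map(struct vm_area_struct *vma,
				  const unsigned long *src,
				  const unsigned long *dst,
				  unsigned long start,
				  unsigned long end,
				  void *private)
{
	unsigned long addr, i;

	for (i = 0, addr = start; addr < end; addr += PAGE_SIZE, i++) {
		/* Only entries still flagged MIGRATE_PFN_MIGRATE made it. */
		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;
		/* A device driver would update its page table here. */
	}
}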
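
Finally, a sketch of wiring those callbacks into migrate_vma() for one small,
page-aligned range. The on-stack arrays and the demo_migrate_range() name are
assumptions for illustration; a real driver would size src/dst to the range
(likely from the heap), and, as the comment above notes, the caller must hold
mmap_sem in read mode:

#include <linux/string.h>

static const struct migrate_vma_ops demo_migrate_ops = {
	.alloc_and_copy		= demo_alloc_and_copy,
	.finalize_and_map	= demo_finalize_and_map,
};

static int demo_migrate_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	unsigned long src[64], dst[64];	/* small range only, for the sketch */
	unsigned long npages = (end - start) >> PAGE_SHIFT;

	if (npages > 64)
		return -EINVAL;

	/* One entry per page in [start, end); zero means "no page". */
	memset(src, 0, sizeof(src));
	memset(dst, 0, sizeof(dst));

	return migrate_vma(&demo_migrate_ops, vma, start, end,
			   src, dst, NULL);
}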