-rw-r--r--   include/linux/hmm.h   71
-rw-r--r--   mm/hmm.c               62
2 files changed, 68 insertions, 65 deletions
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 740bb00853f5..7007123842ba 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -21,8 +21,8 @@
  *
  * HMM address space mirroring API:
  *
- * Use HMM address space mirroring if you want to mirror range of the CPU page
- * table of a process into a device page table. Here, "mirror" means "keep
+ * Use HMM address space mirroring if you want to mirror a range of the CPU
+ * page tables of a process into a device page table. Here, "mirror" means "keep
  * synchronized". Prerequisites: the device must provide the ability to write-
  * protect its page tables (at PAGE_SIZE granularity), and must be able to
  * recover from the resulting potential page faults.
@@ -105,10 +105,11 @@ struct hmm {
  * HMM_PFN_WRITE: CPU page table has write permission set
  * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
  *
- * The driver provide a flags array, if driver valid bit for an entry is bit
- * 3 ie (entry & (1 << 3)) is true if entry is valid then driver must provide
+ * The driver provides a flags array for mapping page protections to device
+ * PTE bits. If the driver valid bit for an entry is bit 3,
+ * i.e., (entry & (1 << 3)), then the driver must provide
  * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
- * Same logic apply to all flags. This is same idea as vm_page_prot in vma
+ * Same logic apply to all flags. This is the same idea as vm_page_prot in vma
  * except that this is per device driver rather than per architecture.
  */
 enum hmm_pfn_flag_e {
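
To illustrate the mapping described in the comment above: a driver whose hardware valid bit is bit 3 could fill the flags array roughly as follows. This is only a sketch, the MYDEV_* bits are hypothetical, and it assumes enum hmm_pfn_flag_e is terminated by HMM_PFN_FLAG_MAX as in this header.

	/* Hypothetical device PTE bits; only the mapping scheme comes from hmm.h. */
	#define MYDEV_PTE_VALID		(1UL << 3)
	#define MYDEV_PTE_WRITE		(1UL << 4)
	#define MYDEV_PTE_DEVICE	(1UL << 5)

	static const uint64_t mydev_range_flags[HMM_PFN_FLAG_MAX] = {
		[HMM_PFN_VALID]		 = MYDEV_PTE_VALID,
		[HMM_PFN_WRITE]		 = MYDEV_PTE_WRITE,
		[HMM_PFN_DEVICE_PRIVATE] = MYDEV_PTE_DEVICE,
	};
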
@@ -129,13 +130,13 @@ enum hmm_pfn_flag_e {
  * be mirrored by a device, because the entry will never have HMM_PFN_VALID
  * set and the pfn value is undefined.
  *
- * Driver provide entry value for none entry, error entry and special entry,
- * driver can alias (ie use same value for error and special for instance). It
- * should not alias none and error or special.
+ * Driver provides values for none entry, error entry, and special entry.
+ * Driver can alias (i.e., use same value) error and special, but
+ * it should not alias none with error or special.
  *
  * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
  * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous,
- * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table
+ * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
  * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
  */
 enum hmm_pfn_value_e {
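
Continuing the sketch above, the matching values array could look like the following. The encodings are hypothetical (and assume HMM_PFN_VALUE_MAX terminates the enum), but they follow the aliasing rule from the comment: error and special may share a value, none must not.

	static const uint64_t mydev_range_values[HMM_PFN_VALUE_MAX] = {
		[HMM_PFN_ERROR]   = 0x3UL,	/* poisoned / inaccessible entry */
		[HMM_PFN_SPECIAL] = 0x3UL,	/* aliased with error, which is allowed */
		[HMM_PFN_NONE]	  = 0x0UL,	/* must not alias error or special */
	};
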
@@ -158,6 +159,7 @@ enum hmm_pfn_value_e {
  * @values: pfn value for some special case (none, special, error, ...)
  * @default_flags: default flags for the range (write, read, ... see hmm doc)
  * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
+ * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
  * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
  * @valid: pfns array did not change since it has been fill by an HMM function
  */
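
Tying the two arrays into a struct hmm_range, and using default_flags with pfn_flags_mask as documented above to request write access for every page of the range, might look like the fragment below. It is a sketch reusing the hypothetical mydev_* arrays from the earlier examples and assuming the field names documented above (the struct member for the per-entry shift is assumed to be pfn_shift).

	struct hmm_range range = {
		.flags		= mydev_range_flags,
		.values		= mydev_range_values,
		.pfn_shift	= 6,	/* hypothetical: device flag bits 3-5 sit below the pfn */
		/* same as mydev_range_flags[HMM_PFN_VALID] | mydev_range_flags[HMM_PFN_WRITE] */
		.default_flags	= MYDEV_PTE_VALID | MYDEV_PTE_WRITE,
		.pfn_flags_mask	= 0,	/* ignore per-pfn input flags, only default_flags matter */
	};
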
@@ -180,7 +182,7 @@ struct hmm_range {
 /*
  * hmm_range_page_shift() - return the page shift for the range
  * @range: range being queried
- * Returns: page shift (page size = 1 << page shift) for the range
+ * Return: page shift (page size = 1 << page shift) for the range
  */
 static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
 {
@@ -190,7 +192,7 @@ static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
 /*
  * hmm_range_page_size() - return the page size for the range
  * @range: range being queried
- * Returns: page size for the range in bytes
+ * Return: page size for the range in bytes
  */
 static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
 {
@@ -201,7 +203,7 @@ static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
  * hmm_range_wait_until_valid() - wait for range to be valid
  * @range: range affected by invalidation to wait on
  * @timeout: time out for wait in ms (ie abort wait after that period of time)
- * Returns: true if the range is valid, false otherwise.
+ * Return: true if the range is valid, false otherwise.
  */
 static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
 					      unsigned long timeout)
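
The valid/wait helpers above are meant to be used in the register/snapshot/retry loop described in Documentation/vm/hmm.rst. A condensed sketch of that loop follows; error handling is trimmed and the mydev_* locking helpers and timeout constant are hypothetical.

	/* range.pfns[] must point at a driver-allocated array covering the range. */
	hmm_range_register(&range, mm, start, end, PAGE_SHIFT);

	again:
	if (!hmm_range_wait_until_valid(&range, MYDEV_RANGE_TIMEOUT_MS))
		goto again;			/* or give up after a few attempts */

	down_read(&mm->mmap_sem);
	ret = hmm_range_snapshot(&range);
	up_read(&mm->mmap_sem);
	if (ret == -EAGAIN)
		goto again;
	if (ret < 0)
		goto out;

	mydev_lock_device_pagetable(mydev);
	if (!hmm_range_valid(&range)) {		/* an invalidation raced with us */
		mydev_unlock_device_pagetable(mydev);
		goto again;
	}
	/* ... program the device page table from range.pfns[] ... */
	mydev_unlock_device_pagetable(mydev);
	out:
	hmm_range_unregister(&range);
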
@@ -222,7 +224,7 @@ static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
 /*
  * hmm_range_valid() - test if a range is valid or not
  * @range: range
- * Returns: true if the range is valid, false otherwise.
+ * Return: true if the range is valid, false otherwise.
  */
 static inline bool hmm_range_valid(struct hmm_range *range)
 {
@@ -233,7 +235,7 @@ static inline bool hmm_range_valid(struct hmm_range *range)
  * hmm_device_entry_to_page() - return struct page pointed to by a device entry
  * @range: range use to decode device entry value
  * @entry: device entry value to get corresponding struct page from
- * Returns: struct page pointer if entry is a valid, NULL otherwise
+ * Return: struct page pointer if entry is a valid, NULL otherwise
  *
  * If the device entry is valid (ie valid flag set) then return the struct page
  * matching the entry value. Otherwise return NULL.
@@ -256,7 +258,7 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang
  * hmm_device_entry_to_pfn() - return pfn value store in a device entry
  * @range: range use to decode device entry value
  * @entry: device entry to extract pfn from
- * Returns: pfn value if device entry is valid, -1UL otherwise
+ * Return: pfn value if device entry is valid, -1UL otherwise
  */
 static inline unsigned long
 hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
@@ -276,7 +278,7 @@ hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
  * hmm_device_entry_from_page() - create a valid device entry for a page
  * @range: range use to encode HMM pfn value
  * @page: page for which to create the device entry
- * Returns: valid device entry for the page
+ * Return: valid device entry for the page
  */
 static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
 						  struct page *page)
@@ -289,7 +291,7 @@ static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
  * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
  * @range: range use to encode HMM pfn value
  * @pfn: pfn value for which to create the device entry
- * Returns: valid device entry for the pfn
+ * Return: valid device entry for the pfn
  */
 static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
 						 unsigned long pfn)
@@ -394,7 +396,7 @@ enum hmm_update_event {
 };
 
 /*
- * struct hmm_update - HMM update informations for callback
+ * struct hmm_update - HMM update information for callback
  *
  * @start: virtual start address of the range to update
  * @end: virtual end address of the range to update
@@ -428,8 +430,8 @@ struct hmm_mirror_ops {
 	/* sync_cpu_device_pagetables() - synchronize page tables
 	 *
 	 * @mirror: pointer to struct hmm_mirror
-	 * @update: update informations (see struct hmm_update)
-	 * Returns: -EAGAIN if update.blockable false and callback need to
+	 * @update: update information (see struct hmm_update)
+	 * Return: -EAGAIN if update.blockable false and callback need to
 	 *          block, 0 otherwise.
 	 *
 	 * This callback ultimately originates from mmu_notifiers when the CPU
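
A minimal sketch of such a callback, assuming the mirror plus const struct hmm_update pointer signature used by this header; the mydev_* structure and helpers are hypothetical.

	static int mydev_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
						    const struct hmm_update *update)
	{
		struct mydev *mydev = container_of(mirror, struct mydev, mirror);

		if (!update->blockable && !mydev_can_invalidate_atomically(mydev))
			return -EAGAIN;	/* caller cannot sleep; ask HMM to back off */

		mydev_invalidate_range(mydev, update->start, update->end);
		return 0;
	}
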
@@ -468,13 +470,13 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror);
 /*
  * hmm_mirror_mm_is_alive() - test if mm is still alive
  * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
- * Returns: false if the mm is dead, true otherwise
+ * Return: false if the mm is dead, true otherwise
  *
- * This is an optimization it will not accurately always return -EINVAL if the
- * mm is dead ie there can be false negative (process is being kill but HMM is
- * not yet inform of that). It is only intented to be use to optimize out case
- * where driver is about to do something time consuming and it would be better
- * to skip it if the mm is dead.
+ * This is an optimization, it will not always accurately return false if the
+ * mm is dead; i.e., there can be false negatives (process is being killed but
+ * HMM is not yet informed of that). It is only intended to be used to optimize
+ * out cases where the driver is about to do something time consuming and it
+ * would be better to skip it if the mm is dead.
  */
 static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
 {
@@ -489,7 +491,6 @@ static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
 	return true;
 }
 
-
 /*
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
@@ -562,7 +563,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block)
 	ret = hmm_range_fault(range, block);
 	if (ret <= 0) {
 		if (ret == -EBUSY || !ret) {
-			/* Same as above drop mmap_sem to match old API. */
+			/* Same as above, drop mmap_sem to match old API. */
 			up_read(&range->vma->vm_mm->mmap_sem);
 			ret = -EBUSY;
 		} else if (ret == -EAGAIN)
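
The caller-side consequence of the code above: hmm_vma_fault() is entered with mmap_sem held for read, and on -EBUSY the semaphore has already been dropped on the caller's behalf, so it must be re-acquired before retrying. A sketch of that contract only (retry policy and other error handling are the caller's choice):

	down_read(&mm->mmap_sem);
	retry:
	ret = hmm_vma_fault(&range, true);
	if (ret == -EBUSY) {
		/* mmap_sem was dropped for us; take it again before retrying */
		down_read(&mm->mmap_sem);
		goto retry;
	}
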
@@ -629,7 +630,7 @@ struct hmm_devmem_ops {
 	 * @page: pointer to struct page backing virtual address (unreliable)
 	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
 	 * @pmdp: page middle directory
-	 * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
+	 * Return: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
 	 *          on error
 	 *
 	 * The callback occurs whenever there is a CPU page fault or GUP on a
@@ -637,14 +638,14 @@ struct hmm_devmem_ops {
 	 * page back to regular memory (CPU accessible).
 	 *
 	 * The device driver is free to migrate more than one page from the
-	 * fault() callback as an optimization. However if device decide to
-	 * migrate more than one page it must always priotirize the faulting
+	 * fault() callback as an optimization. However if the device decides
+	 * to migrate more than one page it must always priotirize the faulting
 	 * address over the others.
 	 *
-	 * The struct page pointer is only given as an hint to allow quick
+	 * The struct page pointer is only given as a hint to allow quick
 	 * lookup of internal device driver data. A concurrent migration
-	 * might have already free that page and the virtual address might
-	 * not longer be back by it. So it should not be modified by the
+	 * might have already freed that page and the virtual address might
+	 * no longer be backed by it. So it should not be modified by the
 	 * callback.
 	 *
 	 * Note that mmap semaphore is held in read mode at least when this
@@ -671,7 +672,7 @@ struct hmm_devmem_ops {
  * @ref: per CPU refcount
  * @page_fault: callback when CPU fault on an unaddressable device page
  *
- * This an helper structure for device drivers that do not wish to implement
+ * This is a helper structure for device drivers that do not wish to implement
  * the gory details related to hotplugging new memoy and allocating struct
  * pages.
  *
diff --git a/mm/hmm.c b/mm/hmm.c
index c62ae414a3a2..4db5dcf110ba 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -153,9 +153,8 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 
 	/* Wake-up everyone waiting on any range. */
 	mutex_lock(&hmm->lock);
-	list_for_each_entry(range, &hmm->ranges, list) {
+	list_for_each_entry(range, &hmm->ranges, list)
 		range->valid = false;
-	}
 	wake_up_all(&hmm->wq);
 	mutex_unlock(&hmm->lock);
 
@@ -166,9 +165,10 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 		list_del_init(&mirror->list);
 		if (mirror->ops->release) {
 			/*
-			 * Drop mirrors_sem so callback can wait on any pending
-			 * work that might itself trigger mmu_notifier callback
-			 * and thus would deadlock with us.
+			 * Drop mirrors_sem so the release callback can wait
+			 * on any pending work that might itself trigger a
+			 * mmu_notifier callback and thus would deadlock with
+			 * us.
 			 */
 			up_write(&hmm->mirrors_sem);
 			mirror->ops->release(mirror);
@@ -223,11 +223,8 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 		int ret;
 
 		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
-		if (!update.blockable && ret == -EAGAIN) {
-			up_read(&hmm->mirrors_sem);
-			ret = -EAGAIN;
-			goto out;
-		}
+		if (!update.blockable && ret == -EAGAIN)
+			break;
 	}
 	up_read(&hmm->mirrors_sem);
 
@@ -271,6 +268,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
  *
  * @mirror: new mirror struct to register
  * @mm: mm to register against
+ * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
  *
  * To start mirroring a process address space, the device driver must register
  * an HMM mirror struct.
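
For context, a driver typically embeds a struct hmm_mirror in its own per-process structure and registers it roughly as below. This is a sketch: the mydev_* names are hypothetical, and the sync callback is the one sketched next to hmm_mirror_ops in hmm.h above.

	struct mydev_process {
		struct hmm_mirror	mirror;
		/* ... driver private per-process state ... */
	};

	static const struct hmm_mirror_ops mydev_mirror_ops = {
		.sync_cpu_device_pagetables	= mydev_sync_cpu_device_pagetables,
		.release			= mydev_mirror_release,
	};

	mp->mirror.ops = &mydev_mirror_ops;
	ret = hmm_mirror_register(&mp->mirror, current->mm);
	if (ret)
		return ret;		/* mirroring is not available for this mm */
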
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(hmm_mirror_register);
 /*
  * hmm_mirror_unregister() - unregister a mirror
  *
- * @mirror: new mirror struct to register
+ * @mirror: mirror struct to unregister
  *
  * Stop mirroring a process address space, and cleanup.
  */
@@ -372,7 +370,7 @@ static int hmm_pfns_bad(unsigned long addr,
  * @fault: should we fault or not ?
  * @write_fault: write fault ?
  * @walk: mm_walk structure
- * Returns: 0 on success, -EBUSY after page fault, or page fault error
+ * Return: 0 on success, -EBUSY after page fault, or page fault error
  *
  * This function will be called whenever pmd_none() or pte_none() returns true,
  * or whenever there is no page directory covering the virtual address range.
@@ -911,6 +909,7 @@ int hmm_range_register(struct hmm_range *range,
 		       unsigned page_shift)
 {
 	unsigned long mask = ((1UL << page_shift) - 1UL);
+	struct hmm *hmm;
 
 	range->valid = false;
 	range->hmm = NULL;
@@ -924,28 +923,29 @@ int hmm_range_register(struct hmm_range *range,
 	range->start = start;
 	range->end = end;
 
-	range->hmm = hmm_get_or_create(mm);
-	if (!range->hmm)
+	hmm = hmm_get_or_create(mm);
+	if (!hmm)
 		return -EFAULT;
 
 	/* Check if hmm_mm_destroy() was call. */
-	if (range->hmm->mm == NULL || range->hmm->dead) {
-		hmm_put(range->hmm);
+	if (hmm->mm == NULL || hmm->dead) {
+		hmm_put(hmm);
 		return -EFAULT;
 	}
 
-	/* Initialize range to track CPU page table update */
-	mutex_lock(&range->hmm->lock);
+	/* Initialize range to track CPU page table updates. */
+	mutex_lock(&hmm->lock);
 
-	list_add_rcu(&range->list, &range->hmm->ranges);
+	range->hmm = hmm;
+	list_add_rcu(&range->list, &hmm->ranges);
 
 	/*
 	 * If there are any concurrent notifiers we have to wait for them for
 	 * the range to be valid (see hmm_range_wait_until_valid()).
 	 */
-	if (!range->hmm->notifiers)
+	if (!hmm->notifiers)
 		range->valid = true;
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);
 
 	return 0;
 }
@@ -960,17 +960,19 @@ EXPORT_SYMBOL(hmm_range_register);
  */
 void hmm_range_unregister(struct hmm_range *range)
 {
+	struct hmm *hmm = range->hmm;
+
 	/* Sanity check this really should not happen. */
-	if (range->hmm == NULL || range->end <= range->start)
+	if (hmm == NULL || range->end <= range->start)
 		return;
 
-	mutex_lock(&range->hmm->lock);
+	mutex_lock(&hmm->lock);
 	list_del_rcu(&range->list);
-	mutex_unlock(&range->hmm->lock);
+	mutex_unlock(&hmm->lock);
 
 	/* Drop reference taken by hmm_range_register() */
 	range->valid = false;
-	hmm_put(range->hmm);
+	hmm_put(hmm);
 	range->hmm = NULL;
 }
 EXPORT_SYMBOL(hmm_range_unregister);
@@ -978,7 +980,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
 /*
  * hmm_range_snapshot() - snapshot CPU page table for a range
  * @range: range
- * Returns: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
+ * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
  *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
@@ -1061,7 +1063,7 @@ EXPORT_SYMBOL(hmm_range_snapshot);
  * hmm_range_fault() - try to fault some address in a virtual address range
  * @range: range being faulted
  * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of valid pages in range->pfns[] (from range start
+ * Return: number of valid pages in range->pfns[] (from range start
  *          address). This may be zero. If the return value is negative,
  *          then one of the following values may be returned:
  *
@@ -1179,7 +1181,7 @@ EXPORT_SYMBOL(hmm_range_fault);
  * @device: device against to dma map page to
  * @daddrs: dma address of mapped pages
  * @block: allow blocking on fault (if true it sleeps and do not drop mmap_sem)
- * Returns: number of pages mapped on success, -EAGAIN if mmap_sem have been
+ * Return: number of pages mapped on success, -EAGAIN if mmap_sem have been
  *          drop and you need to try again, some other error value otherwise
  *
  * Note same usage pattern as hmm_range_fault().
@@ -1267,7 +1269,7 @@ EXPORT_SYMBOL(hmm_range_dma_map);
  * @device: device against which dma map was done
  * @daddrs: dma address of mapped pages
  * @dirty: dirty page if it had the write flag set
- * Returns: number of page unmapped on success, -EINVAL otherwise
+ * Return: number of page unmapped on success, -EINVAL otherwise
  *
  * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
  * to the sync_cpu_device_pagetables() callback so that it is safe here to
@@ -1390,7 +1392,7 @@ static void hmm_devmem_free(struct page *page, void *data)
  * @ops: memory event device driver callback (see struct hmm_devmem_ops)
  * @device: device struct to bind the resource too
  * @size: size in bytes of the device memory to add
- * Returns: pointer to new hmm_devmem struct ERR_PTR otherwise
+ * Return: pointer to new hmm_devmem struct ERR_PTR otherwise
  *
  * This function first finds an empty range of physical address big enough to
  * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which