diff options
| author | Jason Gunthorpe <jgg@mellanox.com> | 2019-05-23 08:41:19 -0400 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2019-06-10 09:10:30 -0400 |
| commit | e36acfe6c86d13eec62321e1e86a1ce287e52e7d (patch) | |
| tree | dce3ca3311aa436c629a5443b126dd73865177df | |
| parent | 6d7c3cde93c1d9ac0b37f78ec3f2ff052159a242 (diff) | |
mm/hmm: Use hmm_mirror not mm as an argument for hmm_range_register
Ralph observes that hmm_range_register() can only be called by a driver
while a mirror is registered. Make this clear in the API by passing in the
mirror structure as a parameter.
This also simplifies understanding the lifetime model for struct hmm, as
the hmm pointer must be valid as part of a registered mirror so all we
need in hmm_range_register() is a simple kref_get.
Suggested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
| -rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_svm.c | 2 | ||||
| -rw-r--r-- | include/linux/hmm.h | 7 | ||||
| -rw-r--r-- | mm/hmm.c | 13 |
3 files changed, 9 insertions, 13 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 93ed43c413f0..8c92374afcf2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c | |||
| @@ -649,7 +649,7 @@ nouveau_svm_fault(struct nvif_notify *notify) | |||
| 649 | range.values = nouveau_svm_pfn_values; | 649 | range.values = nouveau_svm_pfn_values; |
| 650 | range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; | 650 | range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT; |
| 651 | again: | 651 | again: |
| 652 | ret = hmm_vma_fault(&range, true); | 652 | ret = hmm_vma_fault(&svmm->mirror, &range, true); |
| 653 | if (ret == 0) { | 653 | if (ret == 0) { |
| 654 | mutex_lock(&svmm->mutex); | 654 | mutex_lock(&svmm->mutex); |
| 655 | if (!hmm_vma_range_done(&range)) { | 655 | if (!hmm_vma_range_done(&range)) { |
diff --git a/include/linux/hmm.h b/include/linux/hmm.h index cb01cf1fa3c0..1fba6979adf4 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h | |||
| @@ -496,7 +496,7 @@ static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror) | |||
| 496 | * Please see Documentation/vm/hmm.rst for how to use the range API. | 496 | * Please see Documentation/vm/hmm.rst for how to use the range API. |
| 497 | */ | 497 | */ |
| 498 | int hmm_range_register(struct hmm_range *range, | 498 | int hmm_range_register(struct hmm_range *range, |
| 499 | struct mm_struct *mm, | 499 | struct hmm_mirror *mirror, |
| 500 | unsigned long start, | 500 | unsigned long start, |
| 501 | unsigned long end, | 501 | unsigned long end, |
| 502 | unsigned page_shift); | 502 | unsigned page_shift); |
| @@ -532,7 +532,8 @@ static inline bool hmm_vma_range_done(struct hmm_range *range) | |||
| 532 | } | 532 | } |
| 533 | 533 | ||
| 534 | /* This is a temporary helper to avoid merge conflict between trees. */ | 534 | /* This is a temporary helper to avoid merge conflict between trees. */ |
| 535 | static inline int hmm_vma_fault(struct hmm_range *range, bool block) | 535 | static inline int hmm_vma_fault(struct hmm_mirror *mirror, |
| 536 | struct hmm_range *range, bool block) | ||
| 536 | { | 537 | { |
| 537 | long ret; | 538 | long ret; |
| 538 | 539 | ||
| @@ -545,7 +546,7 @@ static inline int hmm_vma_fault(struct hmm_range *range, bool block) | |||
| 545 | range->default_flags = 0; | 546 | range->default_flags = 0; |
| 546 | range->pfn_flags_mask = -1UL; | 547 | range->pfn_flags_mask = -1UL; |
| 547 | 548 | ||
| 548 | ret = hmm_range_register(range, range->vma->vm_mm, | 549 | ret = hmm_range_register(range, mirror, |
| 549 | range->start, range->end, | 550 | range->start, range->end, |
| 550 | PAGE_SHIFT); | 551 | PAGE_SHIFT); |
| 551 | if (ret) | 552 | if (ret) |
| @@ -914,13 +914,13 @@ static void hmm_pfns_clear(struct hmm_range *range, | |||
| 914 | * Track updates to the CPU page table see include/linux/hmm.h | 914 | * Track updates to the CPU page table see include/linux/hmm.h |
| 915 | */ | 915 | */ |
| 916 | int hmm_range_register(struct hmm_range *range, | 916 | int hmm_range_register(struct hmm_range *range, |
| 917 | struct mm_struct *mm, | 917 | struct hmm_mirror *mirror, |
| 918 | unsigned long start, | 918 | unsigned long start, |
| 919 | unsigned long end, | 919 | unsigned long end, |
| 920 | unsigned page_shift) | 920 | unsigned page_shift) |
| 921 | { | 921 | { |
| 922 | unsigned long mask = ((1UL << page_shift) - 1UL); | 922 | unsigned long mask = ((1UL << page_shift) - 1UL); |
| 923 | struct hmm *hmm; | 923 | struct hmm *hmm = mirror->hmm; |
| 924 | 924 | ||
| 925 | range->valid = false; | 925 | range->valid = false; |
| 926 | range->hmm = NULL; | 926 | range->hmm = NULL; |
| @@ -934,20 +934,15 @@ int hmm_range_register(struct hmm_range *range, | |||
| 934 | range->start = start; | 934 | range->start = start; |
| 935 | range->end = end; | 935 | range->end = end; |
| 936 | 936 | ||
| 937 | hmm = hmm_get_or_create(mm); | ||
| 938 | if (!hmm) | ||
| 939 | return -EFAULT; | ||
| 940 | |||
| 941 | /* Check if hmm_mm_destroy() was call. */ | 937 | /* Check if hmm_mm_destroy() was call. */ |
| 942 | if (hmm->mm == NULL || hmm->dead) { | 938 | if (hmm->mm == NULL || hmm->dead) |
| 943 | hmm_put(hmm); | ||
| 944 | return -EFAULT; | 939 | return -EFAULT; |
| 945 | } | ||
| 946 | 940 | ||
| 947 | /* Initialize range to track CPU page table updates. */ | 941 | /* Initialize range to track CPU page table updates. */ |
| 948 | mutex_lock(&hmm->lock); | 942 | mutex_lock(&hmm->lock); |
| 949 | 943 | ||
| 950 | range->hmm = hmm; | 944 | range->hmm = hmm; |
| 945 | kref_get(&hmm->kref); | ||
| 951 | list_add_rcu(&range->list, &hmm->ranges); | 946 | list_add_rcu(&range->list, &hmm->ranges); |
| 952 | 947 | ||
| 953 | /* | 948 | /* |
