diff options
| author | Christian König <christian.koenig@amd.com> | 2016-03-18 14:29:52 -0400 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2016-03-21 11:52:14 -0400 |
| commit | ae20f12d2de6629ee6f679ccf22f9b7b209c464d (patch) | |
| tree | 3674da2e1ee23a9077ef5a7a3e4aa7004b5124da | |
| parent | 0d2b42b0bdba45c82d29d794ea30a4c90e3f4098 (diff) | |
drm/amdgpu: add invalidate_page callback for userptrs
Otherwise we can run into problems with the writeback code.
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 98 |
1 file changed, 72 insertions, 26 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index c47f22224a65..9f4a45cd2aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | |||
| @@ -106,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn, | |||
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | /** | 108 | /** |
| 109 | * amdgpu_mn_invalidate_node - unmap all BOs of a node | ||
| 110 | * | ||
| 111 | * @node: the node with the BOs to unmap | ||
| 112 | * | ||
| 113 | * We block for all BOs and unmap them by moving them | ||
| 114 | * into the system domain again. | ||
| 115 | */ | ||
| 116 | static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, | ||
| 117 | unsigned long start, | ||
| 118 | unsigned long end) | ||
| 119 | { | ||
| 120 | struct amdgpu_bo *bo; | ||
| 121 | long r; | ||
| 122 | |||
| 123 | list_for_each_entry(bo, &node->bos, mn_list) { | ||
| 124 | |||
| 125 | if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end)) | ||
| 126 | continue; | ||
| 127 | |||
| 128 | r = amdgpu_bo_reserve(bo, true); | ||
| 129 | if (r) { | ||
| 130 | DRM_ERROR("(%ld) failed to reserve user bo\n", r); | ||
| 131 | continue; | ||
| 132 | } | ||
| 133 | |||
| 134 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | ||
| 135 | true, false, MAX_SCHEDULE_TIMEOUT); | ||
| 136 | if (r <= 0) | ||
| 137 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); | ||
| 138 | |||
| 139 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); | ||
| 140 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 141 | if (r) | ||
| 142 | DRM_ERROR("(%ld) failed to validate user bo\n", r); | ||
| 143 | |||
| 144 | amdgpu_bo_unreserve(bo); | ||
| 145 | } | ||
| 146 | } | ||
| 147 | |||
| 148 | /** | ||
| 149 | * amdgpu_mn_invalidate_page - callback to notify about mm change | ||
| 150 | * | ||
| 151 | * @mn: our notifier | ||
| 152 | * @mm: the mm this callback is about | ||
| 153 | * @address: address of the invalidated page | ||
| 154 | * | ||
| 155 | * Invalidation of a single page. Blocks for all BOs mapping it | ||
| 156 | * and unmaps them by moving them into the system domain again. | ||
| 157 | */ | ||
| 158 | static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn, | ||
| 159 | struct mm_struct *mm, | ||
| 160 | unsigned long address) | ||
| 161 | { | ||
| 162 | struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); | ||
| 163 | struct interval_tree_node *it; | ||
| 164 | |||
| 165 | mutex_lock(&rmn->lock); | ||
| 166 | |||
| 167 | it = interval_tree_iter_first(&rmn->objects, address, address); | ||
| 168 | if (it) { | ||
| 169 | struct amdgpu_mn_node *node; | ||
| 170 | |||
| 171 | node = container_of(it, struct amdgpu_mn_node, it); | ||
| 172 | amdgpu_mn_invalidate_node(node, address, address); | ||
| 173 | } | ||
| 174 | |||
| 175 | mutex_unlock(&rmn->lock); | ||
| 176 | } | ||
| 177 | |||
| 178 | /** | ||
| 109 | * amdgpu_mn_invalidate_range_start - callback to notify about mm change | 179 | * amdgpu_mn_invalidate_range_start - callback to notify about mm change |
| 110 | * | 180 | * |
| 111 | * @mn: our notifier | 181 | * @mn: our notifier |
| @@ -132,36 +202,11 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 132 | it = interval_tree_iter_first(&rmn->objects, start, end); | 202 | it = interval_tree_iter_first(&rmn->objects, start, end); |
| 133 | while (it) { | 203 | while (it) { |
| 134 | struct amdgpu_mn_node *node; | 204 | struct amdgpu_mn_node *node; |
| 135 | struct amdgpu_bo *bo; | ||
| 136 | long r; | ||
| 137 | 205 | ||
| 138 | node = container_of(it, struct amdgpu_mn_node, it); | 206 | node = container_of(it, struct amdgpu_mn_node, it); |
| 139 | it = interval_tree_iter_next(it, start, end); | 207 | it = interval_tree_iter_next(it, start, end); |
| 140 | 208 | ||
| 141 | list_for_each_entry(bo, &node->bos, mn_list) { | 209 | amdgpu_mn_invalidate_node(node, start, end); |
| 142 | |||
| 143 | if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, | ||
| 144 | end)) | ||
| 145 | continue; | ||
| 146 | |||
| 147 | r = amdgpu_bo_reserve(bo, true); | ||
| 148 | if (r) { | ||
| 149 | DRM_ERROR("(%ld) failed to reserve user bo\n", r); | ||
| 150 | continue; | ||
| 151 | } | ||
| 152 | |||
| 153 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | ||
| 154 | true, false, MAX_SCHEDULE_TIMEOUT); | ||
| 155 | if (r <= 0) | ||
| 156 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); | ||
| 157 | |||
| 158 | amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); | ||
| 159 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
| 160 | if (r) | ||
| 161 | DRM_ERROR("(%ld) failed to validate user bo\n", r); | ||
| 162 | |||
| 163 | amdgpu_bo_unreserve(bo); | ||
| 164 | } | ||
| 165 | } | 210 | } |
| 166 | 211 | ||
| 167 | mutex_unlock(&rmn->lock); | 212 | mutex_unlock(&rmn->lock); |
| @@ -169,6 +214,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
| 169 | 214 | ||
| 170 | static const struct mmu_notifier_ops amdgpu_mn_ops = { | 215 | static const struct mmu_notifier_ops amdgpu_mn_ops = { |
| 171 | .release = amdgpu_mn_release, | 216 | .release = amdgpu_mn_release, |
| 217 | .invalidate_page = amdgpu_mn_invalidate_page, | ||
| 172 | .invalidate_range_start = amdgpu_mn_invalidate_range_start, | 218 | .invalidate_range_start = amdgpu_mn_invalidate_range_start, |
| 173 | }; | 219 | }; |
| 174 | 220 | ||
