author		Christian König <christian.koenig@amd.com>	2017-09-12 14:25:14 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2017-09-12 14:26:37 -0400
commit		3fe89771cb0a65d3b686bcafb5b7e3ebae0ea604
tree		a952951fd0b2b4334eb9616a283b3ce39d26fd02	/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent		60de1c1740f390fe48141b54d04cc53a6073d347
drm/amdgpu: stop reserving the BO in the MMU callback v3
Instead take the callback lock during the final parts of CS. This
should solve the last remaining locking order problems with BO
reservations.

v2: rebase, make dummy functions static inline
v3: add one more missing inline and comments

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
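For illustration only, a minimal sketch of how a command-submission (CS)
path could use the new helpers in place of per-BO reservation in the MMU
notifier. This snippet is not part of the patch; apart from
amdgpu_mn_get(), amdgpu_mn_lock() and amdgpu_mn_unlock(), every name in
it (the job type and the commit helper) is a hypothetical placeholder:

	/* Hypothetical caller, sketching the intended use of the new helpers. */
	static int example_cs_submit(struct amdgpu_device *adev,
				     struct example_job *job)	/* hypothetical type */
	{
		struct amdgpu_mn *mn = amdgpu_mn_get(adev);
		int r;

		if (IS_ERR(mn))			/* assumption: amdgpu_mn_get() returns an ERR_PTR on failure */
			return PTR_ERR(mn);

		amdgpu_mn_lock(mn);		/* write side held: invalidations cannot run concurrently */
		r = example_commit_job(job);	/* hypothetical final CS step */
		amdgpu_mn_unlock(mn);		/* invalidations may run again */

		return r;
	}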
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c	30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 6d216abd0e1d..99edb40b5f99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -106,6 +106,25 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 		schedule_work(&rmn->work);
 }
 
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		up_write(&mn->lock);
+}
+
 /**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
@@ -126,20 +145,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = amdgpu_bo_reserve(bo, true);
-		if (r) {
-			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-			continue;
-		}
-
 		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
 		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
-
-		amdgpu_bo_unreserve(bo);
 	}
 }
 
@@ -223,7 +234,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
  *
  * Creates a notifier context for current->mm.
  */
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 {
 	struct mm_struct *mm = current->mm;
 	struct amdgpu_mn *rmn;
@@ -368,3 +379,4 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
+