aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFelix Kuehling <Felix.Kuehling@amd.com>2016-02-16 15:29:23 -0500
committerAlex Deucher <alexander.deucher@amd.com>2016-02-17 16:10:42 -0500
commitb8ea3783467586246d0b2b08f2e3f42853219d94 (patch)
tree4eb5c6f1a9bc517209f2e8bd3429df6f5503a922
parent418aa0c296ddb5df90c4e94f995cfd3f3c9e96dc (diff)
drm/amdgpu: Fix race condition in amdgpu_mn_unregister
Exchange locking order of adev->mn_lock and mmap_sem, so that rmn->mm->mmap_sem can be taken safely, protected by adev->mn_lock, when amdgpu_mn_destroy runs concurrently.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 1b2105ca5af1..d12dff92f3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -71,12 +71,11 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
-	down_write(&rmn->mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
+	down_write(&rmn->mm->mmap_sem);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
-
 		interval_tree_remove(&node->it, &rmn->objects);
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
@@ -84,8 +83,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&adev->mn_lock);
 	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
 }
@@ -182,8 +181,8 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	struct amdgpu_mn *rmn;
 	int r;
 
-	down_write(&mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
+	down_write(&mm->mmap_sem);
 
 	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
 		if (rmn->mm == mm)
@@ -207,14 +206,14 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
 
 release_locks:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 
 	return rmn;
 
 free_rmn:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 	kfree(rmn);
 
 	return ERR_PTR(r);
@@ -288,14 +287,18 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = bo->adev;
-	struct amdgpu_mn *rmn = bo->mn;
+	struct amdgpu_mn *rmn;
 	struct list_head *head;
 
-	if (rmn == NULL)
+	mutex_lock(&adev->mn_lock);
+
+	rmn = bo->mn;
+	if (rmn == NULL) {
+		mutex_unlock(&adev->mn_lock);
 		return;
+	}
 
 	down_write(&rmn->mm->mmap_sem);
-	mutex_lock(&adev->mn_lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -310,6 +313,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&adev->mn_lock);
 	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 }