author     Christian König <christian.koenig@amd.com>   2016-02-09 10:13:37 -0500
committer  Alex Deucher <alexander.deucher@amd.com>     2016-02-12 15:32:07 -0500
commit     c41d271d751ea023f67f1ea755e144b49079b6d5
tree       45944f26b707a6dff1b28dfdc31ccc10775aa923
parent     e7813d0cd8b6183fcd967843ab1f53e43e05423c
drm/amdgpu: remove the userptr rmn->lock
Avoid a lock inversion problem by just using the mmap_sem to
protect the entries of the interval tree.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 32 ++++++++++++--------------------
 1 file changed, 12 insertions(+), 20 deletions(-)
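For context, one plausible shape of the inversion (the commit message doesn't spell out the exact paths, so this is my reading): the notifier can be called with mmap_sem already held and then took rmn->lock, while amdgpu_mn_register() took rmn->lock and could end up acquiring mmap_sem underneath it. A minimal userspace sketch of that AB/BA pattern, with pthread mutexes standing in for mm->mmap_sem and the removed rmn->lock (illustrative only, not driver code):

#include <pthread.h>

static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER; /* stand-in for mm->mmap_sem */
static pthread_mutex_t rmn_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the removed rmn->lock */

/* Like the notifier path: mmap_sem is already held when it runs,
 * then it took rmn->lock to walk the interval tree. */
static void *notifier_path(void *arg)
{
        pthread_mutex_lock(&mmap_sem);   /* A */
        pthread_mutex_lock(&rmn_lock);   /* then B: A -> B ordering */
        /* ... walk the interval tree ... */
        pthread_mutex_unlock(&rmn_lock);
        pthread_mutex_unlock(&mmap_sem);
        return NULL;
}

/* Like the old registration path: held rmn->lock while work under it
 * could fault and acquire mmap_sem. */
static void *register_path(void *arg)
{
        pthread_mutex_lock(&rmn_lock);   /* B */
        pthread_mutex_lock(&mmap_sem);   /* then A: the inverted order */
        pthread_mutex_unlock(&mmap_sem);
        pthread_mutex_unlock(&rmn_lock);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        pthread_create(&t1, NULL, notifier_path, NULL);
        pthread_create(&t2, NULL, register_path, NULL);
        pthread_join(t1, NULL);   /* may never return: classic AB/BA deadlock */
        pthread_join(t2, NULL);
        return 0;
}

Run often enough, each thread grabs its first lock and blocks forever on the second. Serializing both paths on the one lock the MM already uses makes the reverse ordering impossible by construction.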
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d4e2780c0796..61f0e3c3172a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,8 +48,7 @@ struct amdgpu_mn {
         /* protected by adev->mn_lock */
         struct hlist_node node;
 
-        /* objects protected by lock */
-        struct mutex lock;
+        /* objects protected by mm->mmap_sem */
         struct rb_root objects;
 };
 
@@ -72,8 +71,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
         struct amdgpu_mn_node *node, *next_node;
         struct amdgpu_bo *bo, *next_bo;
 
+        down_write(&rmn->mm->mmap_sem);
         mutex_lock(&adev->mn_lock);
-        mutex_lock(&rmn->lock);
         hash_del(&rmn->node);
         rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
                                              it.rb) {
@@ -85,8 +84,8 @@ static void amdgpu_mn_destroy(struct work_struct *work)
                 }
                 kfree(node);
         }
-        mutex_unlock(&rmn->lock);
         mutex_unlock(&adev->mn_lock);
+        up_write(&rmn->mm->mmap_sem);
         mmu_notifier_unregister(&rmn->mn, rmn->mm);
         kfree(rmn);
 }
@@ -129,8 +128,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
         /* notification is exclusive, but interval is inclusive */
         end -= 1;
 
-        mutex_lock(&rmn->lock);
-
         it = interval_tree_iter_first(&rmn->objects, start, end);
         while (it) {
                 struct amdgpu_mn_node *node;
@@ -165,8 +162,6 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
                         amdgpu_bo_unreserve(bo);
                 }
         }
-
-        mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
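Worth noting why these two hunks can drop the locking entirely instead of converting it: the patch relies on the MM core holding mmap_sem across invalidate_range_start(), so the interval-tree walk is already serialized against the down_write() writers added in the other functions. A sketch of the resulting reader/writer scheme, with pthread_rwlock_t standing in for the rw_semaphore and hypothetical function names (illustrative, not driver code):

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER; /* stand-in for mm->mmap_sem */

/* Reader: in the kernel the caller already holds mmap_sem when the
 * notifier runs; the explicit rdlock exists only in this analogue. */
static void invalidate_range_start(void)
{
        pthread_rwlock_rdlock(&mmap_sem);
        /* ... interval_tree_iter_first()/interval_tree_iter_next() walk ... */
        pthread_rwlock_unlock(&mmap_sem);
}

/* Writer: matches the down_write()/up_write() pairs the patch adds. */
static void mn_register(void)
{
        pthread_rwlock_wrlock(&mmap_sem);  /* down_write(&rmn->mm->mmap_sem) */
        /* ... interval_tree_insert() ... */
        pthread_rwlock_unlock(&mmap_sem);  /* up_write(...) */
}

int main(void)
{
        mn_register();
        invalidate_range_start();
        return 0;
}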
@@ -203,7 +198,6 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
         rmn->adev = adev;
         rmn->mm = mm;
         rmn->mn.ops = &amdgpu_mn_ops;
-        mutex_init(&rmn->lock);
         rmn->objects = RB_ROOT;
 
         r = __mmu_notifier_register(&rmn->mn, mm);
@@ -250,7 +244,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
         INIT_LIST_HEAD(&bos);
 
-        mutex_lock(&rmn->lock);
+        down_write(&rmn->mm->mmap_sem);
 
         while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
                 kfree(node);
@@ -264,7 +258,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
         if (!node) {
                 node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
                 if (!node) {
-                        mutex_unlock(&rmn->lock);
+                        up_write(&rmn->mm->mmap_sem);
                         return -ENOMEM;
                 }
         }
@@ -279,7 +273,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
         interval_tree_insert(&node->it, &rmn->objects);
 
-        mutex_unlock(&rmn->lock);
+        up_write(&rmn->mm->mmap_sem);
 
         return 0;
 }
@@ -294,17 +288,15 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
         struct amdgpu_device *adev = bo->adev;
-        struct amdgpu_mn *rmn;
+        struct amdgpu_mn *rmn = bo->mn;
         struct list_head *head;
 
-        mutex_lock(&adev->mn_lock);
-        rmn = bo->mn;
-        if (rmn == NULL) {
-                mutex_unlock(&adev->mn_lock);
+        if (rmn == NULL)
                 return;
-        }
 
-        mutex_lock(&rmn->lock);
+        down_write(&rmn->mm->mmap_sem);
+        mutex_lock(&adev->mn_lock);
+
         /* save the next list entry for later */
         head = bo->mn_list.next;
 
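Note the ordering this hunk establishes: mmap_sem first, then adev->mn_lock, matching amdgpu_mn_destroy() above, so the two locks now have a single global order. A compact sketch of that discipline with stand-in pthread types (illustrative, not driver code):

#include <pthread.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;  /* stand-in for mm->mmap_sem */
static pthread_mutex_t  mn_lock  = PTHREAD_MUTEX_INITIALIZER;   /* stand-in for adev->mn_lock */

/* Both teardown paths (destroy and unregister) follow this order. */
static void teardown(void)
{
        pthread_rwlock_wrlock(&mmap_sem);  /* always taken first */
        pthread_mutex_lock(&mn_lock);      /* always taken second */
        /* ... hash_del() / list and interval-tree manipulation ... */
        pthread_mutex_unlock(&mn_lock);
        pthread_rwlock_unlock(&mmap_sem);  /* released in reverse order */
}

int main(void)
{
        teardown();
        return 0;
}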
@@ -318,6 +310,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
                 kfree(node);
         }
 
-        mutex_unlock(&rmn->lock);
         mutex_unlock(&adev->mn_lock);
+        up_write(&rmn->mm->mmap_sem);
 }