author		Christian König <christian.koenig@amd.com>	2016-03-18 14:29:51 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2016-03-21 11:52:14 -0400
commit		0d2b42b0bdba45c82d29d794ea30a4c90e3f4098 (patch)
tree		6e124aaeb64bc04cd8300ecea89aecd4ac96c376
parent		0ccbf11988d30c826810884fb9d2743e3923a464 (diff)
drm/amdgpu: Revert "remove the userptr rmn->lock"
This reverts commit c02196834456f2d5fad334088b70e98ce4967c34.

In the meantime we moved get_user_pages() outside of the reservation lock,
so that shouldn't be an issue any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
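For context, the locking scheme this revert brings back is roughly the sketch below: each amdgpu_mn instance carries a mutex that guards its interval tree of registered buffer objects, and the MMU-notifier callback takes that mutex before walking the tree. This is a condensed illustration distilled from the diff that follows, not a verbatim copy of amdgpu_mn.c; fields unrelated to the locking change are left out and the per-BO invalidation work inside the loop is elided.

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

struct amdgpu_mn {
	struct mmu_notifier	mn;
	struct mm_struct	*mm;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;

	/* other fields (adev, work, node) omitted for brevity */
};

static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	/* serialize against register()/unregister() updating the tree,
	 * instead of relying on mmap_sem being held for write */
	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		/* wait for pending work and invalidate the affected BOs
		 * (elided here, see the actual hunks below) */
		it = interval_tree_iter_next(it, start, end);
	}

	mutex_unlock(&rmn->lock);
}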
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c	22
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd6755f..c47f22224a65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node node;
 
-	/* objects protected by mm->mmap_sem */
+	/* objects protected by lock */
+	struct mutex lock;
 	struct rb_root objects;
 };
 
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -126,6 +127,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
+	mutex_lock(&rmn->lock);
+
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -160,6 +163,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 			amdgpu_bo_unreserve(bo);
 		}
 	}
+
+	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -196,6 +201,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
+	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +248,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -256,7 +262,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			up_write(&rmn->mm->mmap_sem);
+			mutex_unlock(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -271,7 +277,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 
 	return 0;
 }
@@ -297,7 +303,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -312,6 +318,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
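On the update side, amdgpu_mn_register() and amdgpu_mn_unregister() now take the same rmn->lock around interval-tree insertions and removals, as the hunks above show, rather than write-locking mmap_sem. A minimal usage sketch of that pattern follows; the helper name example_mn_insert is hypothetical, and the per-node BO list handling and node merging done by the real registration path are omitted.

/* Hypothetical helper: insert a node into the interval tree under the
 * restored per-notifier mutex.  The real amdgpu_mn_register() also merges
 * overlapping nodes and manages the per-node BO list, omitted here. */
static void example_mn_insert(struct amdgpu_mn *rmn,
			      struct interval_tree_node *it_node)
{
	mutex_lock(&rmn->lock);
	interval_tree_insert(it_node, &rmn->objects);
	mutex_unlock(&rmn->lock);
}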