author    Christian König <christian.koenig@amd.com>    2017-09-05 08:50:24 -0400
committer Alex Deucher <alexander.deucher@amd.com>      2017-09-12 14:24:27 -0400
commit    60de1c1740f390fe48141b54d04cc53a6073d347 (patch)
tree      23f66c1342080f6dec12481792f02100db6a2e0e /drivers/gpu/drm/amd/amdgpu
parent    1b0c0f9dc5ca6c0c8be21eeac92c7aa77bbf1d33 (diff)
drm/amdgpu: use a rw_semaphore for MMU notifiers
Allow at least some parallel processing.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
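The conversion follows the usual reader/writer split: the MMU notifier callbacks only look up buffer objects in the interval tree, so they can share the lock, while registration and teardown modify the tree and must hold it exclusively. A minimal sketch of the pattern (example_mn and the stub bodies are illustrative names, not the driver code itself):

    #include <linux/rwsem.h>
    #include <linux/rbtree.h>

    /* Illustrative only: the real structure lives in amdgpu_mn.c. */
    struct example_mn {
            struct rw_semaphore lock;       /* was: struct mutex lock; */
            struct rb_root objects;         /* interval tree of registered BOs */
    };

    /* Read side: several invalidations may now run in parallel. */
    static void example_invalidate(struct example_mn *emn)
    {
            down_read(&emn->lock);
            /* ... read-only walk of emn->objects ... */
            up_read(&emn->lock);
    }

    /* Write side: registration still runs exclusively. */
    static void example_register(struct example_mn *emn)
    {
            down_write(&emn->lock);
            /* ... modify emn->objects ... */
            up_write(&emn->lock);
    }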
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index df85a1314799..6d216abd0e1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,7 +50,7 @@ struct amdgpu_mn {
 	struct hlist_node node;
 
 	/* objects protected by lock */
-	struct mutex lock;
+	struct rw_semaphore lock;
 	struct rb_root objects;
 };
 
@@ -74,7 +74,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -84,7 +84,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
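Note the lock ordering in the teardown above: the per-device adev->mn_lock mutex is taken first, then the per-mm semaphore in write mode, since every node is removed from the tree. A condensed sketch of that nesting (example_destroy is a hypothetical name, details elided; amdgpu_mn_unregister() further down uses the same ordering):

    #include <linux/mutex.h>
    #include <linux/rwsem.h>

    static void example_destroy(struct mutex *mn_lock, struct rw_semaphore *lock)
    {
            mutex_lock(mn_lock);            /* outer, per-device lock */
            down_write(lock);               /* exclusive: the whole tree is freed */
            /* ... hash_del() and free all interval tree nodes ... */
            up_write(lock);
            mutex_unlock(mn_lock);
    }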
@@ -160,7 +160,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
 
-	mutex_lock(&rmn->lock);
+	down_read(&rmn->lock);
 
 	it = interval_tree_iter_first(&rmn->objects, address, address);
 	if (it) {
@@ -170,7 +170,7 @@ static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
 		amdgpu_mn_invalidate_node(node, address, address);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_read(&rmn->lock);
 }
 
 /**
@@ -195,7 +195,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
+	down_read(&rmn->lock);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -207,7 +207,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_read(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
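Both invalidation callbacks above take the semaphore shared, which is where the parallelism comes from: concurrent invalidate_page and invalidate_range_start calls no longer serialize against each other, only against writers. A self-contained sketch of the read-side loop (example_invalidate_range is hypothetical; the per-node invalidation is elided):

    #include <linux/rwsem.h>
    #include <linux/interval_tree.h>

    static void example_invalidate_range(struct rw_semaphore *lock,
                                         struct rb_root *objects,
                                         unsigned long start, unsigned long end)
    {
            struct interval_tree_node *it;

            down_read(lock);                /* shared: lookup only */
            for (it = interval_tree_iter_first(objects, start, end); it;
                 it = interval_tree_iter_next(it, start, end)) {
                    /* ... invalidate the object covering [start, end] ... */
            }
            up_read(lock);
    }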
@@ -248,7 +248,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
+	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
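The only change on the allocation path is the initializer: init_rwsem() replaces mutex_init(). A sketch with a hypothetical constructor (example_mn_new and example_mn are illustrative names):

    #include <linux/rwsem.h>
    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct example_mn {
            struct rw_semaphore lock;
            struct rb_root objects;
    };

    static struct example_mn *example_mn_new(void)
    {
            struct example_mn *emn = kzalloc(sizeof(*emn), GFP_KERNEL);

            if (!emn)
                    return NULL;
            init_rwsem(&emn->lock);         /* was: mutex_init(&emn->lock); */
            emn->objects = RB_ROOT;
            return emn;
    }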
@@ -295,7 +295,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -309,7 +309,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -324,7 +324,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 
 	return 0;
 }
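Registration holds the semaphore in write mode across the whole lookup-merge-insert sequence; the one subtlety the patch preserves is that the -ENOMEM early return must drop the lock with up_write() first. A sketch of that shape (example_register is hypothetical; the merging of overlapping nodes is elided):

    #include <linux/errno.h>
    #include <linux/rwsem.h>
    #include <linux/slab.h>
    #include <linux/interval_tree.h>

    static int example_register(struct rw_semaphore *lock,
                                struct rb_root *objects,
                                unsigned long addr, unsigned long end)
    {
            struct interval_tree_node *node;

            down_write(lock);               /* exclusive: the tree is modified */
            node = kmalloc(sizeof(*node), GFP_KERNEL);
            if (!node) {
                    up_write(lock);         /* the error path unlocks too */
                    return -ENOMEM;
            }
            node->start = addr;
            node->last = end;
            interval_tree_insert(node, objects);
            up_write(lock);
            return 0;
    }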
@@ -350,7 +350,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -365,6 +365,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }