author    Felix Kuehling <Felix.Kuehling@amd.com>  2018-03-23 15:32:30 -0400
committer Oded Gabbay <oded.gabbay@gmail.com>      2018-03-23 15:32:30 -0400
commit    6e08e0995b8f339fd2a7ee4fa11f17396405ef60 (patch)
tree      629fb974b4420f93208988205bd455966e342341 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent    0919195f2b0d7437cb0de49b8975fdd7b5575490 (diff)
drm/amdgpu: Avoid reclaim while holding locks taken in MMU notifier
When an MMU notifier runs in memory reclaim context, it can deadlock
trying to take locks that are already held in the thread causing the
memory reclaim. The solution is to avoid memory reclaim while holding
locks that are taken in MMU notifiers.

This commit fixes kmalloc while holding rmn->lock by moving the call
outside the lock. The GFX MMU notifier also locks reservation objects.
I have no good solution for avoiding reclaim while holding reservation
objects. The HSA MMU notifier will not lock any reservation objects.

v2: Moved allocation outside lock instead of using GFP_NOIO

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Oded Gabbay <oded.gabbay@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
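For context, here is a minimal sketch of the preallocate-outside-the-lock
pattern the commit applies. The names (my_node, register_node, slot) are
hypothetical stand-ins; the real code is amdgpu_mn_register() in the diff
below.

    /*
     * Hypothetical sketch of the pattern; see amdgpu_mn_register()
     * below for the real code. The allocation happens before
     * down_write(), so any memory reclaim it triggers cannot recurse
     * into an MMU notifier that needs the same lock.
     */
    #include <linux/list.h>
    #include <linux/rwsem.h>
    #include <linux/slab.h>

    struct my_node {                  /* stand-in for amdgpu_mn_node */
            struct list_head bos;
    };

    static int register_node(struct rw_semaphore *lock,
                             struct my_node **slot)
    {
            struct my_node *new_node;

            /* Allocate while no notifier-visible lock is held. */
            new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
            if (!new_node)
                    return -ENOMEM;

            down_write(lock);
            if (!*slot)
                    *slot = new_node;  /* consume the preallocation */
            else
                    kfree(new_node);   /* node already exists; kfree
                                        * itself never reclaims */
            up_write(lock);
            return 0;
    }

The trade-off is an occasionally wasted allocation when a matching node
already exists; per the v2 note, this was preferred over allocating under
the lock with GFP_NOIO.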
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index f2ed18e2ff03..83e344fbb50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -380,7 +380,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	enum amdgpu_mn_type type =
 		bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX;
 	struct amdgpu_mn *rmn;
-	struct amdgpu_mn_node *node = NULL;
+	struct amdgpu_mn_node *node = NULL, *new_node;
 	struct list_head bos;
 	struct interval_tree_node *it;
 
@@ -388,6 +388,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (IS_ERR(rmn))
 		return PTR_ERR(rmn);
 
+	new_node = kmalloc(sizeof(*new_node), GFP_KERNEL);
+	if (!new_node)
+		return -ENOMEM;
+
 	INIT_LIST_HEAD(&bos);
 
 	down_write(&rmn->lock);
@@ -401,13 +405,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 		list_splice(&node->bos, &bos);
 	}
 
-	if (!node) {
-		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
-		if (!node) {
-			up_write(&rmn->lock);
-			return -ENOMEM;
-		}
-	}
+	if (!node)
+		node = new_node;
+	else
+		kfree(new_node);
 
 	bo->mn = rmn;
 