aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristian König <christian.koenig@amd.com>2016-10-28 11:04:07 -0400
committerAlex Deucher <alexander.deucher@amd.com>2016-10-31 12:43:20 -0400
commitc24784f01549ecdf23fc00d0588423bcf8956714 (patch)
tree14de2bb1c8f9ecd2d38c230c286c13e59d4dcbad
parent91efdb2718e0c5ff014f0cf98cac99f088a9a4d2 (diff)
drm/amd: fix scheduler fence teardown order v2
Some fences might be alive even after we have stopped the scheduler leading to warnings about leaked objects from the SLUB allocator. Fix this by allocating/freeing the SLUB allocator from the module init/fini functions just like we do it for hw fences.

v2: make variable static, add link to bug

Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=97500
Reported-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c13
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h6
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c19
4 files changed, 24 insertions, 16 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 71ed27eb3dde..73f2415630f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -737,6 +737,7 @@ static int __init amdgpu_init(void)
737{ 737{
738 amdgpu_sync_init(); 738 amdgpu_sync_init();
739 amdgpu_fence_slab_init(); 739 amdgpu_fence_slab_init();
740 amd_sched_fence_slab_init();
740 if (vgacon_text_force()) { 741 if (vgacon_text_force()) {
741 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); 742 DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
742 return -EINVAL; 743 return -EINVAL;
@@ -756,6 +757,7 @@ static void __exit amdgpu_exit(void)
756 drm_pci_exit(driver, pdriver); 757 drm_pci_exit(driver, pdriver);
757 amdgpu_unregister_atpx_handler(); 758 amdgpu_unregister_atpx_handler();
758 amdgpu_sync_fini(); 759 amdgpu_sync_fini();
760 amd_sched_fence_slab_fini();
759 amdgpu_fence_slab_fini(); 761 amdgpu_fence_slab_fini();
760} 762}
761 763
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 910b8d5b21c5..ffe1f85ce300 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); 35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
36 36
37struct kmem_cache *sched_fence_slab;
38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
39
40/* Initialize a given run queue struct */ 37/* Initialize a given run queue struct */
41static void amd_sched_rq_init(struct amd_sched_rq *rq) 38static void amd_sched_rq_init(struct amd_sched_rq *rq)
42{ 39{
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
618 INIT_LIST_HEAD(&sched->ring_mirror_list); 615 INIT_LIST_HEAD(&sched->ring_mirror_list);
619 spin_lock_init(&sched->job_list_lock); 616 spin_lock_init(&sched->job_list_lock);
620 atomic_set(&sched->hw_rq_count, 0); 617 atomic_set(&sched->hw_rq_count, 0);
621 if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
622 sched_fence_slab = kmem_cache_create(
623 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
624 SLAB_HWCACHE_ALIGN, NULL);
625 if (!sched_fence_slab)
626 return -ENOMEM;
627 }
628 618
629 /* Each scheduler will run on a seperate kernel thread */ 619 /* Each scheduler will run on a seperate kernel thread */
630 sched->thread = kthread_run(amd_sched_main, sched, sched->name); 620 sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,7 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
645{ 635{
646 if (sched->thread) 636 if (sched->thread)
647 kthread_stop(sched->thread); 637 kthread_stop(sched->thread);
648 rcu_barrier();
649 if (atomic_dec_and_test(&sched_fence_slab_ref))
650 kmem_cache_destroy(sched_fence_slab);
651} 638}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 7cbbbfb502ef..51068e6c3d9a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,9 +30,6 @@
30struct amd_gpu_scheduler; 30struct amd_gpu_scheduler;
31struct amd_sched_rq; 31struct amd_sched_rq;
32 32
33extern struct kmem_cache *sched_fence_slab;
34extern atomic_t sched_fence_slab_ref;
35
36/** 33/**
37 * A scheduler entity is a wrapper around a job queue or a group 34 * A scheduler entity is a wrapper around a job queue or a group
38 * of other entities. Entities take turns emitting jobs from their 35 * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
145 struct amd_sched_entity *entity); 142 struct amd_sched_entity *entity);
146void amd_sched_entity_push_job(struct amd_sched_job *sched_job); 143void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
147 144
145int amd_sched_fence_slab_init(void);
146void amd_sched_fence_slab_fini(void);
147
148struct amd_sched_fence *amd_sched_fence_create( 148struct amd_sched_fence *amd_sched_fence_create(
149 struct amd_sched_entity *s_entity, void *owner); 149 struct amd_sched_entity *s_entity, void *owner);
150void amd_sched_fence_scheduled(struct amd_sched_fence *fence); 150void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 3653b5a40494..88fc2d662579 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,6 +27,25 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30static struct kmem_cache *sched_fence_slab;
31
32int amd_sched_fence_slab_init(void)
33{
34 sched_fence_slab = kmem_cache_create(
35 "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
36 SLAB_HWCACHE_ALIGN, NULL);
37 if (!sched_fence_slab)
38 return -ENOMEM;
39
40 return 0;
41}
42
43void amd_sched_fence_slab_fini(void)
44{
45 rcu_barrier();
46 kmem_cache_destroy(sched_fence_slab);
47}
48
30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, 49struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
31 void *owner) 50 void *owner)
32{ 51{