author		Christian König <christian.koenig@amd.com>	2015-08-04 10:58:36 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-08-17 16:51:01 -0400
commit		0e89d0c16b9446a094215e71734e583c438bf83d
tree		914cd8c6aa9766789e041b0b04c04dc5e285944c /drivers/gpu
parent		efd4ccb59a4acb8b85835d6b053362dbacee40f9
drm/amdgpu: stop leaking the ctx id into the scheduler v2
Ids are for the IOCTL ABI only.
v2: remove tgid as well
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
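
For orientation, a minimal sketch of the interfaces as they look after this patch (condensed from the diff below; the surrounding amdgpu code is assumed otherwise unchanged). The context id is still produced in amdgpu_ctx_alloc() and handed back to userspace through the IOCTL, but it is no longer threaded into context init or the scheduler entity, and the kernel context is tracked via adev->kernel_ctx rather than a reserved id:

    /* Post-patch signatures, condensed from this commit (sketch only). */
    static void amdgpu_ctx_init(struct amdgpu_device *adev,
                                struct amdgpu_fpriv *fpriv,
                                struct amdgpu_ctx *ctx);        /* id parameter dropped */

    int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                                struct amd_context_entity *entity,
                                struct amd_sched_entity *parent,
                                struct amd_run_queue *rq,
                                uint32_t jobs);                 /* context_id parameter dropped */

    /* The id itself stays in the IOCTL path: amdgpu_ctx_alloc() still sets
     * *id for userspace, while the kernel context is identified simply by
     * adev->kernel_ctx instead of AMD_KERNEL_CONTEXT_ID. */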
Diffstat (limited to 'drivers/gpu')

 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c        | 11
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c  | 10
 -rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.h  |  6
 3 files changed, 7 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 3c353375b228..c2290ae20312 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -50,8 +50,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 
 static void amdgpu_ctx_init(struct amdgpu_device *adev,
                             struct amdgpu_fpriv *fpriv,
-                            struct amdgpu_ctx *ctx,
-                            uint32_t id)
+                            struct amdgpu_ctx *ctx)
 {
         int i;
         memset(ctx, 0, sizeof(*ctx));
@@ -81,7 +80,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                         return r;
                 }
                 *id = (uint32_t)r;
-                amdgpu_ctx_init(adev, fpriv, ctx, *id);
+                amdgpu_ctx_init(adev, fpriv, ctx);
                 mutex_unlock(&mgr->lock);
         } else {
                 if (adev->kernel_ctx) {
@@ -89,8 +88,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                         kfree(ctx);
                         return 0;
                 }
-                *id = AMD_KERNEL_CONTEXT_ID;
-                amdgpu_ctx_init(adev, fpriv, ctx, *id);
+                amdgpu_ctx_init(adev, fpriv, ctx);
 
                 adev->kernel_ctx = ctx;
         }
@@ -105,8 +103,7 @@ int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
                 rq = &adev->rings[i]->scheduler->kernel_rq;
                 r = amd_context_entity_init(adev->rings[i]->scheduler,
                                             &ctx->rings[i].c_entity,
-                                            NULL, rq, *id,
-                                            amdgpu_sched_jobs);
+                                            NULL, rq, amdgpu_sched_jobs);
                 if (r)
                         break;
         }
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 4ad1825e713e..b9aa572980d2 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -172,7 +172,7 @@ exit:
  * @entity     The pointer to a valid amd_context_entity
  * @parent     The parent entity of this amd_context_entity
  * @rq         The run queue this entity belongs
- * @context_id The context id for this entity
+ * @kernel     If this is an entity for the kernel
  * @jobs       The max number of jobs in the job queue
  *
  * return 0 if succeed. negative error code on failure
@@ -181,7 +181,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                             struct amd_context_entity *entity,
                             struct amd_sched_entity *parent,
                             struct amd_run_queue *rq,
-                            uint32_t context_id,
                             uint32_t jobs)
 {
         uint64_t seq_ring = 0;
@@ -203,9 +202,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                 return -EINVAL;
 
         spin_lock_init(&entity->queue_lock);
-        entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
-                                AMD_KERNEL_PROCESS_ID : current->tgid;
-        entity->context_id = context_id;
         atomic64_set(&entity->last_emitted_v_seq, seq_ring);
         atomic64_set(&entity->last_queued_v_seq, seq_ring);
 
@@ -275,9 +271,9 @@ int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
 
         if (r) {
                 if (entity->is_pending)
-                        DRM_INFO("Entity %u is in waiting state during fini,\
+                        DRM_INFO("Entity %p is in waiting state during fini,\
                                 all pending ibs will be canceled.\n",
-                                 entity->context_id);
+                                 entity);
         }
 
         mutex_lock(&rq->lock);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index fd6d699d42e1..c46d0854ab75 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -26,9 +26,6 @@
 
 #include <linux/kfifo.h>
 
-#define AMD_KERNEL_CONTEXT_ID           0
-#define AMD_KERNEL_PROCESS_ID           0
-
 #define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
 
 struct amd_gpu_scheduler;
@@ -74,8 +71,6 @@ struct amd_context_entity {
         /* the virtual_seq is unique per context per ring */
         atomic64_t              last_queued_v_seq;
         atomic64_t              last_emitted_v_seq;
-        pid_t                   tgid;
-        uint32_t                context_id;
         /* the job_queue maintains the jobs submitted by clients */
         struct kfifo            job_queue;
         spinlock_t              queue_lock;
@@ -148,7 +143,6 @@ int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                             struct amd_context_entity *entity,
                             struct amd_sched_entity *parent,
                             struct amd_run_queue *rq,
-                            uint32_t context_id,
                             uint32_t jobs);
 
 void amd_sched_emit(struct amd_context_entity *c_entity, uint64_t seq);