author    Chunming Zhou <david1.zhou@amd.com>        2015-07-21 01:45:14 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2015-08-17 16:50:33 -0400
commit    c1b69ed0c62f9d86599600f4c1a3bd82db1b7362
tree      7e560093bffb5d53be1ed50d1e0539e6c2ec8c71
parent    2c4888a0d392b206eb348d4fc6dec539eee2c534
drm/amdgpu: add backend implementation of gpu scheduler (v2)
v2: fix rebase breakage

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile         3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h         8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c   3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c 107
4 files changed, 119 insertions, 2 deletions
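
For context, the scheduler backend hook table that the new amdgpu_sched.c fills in has roughly the shape sketched below. This is only a sketch inferred from the callback signatures used in this patch, not part of the change itself; the authoritative definition lives in the shared scheduler code under drivers/gpu/drm/amd/scheduler/ and may differ in detail.

	/* Sketch only: backend ops consumed by amd_sched_create(), inferred
	 * from the amdgpu_sched_* callbacks added below.
	 */
	struct amd_sched_backend_ops {
		/* called before a job is handed to the hardware ring */
		int (*prepare_job)(struct amd_gpu_scheduler *sched,
				   struct amd_context_entity *c_entity,
				   void *job);
		/* submits the job's IBs to the ring */
		void (*run_job)(struct amd_gpu_scheduler *sched,
				struct amd_context_entity *c_entity,
				void *job);
		/* completion-side processing once the job has run */
		void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
	};

amdgpu_fence.c is changed below to pass &amdgpu_sched_ops to amd_sched_create(), so scheduler-managed rings call back into these amdgpu hooks.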
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 2800cf70b557..f1cb7d2fa411 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -85,7 +85,8 @@ amdgpu-y += amdgpu_cgs.o
 
 # GPU scheduler
 amdgpu-y += \
-	../scheduler/gpu_scheduler.o
+	../scheduler/gpu_scheduler.o \
+	amdgpu_sched.o
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 776339c2a95e..6bf16d95e7e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -416,6 +416,7 @@ struct amdgpu_user_fence {
 	struct amdgpu_bo *bo;
 	/* write-back address offset to bo start */
 	uint32_t offset;
+	uint64_t sequence;
 };
 
 int amdgpu_fence_driver_init(struct amdgpu_device *adev);
@@ -859,6 +860,8 @@ enum amdgpu_ring_type {
 	AMDGPU_RING_TYPE_VCE
 };
 
+extern struct amd_sched_backend_ops amdgpu_sched_ops;
+
 struct amdgpu_ring {
 	struct amdgpu_device *adev;
 	const struct amdgpu_ring_funcs *funcs;
@@ -1232,6 +1235,11 @@ struct amdgpu_cs_parser {
 
 	/* user fence */
 	struct amdgpu_user_fence uf;
+
+	struct mutex job_lock;
+	struct work_struct job_work;
+	int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
+	int (*run_job)(struct amdgpu_cs_parser *sched_job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 4edeb90e7227..be43ae412ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -905,7 +905,8 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 
 	if (amdgpu_enable_scheduler) {
 		ring->scheduler = amd_sched_create((void *)ring->adev,
-						   NULL, ring->idx, 5, 0);
+						   &amdgpu_sched_ops,
+						   ring->idx, 5, 0);
 		if (!ring->scheduler)
 			DRM_ERROR("Failed to create scheduler on ring %d.\n",
 				  ring->idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..1f7bf31da7fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+
+static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
+				    struct amd_context_entity *c_entity,
+				    void *job)
+{
+	int r = 0;
+	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+	if (sched_job->prepare_job)
+		r = sched_job->prepare_job(sched_job);
+	if (r) {
+		DRM_ERROR("Prepare job error\n");
+		schedule_work(&sched_job->job_work);
+	}
+	return r;
+}
+
+static void amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
+				 struct amd_context_entity *c_entity,
+				 void *job)
+{
+	int r = 0;
+	struct amdgpu_cs_parser *sched_job = (struct amdgpu_cs_parser *)job;
+
+	mutex_lock(&sched_job->job_lock);
+	r = amdgpu_ib_schedule(sched_job->adev,
+			       sched_job->num_ibs,
+			       sched_job->ibs,
+			       sched_job->filp);
+	if (r)
+		goto err;
+
+	if (sched_job->run_job) {
+		r = sched_job->run_job(sched_job);
+		if (r)
+			goto err;
+	}
+	mutex_unlock(&sched_job->job_lock);
+	return;
+err:
+	DRM_ERROR("Run job error\n");
+	mutex_unlock(&sched_job->job_lock);
+	schedule_work(&sched_job->job_work);
+}
+
+static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, void *job)
+{
+	struct amdgpu_cs_parser *sched_job = NULL;
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_ring *ring = NULL;
+	struct amdgpu_device *adev = NULL;
+	struct amd_context_entity *c_entity = NULL;
+
+	if (!job)
+		return;
+	sched_job = (struct amdgpu_cs_parser *)job;
+	fence = sched_job->ibs[sched_job->num_ibs - 1].fence;
+	if (!fence)
+		return;
+	ring = fence->ring;
+	adev = ring->adev;
+
+	if (sched_job->ctx) {
+		c_entity = &sched_job->ctx->rings[ring->idx].c_entity;
+		atomic64_set(&c_entity->last_signaled_v_seq,
+			     sched_job->uf.sequence);
+	}
+
+	/* wake up users waiting for time stamp */
+	wake_up_all(&c_entity->wait_queue);
+
+	schedule_work(&sched_job->job_work);
+}
+
+struct amd_sched_backend_ops amdgpu_sched_ops = {
+	.prepare_job = amdgpu_sched_prepare_job,
+	.run_job = amdgpu_sched_run_job,
+	.process_job = amdgpu_sched_process_job
+};
+