Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 88
1 file changed, 34 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index a86e38158afa..5b1ae18f5e8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,81 +27,58 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched,
-                                    struct amd_sched_entity *entity,
-                                    struct amd_sched_job *job)
-{
-        int r = 0;
-        struct amdgpu_cs_parser *sched_job;
-        if (!job || !job->data) {
-                DRM_ERROR("job is null\n");
-                return -EINVAL;
-        }
-
-        sched_job = (struct amdgpu_cs_parser *)job->data;
-        if (sched_job->prepare_job) {
-                r = sched_job->prepare_job(sched_job);
-                if (r) {
-                        DRM_ERROR("Prepare job error\n");
-                        schedule_work(&sched_job->job_work);
-                }
-        }
-        return r;
-}
-
 static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched,
                                           struct amd_sched_entity *entity,
                                           struct amd_sched_job *job)
 {
         int r = 0;
-        struct amdgpu_cs_parser *sched_job;
+        struct amdgpu_job *sched_job;
         struct amdgpu_fence *fence;
 
-        if (!job || !job->data) {
+        if (!job) {
                 DRM_ERROR("job is null\n");
                 return NULL;
         }
-        sched_job = (struct amdgpu_cs_parser *)job->data;
+        sched_job = (struct amdgpu_job *)job;
         mutex_lock(&sched_job->job_lock);
         r = amdgpu_ib_schedule(sched_job->adev,
                                sched_job->num_ibs,
                                sched_job->ibs,
-                               sched_job->filp);
+                               sched_job->owner);
         if (r)
                 goto err;
         fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
 
-        if (sched_job->run_job) {
-                r = sched_job->run_job(sched_job);
-                if (r)
-                        goto err;
-        }
-
         mutex_unlock(&sched_job->job_lock);
         return &fence->base;
 
 err:
         DRM_ERROR("Run job error\n");
         mutex_unlock(&sched_job->job_lock);
-        schedule_work(&sched_job->job_work);
+        sched->ops->process_job(sched, (struct amd_sched_job *)sched_job);
         return NULL;
 }
 
 static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched,
                                      struct amd_sched_job *job)
 {
-        struct amdgpu_cs_parser *sched_job;
+        struct amdgpu_job *sched_job;
 
-        if (!job || !job->data) {
+        if (!job) {
                 DRM_ERROR("job is null\n");
                 return;
         }
-        sched_job = (struct amdgpu_cs_parser *)job->data;
-        schedule_work(&sched_job->job_work);
+        sched_job = (struct amdgpu_job *)job;
+        mutex_lock(&sched_job->job_lock);
+        if (sched_job->free_job)
+                sched_job->free_job(sched_job);
+        mutex_unlock(&sched_job->job_lock);
+        /* after processing job, free memory */
+        fence_put(&sched_job->base.s_fence->base);
+        kfree(sched_job);
 }
 
 struct amd_sched_backend_ops amdgpu_sched_ops = {
-        .prepare_job = amdgpu_sched_prepare_job,
         .run_job = amdgpu_sched_run_job,
         .process_job = amdgpu_sched_process_job
 };
@@ -110,31 +87,34 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                          struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ibs,
                                          unsigned num_ibs,
-                                         int (*free_job)(struct amdgpu_cs_parser *),
+                                         int (*free_job)(struct amdgpu_job *),
                                          void *owner,
                                          struct fence **f)
 {
         int r = 0;
         if (amdgpu_enable_scheduler) {
-                struct amdgpu_cs_parser *sched_job =
-                        amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx,
-                                                ibs, num_ibs);
-                if(!sched_job) {
+                struct amdgpu_job *job =
+                        kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+                if (!job)
                         return -ENOMEM;
-                }
-                sched_job->free_job = free_job;
-                mutex_lock(&sched_job->job_lock);
-                r = amd_sched_push_job(ring->scheduler,
-                                       &adev->kernel_ctx.rings[ring->idx].entity,
-                                       sched_job, &sched_job->s_fence);
+                job->base.sched = ring->scheduler;
+                job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+                job->adev = adev;
+                job->ibs = ibs;
+                job->num_ibs = num_ibs;
+                job->owner = owner;
+                mutex_init(&job->job_lock);
+                job->free_job = free_job;
+                mutex_lock(&job->job_lock);
+                r = amd_sched_push_job((struct amd_sched_job *)job);
                 if (r) {
-                        mutex_unlock(&sched_job->job_lock);
-                        kfree(sched_job);
+                        mutex_unlock(&job->job_lock);
+                        kfree(job);
                         return r;
                 }
-                ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-                *f = fence_get(&sched_job->s_fence->base);
-                mutex_unlock(&sched_job->job_lock);
+                ibs[num_ibs - 1].sequence = job->base.s_fence->v_seq;
+                *f = fence_get(&job->base.s_fence->base);
+                mutex_unlock(&job->job_lock);
         } else {
                 r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                 if (r)
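
The patch's central move is to give the scheduler a self-contained work unit: every field the callbacks touch now lives on the job itself instead of being fished out of an amdgpu_cs_parser through job->data. For reference, here is a minimal sketch of the amdgpu_job layout this diff appears to assume; the field list is inferred from the members dereferenced above, not copied from the real amdgpu header, which may carry more fields.

/*
 * Hypothetical reconstruction of the job structure, inferred from the
 * fields this diff uses (base, adev, ibs, num_ibs, owner, job_lock,
 * free_job). Not the actual kernel definition.
 */
struct amdgpu_job {
        struct amd_sched_job base;      /* embedded scheduler job: sched, s_entity, s_fence */
        struct amdgpu_device *adev;     /* device whose ring executes the IBs */
        struct amdgpu_ib *ibs;          /* array of indirect buffers to submit */
        unsigned num_ibs;               /* number of entries in ibs[] */
        void *owner;                    /* submitter cookie forwarded to amdgpu_ib_schedule() */
        struct mutex job_lock;          /* serializes submission against run_job/process_job */
        int (*free_job)(struct amdgpu_job *job); /* backend cleanup hook run from process_job */
};

Embedding base as the first member is what makes the (struct amd_sched_job *) casts in run_job, process_job, and amd_sched_push_job valid: a pointer to the job is also a pointer to its base.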
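
With prepare_job gone, the backend contract shrinks to two callbacks: run_job() submits the IBs and returns the hardware fence, handing any failed job straight to process_job(), and process_job() performs all cleanup, including freeing the job, once it has run. A simplified sketch of how a scheduler core could drive that contract follows; example_drive_one_job() is invented for illustration, and the real loop in the shared amd_gpu_scheduler code is more involved.

/*
 * Illustrative only: drives one job through the two remaining backend
 * callbacks. fence_wait()/fence_put() are the kernel fence API of this
 * era (later renamed dma_fence_*).
 */
static void example_drive_one_job(struct amd_gpu_scheduler *sched,
                                  struct amd_sched_entity *entity,
                                  struct amd_sched_job *job)
{
        struct fence *fence = sched->ops->run_job(sched, entity, job);

        if (!fence)
                return; /* run_job() already routed the failure to process_job() */

        /* block until the hardware signals completion (non-interruptible) */
        fence_wait(fence, false);

        /* process_job() frees the job, so it must not be touched afterwards */
        sched->ops->process_job(sched, job);
        fence_put(fence);
}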