diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 145 |
1 files changed, 145 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c new file mode 100644 index 000000000000..a86e38158afa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2015 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * | ||
23 | */ | ||
24 | #include <linux/kthread.h> | ||
25 | #include <linux/wait.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <drm/drmP.h> | ||
28 | #include "amdgpu.h" | ||
29 | |||
30 | static int amdgpu_sched_prepare_job(struct amd_gpu_scheduler *sched, | ||
31 | struct amd_sched_entity *entity, | ||
32 | struct amd_sched_job *job) | ||
33 | { | ||
34 | int r = 0; | ||
35 | struct amdgpu_cs_parser *sched_job; | ||
36 | if (!job || !job->data) { | ||
37 | DRM_ERROR("job is null\n"); | ||
38 | return -EINVAL; | ||
39 | } | ||
40 | |||
41 | sched_job = (struct amdgpu_cs_parser *)job->data; | ||
42 | if (sched_job->prepare_job) { | ||
43 | r = sched_job->prepare_job(sched_job); | ||
44 | if (r) { | ||
45 | DRM_ERROR("Prepare job error\n"); | ||
46 | schedule_work(&sched_job->job_work); | ||
47 | } | ||
48 | } | ||
49 | return r; | ||
50 | } | ||
51 | |||
52 | static struct fence *amdgpu_sched_run_job(struct amd_gpu_scheduler *sched, | ||
53 | struct amd_sched_entity *entity, | ||
54 | struct amd_sched_job *job) | ||
55 | { | ||
56 | int r = 0; | ||
57 | struct amdgpu_cs_parser *sched_job; | ||
58 | struct amdgpu_fence *fence; | ||
59 | |||
60 | if (!job || !job->data) { | ||
61 | DRM_ERROR("job is null\n"); | ||
62 | return NULL; | ||
63 | } | ||
64 | sched_job = (struct amdgpu_cs_parser *)job->data; | ||
65 | mutex_lock(&sched_job->job_lock); | ||
66 | r = amdgpu_ib_schedule(sched_job->adev, | ||
67 | sched_job->num_ibs, | ||
68 | sched_job->ibs, | ||
69 | sched_job->filp); | ||
70 | if (r) | ||
71 | goto err; | ||
72 | fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence); | ||
73 | |||
74 | if (sched_job->run_job) { | ||
75 | r = sched_job->run_job(sched_job); | ||
76 | if (r) | ||
77 | goto err; | ||
78 | } | ||
79 | |||
80 | mutex_unlock(&sched_job->job_lock); | ||
81 | return &fence->base; | ||
82 | |||
83 | err: | ||
84 | DRM_ERROR("Run job error\n"); | ||
85 | mutex_unlock(&sched_job->job_lock); | ||
86 | schedule_work(&sched_job->job_work); | ||
87 | return NULL; | ||
88 | } | ||
89 | |||
90 | static void amdgpu_sched_process_job(struct amd_gpu_scheduler *sched, | ||
91 | struct amd_sched_job *job) | ||
92 | { | ||
93 | struct amdgpu_cs_parser *sched_job; | ||
94 | |||
95 | if (!job || !job->data) { | ||
96 | DRM_ERROR("job is null\n"); | ||
97 | return; | ||
98 | } | ||
99 | sched_job = (struct amdgpu_cs_parser *)job->data; | ||
100 | schedule_work(&sched_job->job_work); | ||
101 | } | ||
102 | |||
/* Backend callbacks wiring amdgpu command submission into the GPU scheduler. */
struct amd_sched_backend_ops amdgpu_sched_ops = {
	.prepare_job = amdgpu_sched_prepare_job,
	.run_job = amdgpu_sched_run_job,
	.process_job = amdgpu_sched_process_job
};
108 | |||
109 | int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev, | ||
110 | struct amdgpu_ring *ring, | ||
111 | struct amdgpu_ib *ibs, | ||
112 | unsigned num_ibs, | ||
113 | int (*free_job)(struct amdgpu_cs_parser *), | ||
114 | void *owner, | ||
115 | struct fence **f) | ||
116 | { | ||
117 | int r = 0; | ||
118 | if (amdgpu_enable_scheduler) { | ||
119 | struct amdgpu_cs_parser *sched_job = | ||
120 | amdgpu_cs_parser_create(adev, owner, &adev->kernel_ctx, | ||
121 | ibs, num_ibs); | ||
122 | if(!sched_job) { | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | sched_job->free_job = free_job; | ||
126 | mutex_lock(&sched_job->job_lock); | ||
127 | r = amd_sched_push_job(ring->scheduler, | ||
128 | &adev->kernel_ctx.rings[ring->idx].entity, | ||
129 | sched_job, &sched_job->s_fence); | ||
130 | if (r) { | ||
131 | mutex_unlock(&sched_job->job_lock); | ||
132 | kfree(sched_job); | ||
133 | return r; | ||
134 | } | ||
135 | ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq; | ||
136 | *f = fence_get(&sched_job->s_fence->base); | ||
137 | mutex_unlock(&sched_job->job_lock); | ||
138 | } else { | ||
139 | r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner); | ||
140 | if (r) | ||
141 | return r; | ||
142 | *f = fence_get(&ibs[num_ibs - 1].fence->base); | ||
143 | } | ||
144 | return 0; | ||
145 | } | ||