author    Oded Gabbay <oded.gabbay@amd.com>    2015-01-12 07:26:10 -0500
committer Oded Gabbay <oded.gabbay@amd.com>    2015-01-12 07:26:10 -0500
commit    45c9a5e4297b9a07d94ff8195ff6f21ba3581ad6
tree      0bce60eee553c065f94c4ddba4c067d6dc823044 /drivers/gpu/drm/amd
parent    9216ed294053be68a673754a0f8da88aa7fb7941
drm/amdkfd: Encapsulate DQM functions in ops structure
This patch does some re-org on the device_queue_manager structure. It takes
out all the function pointers from the structure and puts them in a new
structure, called device_queue_manager_ops. Then, it puts an instance of
that structure inside device_queue_manager.

This re-org is done to prepare the DQM module to support more than one AMD
APU (Kaveri).

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
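For readers unfamiliar with the pattern, a minimal, self-contained sketch of
the re-org described above (plain C, outside the kernel). The struct and
callback names mirror the patch, but the callback bodies and the bring-up
code are illustrative stand-ins, not the real amdkfd implementation:

#include <stdio.h>

struct device_queue_manager;

struct device_queue_manager_ops {
	int (*initialize)(struct device_queue_manager *dqm);
	int (*start)(struct device_queue_manager *dqm);
	int (*stop)(struct device_queue_manager *dqm);
};

struct device_queue_manager {
	struct device_queue_manager_ops ops;	/* function pointers now live here */
	int started;				/* stand-in for the real device state */
};

/* Stand-ins for the "cpsch" (HWS) callbacks that the real
 * device_queue_manager_init() selects for this scheduling policy. */
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	dqm->started = 0;
	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	dqm->started = 1;
	return 0;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm->started = 0;
	return 0;
}

int main(void)
{
	struct device_queue_manager dqm;

	/* wiring, as in the KFD_SCHED_POLICY_HWS case of the patch */
	dqm.ops.initialize = initialize_cpsch;
	dqm.ops.start = start_cpsch;
	dqm.ops.stop = stop_cpsch;

	/* callers now dispatch through dqm.ops.<fn>() instead of dqm.<fn>() */
	if (dqm.ops.initialize(&dqm) != 0 || dqm.ops.start(&dqm) != 0)
		return 1;
	printf("dqm started: %d\n", dqm.started);
	dqm.ops.stop(&dqm);
	return 0;
}

Because every call site goes through dqm->ops, device_queue_manager_init()
can later install a different ops table per ASIC without touching the
callers, which is how this prepares DQM for more than one APU.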
Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c                  2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c                   6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c    68
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h    25
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c             2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c   16
6 files changed, 65 insertions, 54 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index b008fd67ace9..38b6150a19ee 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -439,7 +439,7 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
         (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
            ? cache_policy_coherent : cache_policy_noncoherent;
 
-    if (!dev->dqm->set_cache_memory_policy(dev->dqm,
+    if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
                 &pdd->qpd,
                 default_policy,
                 alternate_policy,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a23ed2440080..a770ec6f22ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -253,7 +253,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
         goto device_queue_manager_error;
     }
 
-    if (kfd->dqm->start(kfd->dqm) != 0) {
+    if (kfd->dqm->ops.start(kfd->dqm) != 0) {
         dev_err(kfd_device,
             "Error starting queuen manager for device (%x:%x)\n",
             kfd->pdev->vendor, kfd->pdev->device);
@@ -307,7 +307,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
     BUG_ON(kfd == NULL);
 
     if (kfd->init_complete) {
-        kfd->dqm->stop(kfd->dqm);
+        kfd->dqm->ops.stop(kfd->dqm);
         amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
         amd_iommu_free_device(kfd->pdev);
     }
@@ -328,7 +328,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
             return -ENXIO;
         amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
                         iommu_pasid_shutdown_callback);
-        kfd->dqm->start(kfd->dqm);
+        kfd->dqm->ops.start(kfd->dqm);
     }
 
     return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index c83f01153440..12c84488551e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -271,7 +271,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 
     BUG_ON(!dqm || !q || !qpd);
 
-    mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+    mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
     if (mqd == NULL)
         return -ENOMEM;
 
@@ -305,14 +305,14 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
     mutex_lock(&dqm->lock);
 
     if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
-        mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+        mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
         if (mqd == NULL) {
             retval = -ENOMEM;
             goto out;
         }
         deallocate_hqd(dqm, q);
     } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
-        mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+        mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
         if (mqd == NULL) {
             retval = -ENOMEM;
             goto out;
@@ -348,7 +348,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
     BUG_ON(!dqm || !q || !q->mqd);
 
     mutex_lock(&dqm->lock);
-    mqd = dqm->get_mqd_manager(dqm, q->properties.type);
+    mqd = dqm->ops.get_mqd_manager(dqm, q->properties.type);
     if (mqd == NULL) {
         mutex_unlock(&dqm->lock);
         return -ENOMEM;
@@ -515,7 +515,7 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
     memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
 
-    mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+    mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
     if (mqd == NULL) {
         kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
         return -ENOMEM;
@@ -646,7 +646,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
     struct mqd_manager *mqd;
     int retval;
 
-    mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+    mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
     if (!mqd)
         return -ENOMEM;
 
@@ -849,7 +849,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
     if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
         select_sdma_engine_id(q);
 
-    mqd = dqm->get_mqd_manager(dqm,
+    mqd = dqm->ops.get_mqd_manager(dqm,
             get_mqd_type_from_queue_type(q->properties.type));
 
     if (mqd == NULL) {
@@ -994,7 +994,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
     /* remove queue from list to prevent rescheduling after preemption */
     mutex_lock(&dqm->lock);
-    mqd = dqm->get_mqd_manager(dqm,
+    mqd = dqm->ops.get_mqd_manager(dqm,
             get_mqd_type_from_queue_type(q->properties.type));
     if (!mqd) {
         retval = -ENOMEM;
@@ -1116,40 +1116,40 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
     case KFD_SCHED_POLICY_HWS:
     case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
         /* initialize dqm for cp scheduling */
-        dqm->create_queue = create_queue_cpsch;
-        dqm->initialize = initialize_cpsch;
-        dqm->start = start_cpsch;
-        dqm->stop = stop_cpsch;
-        dqm->destroy_queue = destroy_queue_cpsch;
-        dqm->update_queue = update_queue;
-        dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-        dqm->register_process = register_process_nocpsch;
-        dqm->unregister_process = unregister_process_nocpsch;
-        dqm->uninitialize = uninitialize_nocpsch;
-        dqm->create_kernel_queue = create_kernel_queue_cpsch;
-        dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
-        dqm->set_cache_memory_policy = set_cache_memory_policy;
+        dqm->ops.create_queue = create_queue_cpsch;
+        dqm->ops.initialize = initialize_cpsch;
+        dqm->ops.start = start_cpsch;
+        dqm->ops.stop = stop_cpsch;
+        dqm->ops.destroy_queue = destroy_queue_cpsch;
+        dqm->ops.update_queue = update_queue;
+        dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+        dqm->ops.register_process = register_process_nocpsch;
+        dqm->ops.unregister_process = unregister_process_nocpsch;
+        dqm->ops.uninitialize = uninitialize_nocpsch;
+        dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
+        dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
+        dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
         break;
     case KFD_SCHED_POLICY_NO_HWS:
         /* initialize dqm for no cp scheduling */
-        dqm->start = start_nocpsch;
-        dqm->stop = stop_nocpsch;
-        dqm->create_queue = create_queue_nocpsch;
-        dqm->destroy_queue = destroy_queue_nocpsch;
-        dqm->update_queue = update_queue;
-        dqm->get_mqd_manager = get_mqd_manager_nocpsch;
-        dqm->register_process = register_process_nocpsch;
-        dqm->unregister_process = unregister_process_nocpsch;
-        dqm->initialize = initialize_nocpsch;
-        dqm->uninitialize = uninitialize_nocpsch;
-        dqm->set_cache_memory_policy = set_cache_memory_policy;
+        dqm->ops.start = start_nocpsch;
+        dqm->ops.stop = stop_nocpsch;
+        dqm->ops.create_queue = create_queue_nocpsch;
+        dqm->ops.destroy_queue = destroy_queue_nocpsch;
+        dqm->ops.update_queue = update_queue;
+        dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
+        dqm->ops.register_process = register_process_nocpsch;
+        dqm->ops.unregister_process = unregister_process_nocpsch;
+        dqm->ops.initialize = initialize_nocpsch;
+        dqm->ops.uninitialize = uninitialize_nocpsch;
+        dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
         break;
     default:
         BUG();
         break;
     }
 
-    if (dqm->initialize(dqm) != 0) {
+    if (dqm->ops.initialize(dqm) != 0) {
         kfree(dqm);
         return NULL;
     }
@@ -1161,7 +1161,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
 {
     BUG_ON(!dqm);
 
-    dqm->uninitialize(dqm);
+    dqm->ops.uninitialize(dqm);
     kfree(dqm);
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 554c06ee8892..72d2ca056e19 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -46,7 +46,7 @@ struct device_process_node {
 };
 
 /**
- * struct device_queue_manager
+ * struct device_queue_manager_ops
  *
  * @create_queue: Queue creation routine.
  *
@@ -81,15 +81,9 @@ struct device_process_node {
  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
  * memory apertures.
  *
- * This struct is a base class for the kfd queues scheduler in the
- * device level. The device base class should expose the basic operations
- * for queue creation and queue destruction. This base class hides the
- * scheduling mode of the driver and the specific implementation of the
- * concrete device. This class is the only class in the queues scheduler
- * that configures the H/W.
  */
 
-struct device_queue_manager {
+struct device_queue_manager_ops {
     int (*create_queue)(struct device_queue_manager *dqm,
                 struct queue *q,
                 struct qcm_process_device *qpd,
@@ -124,7 +118,22 @@ struct device_queue_manager {
                 enum cache_policy alternate_policy,
                 void __user *alternate_aperture_base,
                 uint64_t alternate_aperture_size);
+};
+
+/**
+ * struct device_queue_manager
+ *
+ * This struct is a base class for the kfd queues scheduler in the
+ * device level. The device base class should expose the basic operations
+ * for queue creation and queue destruction. This base class hides the
+ * scheduling mode of the driver and the specific implementation of the
+ * concrete device. This class is the only class in the queues scheduler
+ * that configures the H/W.
+ *
+ */
 
+struct device_queue_manager {
+    struct device_queue_manager_ops ops;
 
     struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
     struct packet_manager packets;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 773c213f2f9a..add0fb4cc658 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -56,7 +56,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
     switch (type) {
     case KFD_QUEUE_TYPE_DIQ:
     case KFD_QUEUE_TYPE_HIQ:
-        kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
+        kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
                         KFD_MQD_TYPE_HIQ);
         break;
     default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 948b1ca8e7a2..513eeb6e402a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -178,7 +178,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 
     if (list_empty(&pqm->queues)) {
         pdd->qpd.pqm = pqm;
-        dev->dqm->register_process(dev->dqm, &pdd->qpd);
+        dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
     }
 
     pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
@@ -204,7 +204,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
             goto err_create_queue;
         pqn->q = q;
         pqn->kq = NULL;
-        retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+        retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
                         &q->properties.vmid);
         print_queue(q);
         break;
@@ -217,7 +217,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
         kq->queue->properties.queue_id = *qid;
         pqn->kq = kq;
         pqn->q = NULL;
-        retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
+        retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
+                        kq, &pdd->qpd);
         break;
     default:
         BUG();
@@ -285,13 +286,13 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
     if (pqn->kq) {
         /* destroy kernel queue (DIQ) */
         dqm = pqn->kq->dev->dqm;
-        dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
+        dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
         kernel_queue_uninit(pqn->kq);
     }
 
     if (pqn->q) {
         dqm = pqn->q->device->dqm;
-        retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
+        retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
         if (retval != 0)
             return retval;
 
@@ -303,7 +304,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
     clear_bit(qid, pqm->queue_slot_bitmap);
 
     if (list_empty(&pqm->queues))
-        dqm->unregister_process(dqm, &pdd->qpd);
+        dqm->ops.unregister_process(dqm, &pdd->qpd);
 
     return retval;
 }
@@ -324,7 +325,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
     pqn->q->properties.queue_percent = p->queue_percent;
     pqn->q->properties.priority = p->priority;
 
-    retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
+    retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+                            pqn->q);
     if (retval != 0)
         return retval;
 