author    Oded Gabbay <oded.gabbay@amd.com>    2015-01-12 08:53:44 -0500
committer Oded Gabbay <oded.gabbay@amd.com>    2015-01-12 08:53:44 -0500
commit    443fbd5f115feba160a8d7ed6ac708cb91e3b955 (patch)
tree      49c3cfd1ac5b30995cad16098d6706a9c834fa84
parent    a22fc85495575d81c36db24b12f66fd314b7ced1 (diff)
drm/amdkfd: Encapsulate KQ functions in ops structure
This patch does some re-org on the kernel_queue structure. It takes out
all the function pointers from the structure and puts them in a new
structure, called kernel_queue_ops. Then, it puts an instance of that
structure inside kernel_queue.

This re-org is done to prepare the KQ module to support more than one
AMD APU (Kaveri).

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
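[Editor's note: a minimal, self-contained C sketch of the re-org described above, using hypothetical names (struct kq, kq_ops, kq_flush) rather than the actual amdkfd definitions. The function pointers move out of the queue object into a separate ops structure that is embedded by value, so every call site goes through kq->ops.*:

#include <stdbool.h>

struct kq;				/* forward declaration */

struct kq_ops {				/* plays the role of kernel_queue_ops */
	bool (*initialize)(struct kq *kq, unsigned int queue_size);
	void (*submit_packet)(struct kq *kq);
};

struct kq {				/* plays the role of kernel_queue */
	struct kq_ops ops;		/* was: individual function pointers */
	unsigned int queue_size;	/* data members stay in the queue */
};

/* Call sites change from kq->submit_packet(kq) to kq->ops.submit_packet(kq): */
static inline void kq_flush(struct kq *kq)
{
	kq->ops.submit_packet(kq);
}

Grouping the hooks this way also means a whole interface can later be installed with a single struct assignment instead of one assignment per pointer.]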
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c   | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h   | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26
3 files changed, 54 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index add0fb4cc658..731635dace90 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -293,14 +293,14 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	if (!kq)
 		return NULL;
 
-	kq->initialize = initialize;
-	kq->uninitialize = uninitialize;
-	kq->acquire_packet_buffer = acquire_packet_buffer;
-	kq->submit_packet = submit_packet;
-	kq->sync_with_hw = sync_with_hw;
-	kq->rollback_packet = rollback_packet;
+	kq->ops.initialize = initialize;
+	kq->ops.uninitialize = uninitialize;
+	kq->ops.acquire_packet_buffer = acquire_packet_buffer;
+	kq->ops.submit_packet = submit_packet;
+	kq->ops.sync_with_hw = sync_with_hw;
+	kq->ops.rollback_packet = rollback_packet;
 
-	if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
 		pr_err("kfd: failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
@@ -312,7 +312,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
 {
 	BUG_ON(!kq);
 
-	kq->uninitialize(kq);
+	kq->ops.uninitialize(kq);
 	kfree(kq);
 }
 
@@ -329,12 +329,12 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 	kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
 	BUG_ON(!kq);
 
-	retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+	retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
 	BUG_ON(retval != 0);
 	for (i = 0; i < 5; i++)
 		buffer[i] = kq->nop_packet;
-	kq->submit_packet(kq);
-	kq->sync_with_hw(kq, 1000);
+	kq->ops.submit_packet(kq);
+	kq->ops.sync_with_hw(kq, 1000);
 
 	pr_debug("kfd: ending kernel queue test\n");
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index dcd2bdb68d44..e01b77b28500 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -28,8 +28,31 @@
 #include <linux/types.h>
 #include "kfd_priv.h"
 
-struct kernel_queue {
-	/* interface */
+/**
+ * struct kernel_queue_ops
+ *
+ * @initialize: Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ *
+ * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
+ *
+ * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * queue ring buffer where the calling function can write its packet. It is
+ * guaranteed that there is enough space for that packet. It also updates the
+ * pending write pointer to that location so subsequent calls to
+ * acquire_packet_buffer will get a correct write pointer.
+ *
+ * @submit_packet: Update the write pointer and doorbell of a kernel queue.
+ *
+ * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
+ * queue are equal, which means the CP has read all the submitted packets.
+ *
+ * @rollback_packet: This routine is called if we failed to build an acquired
+ * packet for some reason. It just overwrites the pending wptr with the current
+ * one.
+ *
+ */
+struct kernel_queue_ops {
 	bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
 	void (*uninitialize)(struct kernel_queue *kq);
@@ -41,6 +64,10 @@ struct kernel_queue {
 	int (*sync_with_hw)(struct kernel_queue *kq,
 			unsigned long timeout_ms);
 	void (*rollback_packet)(struct kernel_queue *kq);
+};
+
+struct kernel_queue {
+	struct kernel_queue_ops ops;
 
 	/* data */
 	struct kfd_dev *dev;
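[Editor's note: a hedged sketch of how a follow-up patch might exploit the new struct for the multi-APU support mentioned in the commit message. kq_ops_default, kq_install_ops, and initialize_vi are invented names, not part of this patch; the default hooks stand in for the static functions that kernel_queue_init() already wires up in kfd_kernel_queue.c:

/* Invented names; illustrative only, not part of this commit. */
static const struct kernel_queue_ops kq_ops_default = {
	.initialize		= initialize,
	.uninitialize		= uninitialize,
	.acquire_packet_buffer	= acquire_packet_buffer,
	.submit_packet		= submit_packet,
	.sync_with_hw		= sync_with_hw,
	.rollback_packet	= rollback_packet,
};

static void kq_install_ops(struct kernel_queue *kq, bool newer_asic)
{
	kq->ops = kq_ops_default;	/* one struct copy installs all hooks */
	if (newer_asic)
		kq->ops.initialize = initialize_vi;	/* override only what differs */
}

Compared with six separate pointer assignments, a single struct copy keeps the install site in lockstep with the struct definition as hooks are added.]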
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 3cda952ac2f8..5fb5c032d5d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -348,7 +348,7 @@ int pm_send_set_resources(struct packet_manager *pm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&pm->lock);
-	pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					sizeof(*packet) / sizeof(uint32_t),
 					(unsigned int **)&packet);
 	if (packet == NULL) {
@@ -375,8 +375,8 @@ int pm_send_set_resources(struct packet_manager *pm,
 	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
 	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
@@ -402,7 +402,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
 	mutex_lock(&pm->lock);
 
-	retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					packet_size_dwords, &rl_buffer);
 	if (retval != 0)
 		goto fail_acquire_packet_buffer;
@@ -412,15 +412,15 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	if (retval != 0)
 		goto fail_create_runlist;
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
 	return retval;
 
 fail_create_runlist:
-	pm->priv_queue->rollback_packet(pm->priv_queue);
+	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
@@ -438,7 +438,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	BUG_ON(!pm || !fence_address);
 
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->acquire_packet_buffer(
+	retval = pm->priv_queue->ops.acquire_packet_buffer(
 			pm->priv_queue,
 			sizeof(struct pm4_query_status) / sizeof(uint32_t),
 			(unsigned int **)&packet);
@@ -459,8 +459,8 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	packet->data_hi = upper_32_bits((uint64_t)fence_value);
 	packet->data_lo = lower_32_bits((uint64_t)fence_value);
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 	mutex_unlock(&pm->lock);
 
 	return 0;
@@ -482,7 +482,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	BUG_ON(!pm);
 
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->acquire_packet_buffer(
+	retval = pm->priv_queue->ops.acquire_packet_buffer(
 			pm->priv_queue,
 			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
 			&buffer);
@@ -537,8 +537,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 		break;
 	};
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 	return 0;