diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2018-07-11 05:30:45 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-07-31 04:25:41 -0400 |
commit | 2d454db04fcc0c03e05b4665831e5780240d79b8 (patch) | |
tree | c18fd4bc302ea68e57e1e1d95c0f253e800bc043 /drivers/gpu/nvgpu/gk20a | |
parent | d32692ae2427693daf85b3c7b4e24cd36471dec6 (diff) |
gpu: nvgpu: falcon queue support
-Renamed "struct pmu_queue" to "struct
nvgpu_falcon_queue" & moved to falcon.h
-Renamed pmu_queue_* functions to flcn_queue_* &
moved to new file falcon_queue.c
-Created ops for queue functions in struct
nvgpu_falcon_queue to support different queue
types like DMEM/FB-Q.
-Created ops in nvgpu_falcon_engine_dependency_ops
to add engine-specific queue functionality & assigned
the correct HAL functions in the hal*.c files.
-Made changes in dependent functions as needed to replace
struct pmu_queue & to call queue functions through the
nvgpu_falcon_queue data structure.
-Replaced input param "struct nvgpu_pmu *pmu" with
"struct gk20a *g" for pmu ops pmu_queue_head/pmu_queue_tail
& also for functions gk20a_pmu_queue_head()/
gk20a_pmu_queue_tail().
-Made changes in nvgpu_pmu_queue_init() to use nvgpu_falcon_queue
for PMU queue.
-Modified Makefile to include falcon_queue.o
-Modified Makefile.sources to include falcon_queue.c
Change-Id: I956328f6631b7154267fd5a29eaa1826190d99d1
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1776070
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/flcn_gk20a.c | 3 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 10 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 11 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 4 |
4 files changed, 15 insertions, 13 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c index 9ca7d91b..c55b90b6 100644 --- a/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/flcn_gk20a.c | |||
@@ -649,12 +649,15 @@ void gk20a_falcon_dump_stats(struct nvgpu_falcon *flcn) | |||
649 | 649 | ||
650 | static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn) | 650 | static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn) |
651 | { | 651 | { |
652 | struct gk20a *g = flcn->g; | ||
652 | struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops = | 653 | struct nvgpu_falcon_engine_dependency_ops *flcn_eng_dep_ops = |
653 | &flcn->flcn_engine_dep_ops; | 654 | &flcn->flcn_engine_dep_ops; |
654 | 655 | ||
655 | switch (flcn->flcn_id) { | 656 | switch (flcn->flcn_id) { |
656 | case FALCON_ID_PMU: | 657 | case FALCON_ID_PMU: |
657 | flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset; | 658 | flcn_eng_dep_ops->reset_eng = nvgpu_pmu_reset; |
659 | flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head; | ||
660 | flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail; | ||
658 | break; | 661 | break; |
659 | default: | 662 | default: |
660 | /* NULL assignment make sure | 663 | /* NULL assignment make sure |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index cfc9128d..6d19d8a3 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -771,7 +771,7 @@ struct gpu_ops { | |||
771 | u32 (*pmu_allocation_get_fb_size)( | 771 | u32 (*pmu_allocation_get_fb_size)( |
772 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); | 772 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr); |
773 | void (*get_pmu_init_msg_pmu_queue_params)( | 773 | void (*get_pmu_init_msg_pmu_queue_params)( |
774 | struct pmu_queue *queue, u32 id, | 774 | struct nvgpu_falcon_queue *queue, u32 id, |
775 | void *pmu_init_msg); | 775 | void *pmu_init_msg); |
776 | void *(*get_pmu_msg_pmu_init_msg_ptr)( | 776 | void *(*get_pmu_msg_pmu_init_msg_ptr)( |
777 | struct pmu_init_msg *init); | 777 | struct pmu_init_msg *init); |
@@ -1003,10 +1003,10 @@ struct gpu_ops { | |||
1003 | u32 (*pmu_get_queue_head_size)(void); | 1003 | u32 (*pmu_get_queue_head_size)(void); |
1004 | u32 (*pmu_get_queue_tail_size)(void); | 1004 | u32 (*pmu_get_queue_tail_size)(void); |
1005 | u32 (*pmu_get_queue_tail)(u32 i); | 1005 | u32 (*pmu_get_queue_tail)(u32 i); |
1006 | int (*pmu_queue_head)(struct nvgpu_pmu *pmu, | 1006 | int (*pmu_queue_head)(struct gk20a *g, |
1007 | struct pmu_queue *queue, u32 *head, bool set); | 1007 | struct nvgpu_falcon_queue *queue, u32 *head, bool set); |
1008 | int (*pmu_queue_tail)(struct nvgpu_pmu *pmu, | 1008 | int (*pmu_queue_tail)(struct gk20a *g, |
1009 | struct pmu_queue *queue, u32 *tail, bool set); | 1009 | struct nvgpu_falcon_queue *queue, u32 *tail, bool set); |
1010 | void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu, | 1010 | void (*pmu_msgq_tail)(struct nvgpu_pmu *pmu, |
1011 | u32 *tail, bool set); | 1011 | u32 *tail, bool set); |
1012 | u32 (*pmu_mutex_size)(void); | 1012 | u32 (*pmu_mutex_size)(void); |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index 11db5b23..bf4673bf 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -377,10 +377,9 @@ int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token) | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | 380 | int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue, |
381 | u32 *head, bool set) | 381 | u32 *head, bool set) |
382 | { | 382 | { |
383 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
384 | u32 queue_head_size = 0; | 383 | u32 queue_head_size = 0; |
385 | 384 | ||
386 | if (g->ops.pmu.pmu_get_queue_head_size) | 385 | if (g->ops.pmu.pmu_get_queue_head_size) |
@@ -414,10 +413,9 @@ int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | |||
414 | return 0; | 413 | return 0; |
415 | } | 414 | } |
416 | 415 | ||
417 | int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | 416 | int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue, |
418 | u32 *tail, bool set) | 417 | u32 *tail, bool set) |
419 | { | 418 | { |
420 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
421 | u32 queue_tail_size = 0; | 419 | u32 queue_tail_size = 0; |
422 | 420 | ||
423 | if (g->ops.pmu.pmu_get_queue_tail_size) | 421 | if (g->ops.pmu.pmu_get_queue_tail_size) |
@@ -692,7 +690,7 @@ bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu) | |||
692 | void gk20a_pmu_isr(struct gk20a *g) | 690 | void gk20a_pmu_isr(struct gk20a *g) |
693 | { | 691 | { |
694 | struct nvgpu_pmu *pmu = &g->pmu; | 692 | struct nvgpu_pmu *pmu = &g->pmu; |
695 | struct pmu_queue *queue; | 693 | struct nvgpu_falcon_queue *queue; |
696 | u32 intr, mask; | 694 | u32 intr, mask; |
697 | bool recheck = false; | 695 | bool recheck = false; |
698 | 696 | ||
@@ -749,9 +747,10 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
749 | 747 | ||
750 | if (recheck) { | 748 | if (recheck) { |
751 | queue = &pmu->queue[PMU_MESSAGE_QUEUE]; | 749 | queue = &pmu->queue[PMU_MESSAGE_QUEUE]; |
752 | if (!nvgpu_pmu_queue_is_empty(pmu, queue)) | 750 | if (!nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) { |
753 | gk20a_writel(g, pwr_falcon_irqsset_r(), | 751 | gk20a_writel(g, pwr_falcon_irqsset_r(), |
754 | pwr_falcon_irqsset_swgen0_set_f()); | 752 | pwr_falcon_irqsset_swgen0_set_f()); |
753 | } | ||
755 | } | 754 | } |
756 | 755 | ||
757 | nvgpu_mutex_release(&pmu->isr_mutex); | 756 | nvgpu_mutex_release(&pmu->isr_mutex); |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index 27d27007..d9c53c28 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -49,9 +49,9 @@ void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id); | |||
49 | int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); | 49 | int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
50 | int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); | 50 | int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
51 | 51 | ||
52 | int gk20a_pmu_queue_head(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | 52 | int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue, |
53 | u32 *head, bool set); | 53 | u32 *head, bool set); |
54 | int gk20a_pmu_queue_tail(struct nvgpu_pmu *pmu, struct pmu_queue *queue, | 54 | int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue, |
55 | u32 *tail, bool set); | 55 | u32 *tail, bool set); |
56 | void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set); | 56 | void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set); |
57 | 57 | ||