path: root/drivers/gpu/nvgpu/gk20a
author	Deepak Goyal <dgoyal@nvidia.com>	2017-01-10 23:23:29 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-01-18 19:46:50 -0500
commit	a69fa0e96cb8ca253ec3468f288f410219129b9a (patch)
tree	3b7b1e9e3cd6524013cbabf05130caf532064904 /drivers/gpu/nvgpu/gk20a
parent	8e53d790902b8a40098a5851584ae7ba58b357b6 (diff)
nvgpu: pmu: Use ops to get PMU queue HEAD/TAIL.
pmu_queue_head() and pmu_queue_tail() are updated to go through gops so
that chip-specific PMU queue HEAD/TAIL registers can be used.

JIRA GV11B-30

Change-Id: I9c3d6a4601ba2767f9ada95642052044e2b79747
Signed-off-by: Deepak Goyal <dgoyal@nvidia.com>
Reviewed-on: http://git-master/r/1283266
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	30
2 files changed, 25 insertions(+), 9 deletions(-)
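Before the diff itself, a minimal sketch of the access pattern this change introduces, assuming the usual gk20a includes; the pmu_read_queue_head() helper below is hypothetical and not part of the patch. The point is that the HEAD register offset and the number of command queues are now resolved through g->ops.pmu rather than through the hard-coded pwr_pmu_queue_head_r() / pwr_pmu_queue_head__size_1_v() accessors, so a newer chip (e.g. GV11B) only needs to install different ops.

/*
 * Hypothetical helper, for illustration only (not in this patch):
 * read one command-queue HEAD pointer through the per-chip ops.
 */
static u32 pmu_read_queue_head(struct gk20a *g, u32 index)
{
	/* The chip's init_pmu_ops must have installed these ops. */
	if (!g->ops.pmu.pmu_get_queue_head ||
	    !g->ops.pmu.pmu_get_queue_head_size ||
	    index >= g->ops.pmu.pmu_get_queue_head_size())
		return 0;

	return pwr_pmu_queue_head_address_v(
			gk20a_readl(g, g->ops.pmu.pmu_get_queue_head(index)));
}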
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 6ca5855a..7df2c2e0 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -618,6 +618,10 @@ struct gpu_ops {
 		int (*pmu_setup_hw_and_bootstrap)(struct gk20a *g);
 		int (*pmu_nsbootstrap)(struct pmu_gk20a *pmu);
 		int (*pmu_setup_elpg)(struct gk20a *g);
+		u32 (*pmu_get_queue_head)(u32 i);
+		u32 (*pmu_get_queue_head_size)(void);
+		u32 (*pmu_get_queue_tail_size)(void);
+		u32 (*pmu_get_queue_tail)(u32 i);
 		int (*init_wpr_region)(struct gk20a *g);
 		int (*load_lsfalcon_ucode)(struct gk20a *g, u32 falconidmask);
 		void (*write_dmatrfbase)(struct gk20a *g, u32 addr);
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index c9eb25fe..26ed3a49 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -2625,21 +2625,25 @@ static int pmu_queue_head(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 	u32 *head, bool set)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 queue_head_size = 0;
 
-	BUG_ON(!head);
+	if (g->ops.pmu.pmu_get_queue_head_size)
+		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
+
+	BUG_ON(!head || !queue_head_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= pwr_pmu_queue_head__size_1_v())
+		if (queue->index >= queue_head_size)
 			return -EINVAL;
 
 		if (!set)
 			*head = pwr_pmu_queue_head_address_v(
 				gk20a_readl(g,
-					pwr_pmu_queue_head_r(queue->index)));
+					g->ops.pmu.pmu_get_queue_head(queue->index)));
 		else
 			gk20a_writel(g,
-				pwr_pmu_queue_head_r(queue->index),
+				g->ops.pmu.pmu_get_queue_head(queue->index),
 				pwr_pmu_queue_head_address_f(*head));
 	} else {
 		if (!set)
@@ -2658,21 +2662,25 @@ static int pmu_queue_tail(struct pmu_gk20a *pmu, struct pmu_queue *queue,
 	u32 *tail, bool set)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
+	u32 queue_tail_size = 0;
+
+	if (g->ops.pmu.pmu_get_queue_tail_size)
+		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
 
-	BUG_ON(!tail);
+	BUG_ON(!tail || !queue_tail_size);
 
 	if (PMU_IS_COMMAND_QUEUE(queue->id)) {
 
-		if (queue->index >= pwr_pmu_queue_tail__size_1_v())
+		if (queue->index >= queue_tail_size)
 			return -EINVAL;
 
 		if (!set)
 			*tail = pwr_pmu_queue_tail_address_v(
 				gk20a_readl(g,
-					pwr_pmu_queue_tail_r(queue->index)));
+					g->ops.pmu.pmu_get_queue_tail(queue->index)));
 		else
 			gk20a_writel(g,
-				pwr_pmu_queue_tail_r(queue->index),
+				g->ops.pmu.pmu_get_queue_tail(queue->index),
 				pwr_pmu_queue_tail_address_f(*tail));
 
 	} else {
@@ -3445,6 +3453,10 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
 	gops->pmu.prepare_ucode = gk20a_prepare_ucode;
 	gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
 	gops->pmu.pmu_nsbootstrap = pmu_bootstrap;
+	gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r;
+	gops->pmu.pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v;
+	gops->pmu.pmu_get_queue_tail = pwr_pmu_queue_tail_r;
+	gops->pmu.pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v;
 	gops->pmu.pmu_setup_elpg = NULL;
 	gops->pmu.init_wpr_region = NULL;
 	gops->pmu.load_lsfalcon_ucode = NULL;
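The hunk above wires the gk20a register accessors in as the defaults. For context, a hedged sketch of how a later chip could build on this by reusing gk20a_init_pmu_ops() and overriding only the four new queue ops; every gv11b_* symbol below is illustrative and not added by this change.

/* Illustrative only -- none of these gv11b_* symbols exist in this patch. */
void gv11b_init_pmu_ops(struct gpu_ops *gops)
{
	/* Start from the gk20a defaults installed above. */
	gk20a_init_pmu_ops(gops);

	/* Point the queue ops at the chip's own HEAD/TAIL register helpers. */
	gops->pmu.pmu_get_queue_head = gv11b_pwr_pmu_queue_head_r;
	gops->pmu.pmu_get_queue_head_size = gv11b_pwr_pmu_queue_head__size_1_v;
	gops->pmu.pmu_get_queue_tail = gv11b_pwr_pmu_queue_tail_r;
	gops->pmu.pmu_get_queue_tail_size = gv11b_pwr_pmu_queue_tail__size_1_v;
}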