author		Terje Bergstrom <tbergstrom@nvidia.com>	2014-10-27 05:06:59 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:54 -0400
commit		2d5ff668cbc6a932df2c9cf79627d1d340e5c2c0 (patch)
tree		1d9bc4b774a9c2cea339891eaef3af5b87ee354d /drivers/gpu/nvgpu/gk20a
parent		23a182aaa61d120c965f1bce09609cc14d4e14eb (diff)
gpu: nvgpu: GR and LTC HAL to use const structs
Convert GR and LTC HALs to use const structs, and initialize them
with macros.

Bug 1567274

Change-Id: Ia3f24a5eccb27578d9cba69755f636818d11275c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/590371
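The core of the patch is a representation change: struct gpu_ops used to embed per-engine structs of function pointers that each chip's init function filled in one assignment at a time; now each chip exposes a single shared, read-only ops table and init stores one pointer. A minimal sketch of the pattern, using a hypothetical "widget" HAL in place of the real GR/LTC signatures:

/*
 * Before: struct gpu_ops { struct { int (*frob)(int); } widget; };
 * and init did gops->widget.frob = gk20a_widget_frob; per member.
 * After: one const table per chip, selected with a pointer store.
 */
struct widget_ops {
	int (*frob)(int x);
};

static int gk20a_widget_frob(int x) { return x + 1; }

/* builds a designated initializer, like __set_gr_gk20a_op() below */
#define __set_widget_op(X) .X = gk20a_widget_ ## X

static const struct widget_ops gk20a_widget_ops = {
	__set_widget_op(frob),
};

struct gpu_ops {
	const struct widget_ops *widget;
};

void gk20a_init_widget(struct gpu_ops *gops)
{
	gops->widget = &gk20a_widget_ops;	/* one pointer store */
}

Besides shrinking struct gpu_ops, the const tables can be placed in read-only memory, so the function pointers cannot be overwritten at runtime.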
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/cde_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	13
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c	3
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	114
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c	5
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	174
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_ops_gk20a.h	62
-rw-r--r--	drivers/gpu/nvgpu/gk20a/hal.c	10
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ltc_common.c	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/ltc_gk20a.c	31
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	4
15 files changed, 228 insertions, 220 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index 7931b83f..22a422a3 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -50,7 +50,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 	}
 
 	for (i = 0; i < cde_ctx->num_obj_ids; i++)
-		gk20a_free_obj_ctx(cde_ctx->ch,
+		gr_gk20a_free_obj_ctx(cde_ctx->ch,
 			&(struct nvgpu_free_obj_ctx_args)
 			{ cde_ctx->obj_ids[i] });
 
@@ -401,7 +401,7 @@ static int gk20a_init_cde_required_class(struct gk20a_cde_ctx *cde_ctx,
 	alloc_obj_ctx.class_num = required_class;
 	alloc_obj_ctx.padding = 0;
 
-	err = gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
+	err = gr_gk20a_alloc_obj_ctx(cde_ctx->ch, &alloc_obj_ctx);
 	if (err) {
 		gk20a_warn(&cde_ctx->pdev->dev, "cde: failed to allocate ctx. err=%d",
 			   err);
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 9f2e0017..777d7ca9 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -28,6 +28,7 @@
 
 #include "debug_gk20a.h"
 
+#include "gr_ops.h"
 #include "gk20a.h"
 #include "dbg_gpu_gk20a.h"
 #include "fence_gk20a.h"
@@ -638,7 +639,7 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
 	gk20a_free_error_notifiers(ch);
 
 	/* release channel ctx */
-	g->ops.gr.free_channel_ctx(ch);
+	g->ops.gr->free_channel_ctx(ch);
 
 	gk20a_gr_flush_channel_tlb(gr);
 
@@ -1558,8 +1559,8 @@ int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
 
 #ifdef CONFIG_DEBUG_FS
 	/* update debug settings */
-	if (g->ops.ltc.sync_debugfs)
-		g->ops.ltc.sync_debugfs(g);
+	if (g->ops.ltc->sync_debugfs)
+		g->ops.ltc->sync_debugfs(g);
 #endif
 
 	gk20a_dbg_info("channel %d", c->hw_chid);
@@ -2080,7 +2081,7 @@ static int gk20a_channel_zcull_bind(struct channel_gk20a *ch,
 
 	gk20a_dbg_fn("");
 
-	return g->ops.gr.bind_ctxsw_zcull(g, gr, ch,
+	return g->ops.gr->bind_ctxsw_zcull(g, gr, ch,
 				args->gpu_va, args->mode);
 }
 
@@ -2300,7 +2301,7 @@ long gk20a_channel_ioctl(struct file *filp,
 				__func__, cmd);
 			return err;
 		}
-		err = ch->g->ops.gr.alloc_obj_ctx(ch,
+		err = ch->g->ops.gr->alloc_obj_ctx(ch,
 				(struct nvgpu_alloc_obj_ctx_args *)buf);
 		gk20a_idle(dev);
 		break;
@@ -2312,7 +2313,7 @@ long gk20a_channel_ioctl(struct file *filp,
 				__func__, cmd);
 			return err;
 		}
-		err = ch->g->ops.gr.free_obj_ctx(ch,
+		err = ch->g->ops.gr->free_obj_ctx(ch,
 				(struct nvgpu_free_obj_ctx_args *)buf);
 		gk20a_idle(dev);
 		break;
diff --git a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
index 93831844..aae77647 100644
--- a/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ctrl_gk20a.c
@@ -21,6 +21,7 @@
 #include <linux/nvgpu.h>
 #include <uapi/linux/nvgpu.h>
 
+#include "gr_ops.h"
 #include "gk20a.h"
 #include "fence_gk20a.h"
 
@@ -274,7 +275,7 @@ long gk20a_ctrl_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg
 		if (zcull_info == NULL)
 			return -ENOMEM;
 
-		err = g->ops.gr.get_zcull_info(g, &g->gr, zcull_info);
+		err = g->ops.gr->get_zcull_info(g, &g->gr, zcull_info);
 		if (err) {
 			kfree(zcull_info);
 			break;
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index ed730174..347765aa 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1265,8 +1265,8 @@ void gk20a_fifo_recover(struct gk20a *g, u32 __engine_ids,
 	if (verbose)
 		gk20a_debug_dump(g->dev);
 
-	if (g->ops.ltc.flush)
-		g->ops.ltc.flush(g);
+	if (g->ops.ltc->flush)
+		g->ops.ltc->flush(g);
 
 	/* store faulted engines in advance */
 	g->fifo.mmu_fault_engines = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c
index 85864bcd..226b5ae3 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.c
@@ -612,7 +612,7 @@ static irqreturn_t gk20a_intr_thread_stall(int irq, void *dev_id)
 	if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
 		gk20a_priv_ring_isr(g);
 	if (mc_intr_0 & mc_intr_0_ltc_pending_f())
-		g->ops.ltc.isr(g);
+		g->ops.ltc->isr(g);
 	if (mc_intr_0 & mc_intr_0_pbus_pending_f())
 		gk20a_pbus_isr(g);
 
@@ -880,8 +880,8 @@ static int gk20a_pm_finalize_poweron(struct device *dev)
 		goto done;
 	}
 
-	if (g->ops.ltc.init_fs_state)
-		g->ops.ltc.init_fs_state(g);
+	if (g->ops.ltc->init_fs_state)
+		g->ops.ltc->init_fs_state(g);
 
 	err = gk20a_init_mm_support(g);
 	if (err) {
@@ -1817,7 +1817,7 @@ int gk20a_init_gpu_characteristics(struct gk20a *g)
 {
 	struct nvgpu_gpu_characteristics *gpu = &g->gpu_characteristics;
 
-	gpu->L2_cache_size = g->ops.ltc.determine_L2_size_bytes(g);
+	gpu->L2_cache_size = g->ops.ltc->determine_L2_size_bytes(g);
 	gpu->on_board_video_memory_size = 0; /* integrated GPU */
 
 	gpu->num_gpc = g->gr.gpc_count;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 5669e1c5..2c3fb400 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -33,7 +33,7 @@ struct acr_gm20b;
 #include <linux/tegra-soc.h>
 
 #include "../../../arch/arm/mach-tegra/iomap.h"
-
+#include "nvgpu_gpuid.h"
 #include "as_gk20a.h"
 #include "clk_gk20a.h"
 #include "fifo_gk20a.h"
@@ -60,81 +60,33 @@ enum gk20a_cbc_op {
 	gk20a_cbc_op_invalidate,
 };
 
+struct gpu_ltc_ops {
+	int (*determine_L2_size_bytes)(struct gk20a *gk20a);
+	void (*set_max_ways_evict_last)(struct gk20a *g, u32 max_ways);
+	int (*init_comptags)(struct gk20a *g, struct gr_gk20a *gr);
+	int (*cbc_ctrl)(struct gk20a *g, enum gk20a_cbc_op op,
+			u32 min, u32 max);
+	void (*set_zbc_color_entry)(struct gk20a *g,
+				    struct zbc_entry *color_val,
+				    u32 index);
+	void (*set_zbc_depth_entry)(struct gk20a *g,
+				    struct zbc_entry *depth_val,
+				    u32 index);
+	void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr);
+	void (*sync_debugfs)(struct gk20a *g);
+	void (*init_fs_state)(struct gk20a *g);
+	void (*elpg_flush)(struct gk20a *g);
+	void (*isr)(struct gk20a *g);
+	u32 (*cbc_fix_config)(struct gk20a *g, int base);
+	void (*flush)(struct gk20a *g);
+};
+
+struct gpu_ltc_ops;
+struct gpu_gr_ops;
+
 struct gpu_ops {
-	struct {
-		int (*determine_L2_size_bytes)(struct gk20a *gk20a);
-		void (*set_max_ways_evict_last)(struct gk20a *g, u32 max_ways);
-		int (*init_comptags)(struct gk20a *g, struct gr_gk20a *gr);
-		int (*cbc_ctrl)(struct gk20a *g, enum gk20a_cbc_op op,
-				u32 min, u32 max);
-		void (*set_zbc_color_entry)(struct gk20a *g,
-					    struct zbc_entry *color_val,
-					    u32 index);
-		void (*set_zbc_depth_entry)(struct gk20a *g,
-					    struct zbc_entry *depth_val,
-					    u32 index);
-		void (*init_cbc)(struct gk20a *g, struct gr_gk20a *gr);
-		void (*sync_debugfs)(struct gk20a *g);
-		void (*init_fs_state)(struct gk20a *g);
-		void (*elpg_flush)(struct gk20a *g);
-		void (*isr)(struct gk20a *g);
-		u32 (*cbc_fix_config)(struct gk20a *g, int base);
-		void (*flush)(struct gk20a *g);
-	} ltc;
-	struct {
-		int (*init_fs_state)(struct gk20a *g);
-		void (*access_smpc_reg)(struct gk20a *g, u32 quad, u32 offset);
-		void (*bundle_cb_defaults)(struct gk20a *g);
-		void (*cb_size_default)(struct gk20a *g);
-		int (*calc_global_ctx_buffer_size)(struct gk20a *g);
-		void (*commit_global_attrib_cb)(struct gk20a *g,
-						struct channel_ctx_gk20a *ch_ctx,
-						u64 addr, bool patch);
-		void (*commit_global_bundle_cb)(struct gk20a *g,
-						struct channel_ctx_gk20a *ch_ctx,
-						u64 addr, u64 size, bool patch);
-		int (*commit_global_cb_manager)(struct gk20a *g,
-						struct channel_gk20a *ch,
-						bool patch);
-		void (*commit_global_pagepool)(struct gk20a *g,
-					       struct channel_ctx_gk20a *ch_ctx,
-					       u64 addr, u32 size, bool patch);
-		void (*init_gpc_mmu)(struct gk20a *g);
-		int (*handle_sw_method)(struct gk20a *g, u32 addr,
-					u32 class_num, u32 offset, u32 data);
-		void (*set_alpha_circular_buffer_size)(struct gk20a *g,
-						       u32 data);
-		void (*set_circular_buffer_size)(struct gk20a *g, u32 data);
-		void (*enable_hww_exceptions)(struct gk20a *g);
-		bool (*is_valid_class)(struct gk20a *g, u32 class_num);
-		void (*get_sm_dsm_perf_regs)(struct gk20a *g,
-					     u32 *num_sm_dsm_perf_regs,
-					     u32 **sm_dsm_perf_regs,
-					     u32 *perf_register_stride);
-		void (*get_sm_dsm_perf_ctrl_regs)(struct gk20a *g,
-						  u32 *num_sm_dsm_perf_regs,
-						  u32 **sm_dsm_perf_regs,
-						  u32 *perf_register_stride);
-		void (*set_hww_esr_report_mask)(struct gk20a *g);
-		int (*setup_alpha_beta_tables)(struct gk20a *g,
-					       struct gr_gk20a *gr);
-		int (*falcon_load_ucode)(struct gk20a *g,
-					 u64 addr_base,
-					 struct gk20a_ctxsw_ucode_segments *segments,
-					 u32 reg_offset);
-		int (*load_ctxsw_ucode)(struct gk20a *g);
-		u32 (*get_gpc_tpc_mask)(struct gk20a *g, u32 gpc_index);
-		void (*free_channel_ctx)(struct channel_gk20a *c);
-		int (*alloc_obj_ctx)(struct channel_gk20a *c,
-				     struct nvgpu_alloc_obj_ctx_args *args);
-		int (*free_obj_ctx)(struct channel_gk20a *c,
-				    struct nvgpu_free_obj_ctx_args *args);
-		int (*bind_ctxsw_zcull)(struct gk20a *g, struct gr_gk20a *gr,
-					struct channel_gk20a *c, u64 zcull_va,
-					u32 mode);
-		int (*get_zcull_info)(struct gk20a *g, struct gr_gk20a *gr,
-				      struct gr_zcull_info *zcull_params);
-	} gr;
+	const struct gpu_ltc_ops *ltc;
+	const struct gpu_gr_ops *gr;
 	const char *name;
 	struct {
 		void (*init_fs_state)(struct gk20a *g);
@@ -720,18 +672,6 @@ int __gk20a_do_unidle(struct platform_device *pdev);
 const struct firmware *
 gk20a_request_firmware(struct gk20a *g, const char *fw_name);
 
-#define NVGPU_GPU_ARCHITECTURE_SHIFT 4
-
-/* constructs unique and compact GPUID from nvgpu_gpu_characteristics
- * arch/impl fields */
-#define GK20A_GPUID(arch, impl) ((u32) ((arch) | (impl)))
-
-#define GK20A_GPUID_GK20A \
-	GK20A_GPUID(NVGPU_GPU_ARCH_GK100, NVGPU_GPU_IMPL_GK20A)
-
-#define GK20A_GPUID_GM20B \
-	GK20A_GPUID(NVGPU_GPU_ARCH_GM200, NVGPU_GPU_IMPL_GM20B)
-
 int gk20a_init_gpu_characteristics(struct gk20a *g);
 
 int gk20a_user_init(struct platform_device *dev);
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index 52a34086..2abb0e9d 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -26,6 +26,7 @@
 
 #include <mach/clk.h>
 
+#include "gr_ops.h"
 #include "gk20a.h"
 #include "gr_gk20a.h"
 #include "fifo_gk20a.h"
@@ -623,9 +624,9 @@ static ssize_t tpc_fs_mask_read(struct device *device,
 	u32 tpc_fs_mask = 0;
 
 	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
-		if (g->ops.gr.get_gpc_tpc_mask)
+		if (g->ops.gr->get_gpc_tpc_mask)
 			tpc_fs_mask |=
-				g->ops.gr.get_gpc_tpc_mask(g, gpc_index) <<
+				g->ops.gr->get_gpc_tpc_mask(g, gpc_index) <<
 				(gr->max_tpc_per_gpc_count * gpc_index);
 	}
 
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 524547e7..dddec803 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -27,6 +27,7 @@
 #include <linux/firmware.h>
 #include <linux/nvhost.h>
 
+#include "gr_ops.h"
 #include "gk20a.h"
 #include "kind_gk20a.h"
 #include "gr_ctx_gk20a.h"
@@ -65,11 +66,6 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 		   struct channel_gk20a *c);
 static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c);
 
-/* channel gr ctx buffer */
-static int  gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
-					struct channel_gk20a *c);
-static void gr_gk20a_free_channel_gr_ctx(struct channel_gk20a *c);
-
 /* channel patch ctx buffer */
 static int  gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 					struct channel_gk20a *c);
@@ -751,7 +747,7 @@ clean_up:
 	return ret;
 }
 
-static int gr_gk20a_commit_global_cb_manager(struct gk20a *g,
+int gr_gk20a_commit_global_cb_manager(struct gk20a *g,
 			struct channel_gk20a *c, bool patch)
 {
 	struct gr_gk20a *gr = &g->gr;
@@ -856,7 +852,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_info("pagepool buffer addr : 0x%016llx, size : %d",
 		addr, size);
 
-	g->ops.gr.commit_global_pagepool(g, ch_ctx, addr, size, patch);
+	g->ops.gr->commit_global_pagepool(g, ch_ctx, addr, size, patch);
 
 	/* global bundle cb */
 	addr = (u64_lo32(ch_ctx->global_ctx_buffer_va[CIRCULAR_VA]) >>
@@ -869,7 +865,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_info("bundle cb addr : 0x%016llx, size : %d",
 		addr, size);
 
-	g->ops.gr.commit_global_bundle_cb(g, ch_ctx, addr, size, patch);
+	g->ops.gr->commit_global_bundle_cb(g, ch_ctx, addr, size, patch);
 
 	/* global attrib cb */
 	addr = (u64_lo32(ch_ctx->global_ctx_buffer_va[ATTRIBUTE_VA]) >>
@@ -878,7 +874,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 		(32 - gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v()));
 
 	gk20a_dbg_info("attrib cb addr : 0x%016llx", addr);
-	g->ops.gr.commit_global_attrib_cb(g, ch_ctx, addr, patch);
+	g->ops.gr->commit_global_attrib_cb(g, ch_ctx, addr, patch);
 
 	if (patch)
 		gr_gk20a_ctx_patch_write_end(g, ch_ctx);
@@ -886,7 +882,7 @@ static int gr_gk20a_commit_global_ctx_buffers(struct gk20a *g,
 	return 0;
 }
 
-static void gr_gk20a_commit_global_attrib_cb(struct gk20a *g,
+void gr_gk20a_commit_global_attrib_cb(struct gk20a *g,
 					    struct channel_ctx_gk20a *ch_ctx,
 					    u64 addr, bool patch)
 {
@@ -899,7 +895,7 @@ static void gr_gk20a_commit_global_attrib_cb(struct gk20a *g,
 		gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(), patch);
 }
 
-static void gr_gk20a_commit_global_bundle_cb(struct gk20a *g,
+void gr_gk20a_commit_global_bundle_cb(struct gk20a *g,
 					    struct channel_ctx_gk20a *ch_ctx,
 					    u64 addr, u64 size, bool patch)
 {
@@ -1153,7 +1149,7 @@ static inline u32 clear_count_bits(u32 num, u32 clear_count)
 	return num;
 }
 
-static int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
+int gr_gk20a_setup_alpha_beta_tables(struct gk20a *g,
 					struct gr_gk20a *gr)
 {
 	u32 table_index_bits = 5;
@@ -1257,7 +1253,7 @@ static u32 gr_gk20a_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 	return 0x1;
 }
 
-static int gr_gk20a_ctx_state_floorsweep(struct gk20a *g)
+int gr_gk20a_init_fs_state(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 	u32 tpc_index, gpc_index;
@@ -1319,14 +1315,14 @@ static int gr_gk20a_ctx_state_floorsweep(struct gk20a *g)
 
 	/* gr__setup_pd_mapping stubbed for gk20a */
 	gr_gk20a_setup_rop_mapping(g, gr);
-	if (g->ops.gr.setup_alpha_beta_tables)
-		g->ops.gr.setup_alpha_beta_tables(g, gr);
+	if (g->ops.gr->setup_alpha_beta_tables)
+		g->ops.gr->setup_alpha_beta_tables(g, gr);
 
 	if (gr->num_fbps == 1)
 		max_ways_evict = 9;
 
 	if (max_ways_evict != INVALID_MAX_WAYS)
-		g->ops.ltc.set_max_ways_evict_last(g, max_ways_evict);
+		g->ops.ltc->set_max_ways_evict_last(g, max_ways_evict);
 
 	for (gpc_index = 0;
 	     gpc_index < gr_pd_dist_skip_table__size_1_v() * 4;
@@ -2097,7 +2093,7 @@ void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base,
 			gr_fecs_bootvec_vec_f(segments->boot_entry));
 }
 
-int gr_gk20a_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
+int gr_gk20a_falcon_load_ucode(struct gk20a *g, u64 addr_base,
 	struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
 {
 	gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
@@ -2123,10 +2119,10 @@ static void gr_gk20a_load_falcon_with_bootloader(struct gk20a *g)
 
 	gr_gk20a_load_falcon_bind_instblk(g);
 
-	g->ops.gr.falcon_load_ucode(g, addr_base,
+	g->ops.gr->falcon_load_ucode(g, addr_base,
 		&g->ctxsw_ucode_info.fecs, 0);
 
-	g->ops.gr.falcon_load_ucode(g, addr_base,
+	g->ops.gr->falcon_load_ucode(g, addr_base,
 		&g->ctxsw_ucode_info.gpccs,
 		gr_gpcs_gpccs_falcon_hwcfg_r() -
 		gr_fecs_falcon_hwcfg_r());
@@ -2297,7 +2293,7 @@ static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
 
 	gk20a_dbg_fn("");
 
-	attr_buffer_size = g->ops.gr.calc_global_ctx_buffer_size(g);
+	attr_buffer_size = g->ops.gr->calc_global_ctx_buffer_size(g);
 
 	gk20a_dbg_info("cb_buffer_size : %d", cb_buffer_size);
 
@@ -2632,7 +2628,7 @@ void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *tsg)
 	__gr_gk20a_free_gr_ctx(tsg->g, tsg->vm, tsg->tsg_gr_ctx);
 }
 
-static void gr_gk20a_free_channel_gr_ctx(struct channel_gk20a *c)
+void gr_gk20a_free_channel_gr_ctx(struct channel_gk20a *c)
 {
 	__gr_gk20a_free_gr_ctx(c->g, c->vm, c->ch_ctx.gr_ctx);
 }
@@ -2719,7 +2715,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 	}
 }
 
-void gk20a_free_channel_ctx(struct channel_gk20a *c)
+void gr_gk20a_free_channel_ctx(struct channel_gk20a *c)
 {
 	gr_gk20a_unmap_global_ctx_buffers(c);
 	gr_gk20a_free_channel_patch_ctx(c);
@@ -2734,7 +2730,7 @@ void gk20a_free_channel_ctx(struct channel_gk20a *c)
 	c->first_init = false;
 }
 
-static bool gr_gk20a_is_valid_class(struct gk20a *g, u32 class_num)
+bool gr_gk20a_is_valid_class(struct gk20a *g, u32 class_num)
 {
 	bool valid = false;
 
@@ -2753,7 +2749,7 @@ static bool gr_gk20a_is_valid_class(struct gk20a *g, u32 class_num)
 	return valid;
 }
 
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
+int gr_gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_alloc_obj_ctx_args *args)
 {
 	struct gk20a *g = c->g;
@@ -2772,7 +2768,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 		return -EINVAL;
 	}
 
-	if (!g->ops.gr.is_valid_class(g, args->class_num)) {
+	if (!g->ops.gr->is_valid_class(g, args->class_num)) {
 		gk20a_err(dev_from_gk20a(g),
 			   "invalid obj class 0x%x", args->class_num);
 		err = -EINVAL;
@@ -2916,7 +2912,7 @@ out:
 	return err;
 }
 
-int gk20a_free_obj_ctx(struct channel_gk20a *c,
+int gr_gk20a_free_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_free_obj_ctx_args *args)
 {
 	unsigned long timeout = gk20a_get_gr_idle_timeout(c->g);
@@ -3008,7 +3004,7 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 	gk20a_allocator_destroy(&gr->comp_tags);
 }
 
-static void gr_gk20a_bundle_cb_defaults(struct gk20a *g)
+void gr_gk20a_bundle_cb_defaults(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 
@@ -3089,9 +3085,9 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 			gr->gpc_ppc_count[gpc_index] = gr->pe_count_per_gpc;
 		gr->ppc_count += gr->gpc_ppc_count[gpc_index];
 
-		if (g->ops.gr.get_gpc_tpc_mask)
+		if (g->ops.gr->get_gpc_tpc_mask)
 			gr->gpc_tpc_mask[gpc_index] =
-				g->ops.gr.get_gpc_tpc_mask(g, gpc_index);
+				g->ops.gr->get_gpc_tpc_mask(g, gpc_index);
 
 		for (pes_index = 0; pes_index < gr->pe_count_per_gpc; pes_index++) {
 
@@ -3174,9 +3170,9 @@ static int gr_gk20a_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 				pes_index, gpc_index,
 				gr->pes_tpc_mask[pes_index][gpc_index]);
 
-	g->ops.gr.bundle_cb_defaults(g);
-	g->ops.gr.cb_size_default(g);
-	g->ops.gr.calc_global_ctx_buffer_size(g);
+	g->ops.gr->bundle_cb_defaults(g);
+	g->ops.gr->cb_size_default(g);
+	g->ops.gr->calc_global_ctx_buffer_size(g);
 	gr->timeslice_mode = gr_gpcs_ppcs_cbm_cfg_timeslice_mode_enable_v();
 
 	gk20a_dbg_info("bundle_cb_default_size: %d",
@@ -3506,7 +3502,7 @@ static int gr_gk20a_add_zbc_color(struct gk20a *g, struct gr_gk20a *gr,
 	}
 
 	/* update l2 table */
-	g->ops.ltc.set_zbc_color_entry(g, color_val, index);
+	g->ops.ltc->set_zbc_color_entry(g, color_val, index);
 
 	/* update ds table */
 	gk20a_writel(g, gr_ds_zbc_color_r_r(),
@@ -3572,7 +3568,7 @@ static int gr_gk20a_add_zbc_depth(struct gk20a *g, struct gr_gk20a *gr,
 	}
 
 	/* update l2 table */
-	g->ops.ltc.set_zbc_depth_entry(g, depth_val, index);
+	g->ops.ltc->set_zbc_depth_entry(g, depth_val, index);
 
 	/* update ds table */
 	gk20a_writel(g, gr_ds_zbc_z_r(),
@@ -4172,7 +4168,7 @@ void gr_gk20a_enable_hww_exceptions(struct gk20a *g)
 		gr_ds_hww_report_mask_sph23_err_report_f());
 }
 
-static void gr_gk20a_set_hww_esr_report_mask(struct gk20a *g)
+void gr_gk20a_set_hww_esr_report_mask(struct gk20a *g)
 {
 	/* setup sm warp esr report masks */
 	gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(),
@@ -4240,8 +4236,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		fb_mmu_debug_rd_vol_false_f() |
 		fb_mmu_debug_rd_addr_f(addr));
 
-	if (g->ops.gr.init_gpc_mmu)
-		g->ops.gr.init_gpc_mmu(g);
+	if (g->ops.gr->init_gpc_mmu)
+		g->ops.gr->init_gpc_mmu(g);
 
 	/* load gr floorsweeping registers */
 	data = gk20a_readl(g, gr_gpc0_ppc0_pes_vsc_strem_r());
@@ -4284,8 +4280,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		 gr_fecs_host_int_enable_umimp_illegal_method_enable_f() |
 		 gr_fecs_host_int_enable_watchdog_enable_f());
 
-	g->ops.gr.enable_hww_exceptions(g);
-	g->ops.gr.set_hww_esr_report_mask(g);
+	g->ops.gr->enable_hww_exceptions(g);
+	g->ops.gr->set_hww_esr_report_mask(g);
 
 	/* enable TPC exceptions per GPC */
 	gk20a_gr_enable_gpc_exceptions(g);
@@ -4310,7 +4306,7 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 	else
 		gr_gk20a_load_zbc_default_table(g, gr);
 
-	g->ops.ltc.init_cbc(g, gr);
+	g->ops.ltc->init_cbc(g, gr);
 
 	/* load ctx init */
 	for (i = 0; i < sw_ctx_load->count; i++)
@@ -4329,11 +4325,11 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)
 		gr_fe_go_idle_timeout_count_disabled_f());
 
 	/* override a few ctx state registers */
-	g->ops.gr.commit_global_cb_manager(g, NULL, false);
+	g->ops.gr->commit_global_cb_manager(g, NULL, false);
 	gr_gk20a_commit_global_timeslice(g, NULL, false);
 
 	/* floorsweep anything left */
-	g->ops.gr.init_fs_state(g);
+	g->ops.gr->init_fs_state(g);
 
 	err = gr_gk20a_wait_idle(g, end_jiffies, GR_IDLE_CHECK_DEFAULT);
 	if (err)
@@ -4454,7 +4450,7 @@ int gr_gk20a_init_ctxsw(struct gk20a *g)
 	struct gr_gk20a *gr = &g->gr;
 	u32 err = 0;
 
-	err = g->ops.gr.load_ctxsw_ucode(g);
+	err = g->ops.gr->load_ctxsw_ucode(g);
 	if (err)
 		goto out;
 
@@ -4596,7 +4592,7 @@ static int gk20a_init_gr_setup_sw(struct gk20a *g)
 	gk20a_dbg_info("total ram pages : %lu", totalram_pages);
 	gr->max_comptag_mem = totalram_pages
 				 >> (10 - (PAGE_SHIFT - 10));
-	err = g->ops.ltc.init_comptags(g, gr);
+	err = g->ops.ltc->init_comptags(g, gr);
 	if (err)
 		goto clean_up;
 
@@ -4813,7 +4809,7 @@ void gk20a_gr_set_shader_exceptions(struct gk20a *g, u32 data)
 	}
 }
 
-static void gk20a_gr_set_circular_buffer_size(struct gk20a *g, u32 data)
+void gr_gk20a_set_circular_buffer_size(struct gk20a *g, u32 data)
 {
 	struct gr_gk20a *gr = &g->gr;
 	u32 gpc_index, ppc_index, stride, val, offset;
@@ -4864,7 +4860,7 @@ static void gk20a_gr_set_circular_buffer_size(struct gk20a *g, u32 data)
 	}
 }
 
-static void gk20a_gr_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
+void gr_gk20a_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 {
 	struct gr_gk20a *gr = &g->gr;
 	u32 gpc_index, ppc_index, stride, val;
@@ -4973,7 +4969,7 @@ int gk20a_gr_reset(struct gk20a *g)
 	return 0;
 }
 
-static int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
+int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
 				  u32 class_num, u32 offset, u32 data)
 {
 	gk20a_dbg_fn("");
@@ -4994,10 +4990,10 @@ static int gr_gk20a_handle_sw_method(struct gk20a *g, u32 addr,
 			gk20a_gr_set_shader_exceptions(g, data);
 			break;
 		case NVA297_SET_CIRCULAR_BUFFER_SIZE:
-			g->ops.gr.set_circular_buffer_size(g, data);
+			g->ops.gr->set_circular_buffer_size(g, data);
 			break;
 		case NVA297_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
-			g->ops.gr.set_alpha_circular_buffer_size(g, data);
+			g->ops.gr->set_alpha_circular_buffer_size(g, data);
 			break;
 		default:
 			goto fail;
@@ -5039,7 +5035,7 @@ static int gk20a_gr_intr_illegal_notify_pending(struct gk20a *g,
 static int gk20a_gr_handle_illegal_method(struct gk20a *g,
 					  struct gr_isr_data *isr_data)
 {
-	int ret = g->ops.gr.handle_sw_method(g, isr_data->addr,
+	int ret = g->ops.gr->handle_sw_method(g, isr_data->addr,
 			isr_data->class_num, isr_data->offset,
 			isr_data->data_lo);
 	if (ret)
@@ -6237,7 +6233,7 @@ int gr_gk20a_ctx_patch_smpc(struct gk20a *g,
 	return 0;
 }
 
-static void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
+void gr_gk20a_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
 {
 	u32 reg;
 	u32 quad_ctrl;
@@ -6409,7 +6405,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 	 * by computing it from the base gpc/tpc strides. Then make sure
 	 * it is a real match.
 	 */
-	g->ops.gr.get_sm_dsm_perf_regs(g, &num_sm_dsm_perf_regs,
+	g->ops.gr->get_sm_dsm_perf_regs(g, &num_sm_dsm_perf_regs,
 				       &sm_dsm_perf_regs,
 				       &perf_register_stride);
 
@@ -6440,7 +6436,7 @@ static int gr_gk20a_find_priv_offset_in_ext_buffer(struct gk20a *g,
 
 	/* Didn't find reg in supported group 1.
 	 *  so try the second group now */
-	g->ops.gr.get_sm_dsm_perf_ctrl_regs(g, &num_sm_dsm_perf_ctrl_regs,
+	g->ops.gr->get_sm_dsm_perf_ctrl_regs(g, &num_sm_dsm_perf_ctrl_regs,
 				       &sm_dsm_perf_ctrl_regs,
 				       &control_register_stride);
 
@@ -6897,8 +6893,8 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 		/* if this is a quad access, setup for special access*/
 		if (ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD)
-			&& g->ops.gr.access_smpc_reg)
-			g->ops.gr.access_smpc_reg(g,
+			&& g->ops.gr->access_smpc_reg)
+			g->ops.gr->access_smpc_reg(g,
 						  ctx_ops[i].quad,
 						  ctx_ops[i].offset);
 		offset = ctx_ops[i].offset;
@@ -7001,8 +6997,8 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 
 		/* if this is a quad access, setup for special access*/
 		if (ctx_ops[i].type == REGOP(TYPE_GR_CTX_QUAD) &&
-			g->ops.gr.access_smpc_reg)
-			g->ops.gr.access_smpc_reg(g, ctx_ops[i].quad,
+			g->ops.gr->access_smpc_reg)
+			g->ops.gr->access_smpc_reg(g, ctx_ops[i].quad,
 						 ctx_ops[i].offset);
 
 		for (j = 0; j < num_offsets; j++) {
@@ -7083,7 +7079,7 @@ int gr_gk20a_exec_ctx_ops(struct channel_gk20a *ch,
 	return err;
 }
 
-static void gr_gk20a_cb_size_default(struct gk20a *g)
+void gr_gk20a_cb_size_default(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 
@@ -7093,7 +7089,7 @@ static void gr_gk20a_cb_size_default(struct gk20a *g)
 		gr_gpc0_ppc0_cbm_cfg2_size_default_v();
 }
 
-static int gr_gk20a_calc_global_ctx_buffer_size(struct gk20a *g)
+int gr_gk20a_calc_global_ctx_buffer_size(struct gk20a *g)
 {
 	struct gr_gk20a *gr = &g->gr;
 	int size;
@@ -7140,35 +7136,37 @@ void gk20a_init_gr(struct gk20a *g)
 	init_waitqueue_head(&g->gr.init_wq);
 }
 
+#include "gr_ops_gk20a.h"
+static const struct gpu_gr_ops gk20a_gr_ops = {
+	__set_gr_gk20a_op(access_smpc_reg),
+	__set_gr_gk20a_op(bundle_cb_defaults),
+	__set_gr_gk20a_op(cb_size_default),
+	__set_gr_gk20a_op(calc_global_ctx_buffer_size),
+	__set_gr_gk20a_op(commit_global_attrib_cb),
+	__set_gr_gk20a_op(commit_global_bundle_cb),
+	__set_gr_gk20a_op(commit_global_cb_manager),
+	__set_gr_gk20a_op(commit_global_pagepool),
+	__set_gr_gk20a_op(handle_sw_method),
+	__set_gr_gk20a_op(set_alpha_circular_buffer_size),
+	__set_gr_gk20a_op(set_circular_buffer_size),
+	__set_gr_gk20a_op(enable_hww_exceptions),
+	__set_gr_gk20a_op(is_valid_class),
+	__set_gr_gk20a_op(get_sm_dsm_perf_regs),
+	__set_gr_gk20a_op(get_sm_dsm_perf_ctrl_regs),
+	__set_gr_gk20a_op(init_fs_state),
+	__set_gr_gk20a_op(set_hww_esr_report_mask),
+	__set_gr_gk20a_op(setup_alpha_beta_tables),
+	__set_gr_gk20a_op(falcon_load_ucode),
+	__set_gr_gk20a_op(load_ctxsw_ucode),
+	__set_gr_gk20a_op(get_gpc_tpc_mask),
+	__set_gr_gk20a_op(free_channel_ctx),
+	__set_gr_gk20a_op(alloc_obj_ctx),
+	__set_gr_gk20a_op(free_obj_ctx),
+	__set_gr_gk20a_op(bind_ctxsw_zcull),
+	__set_gr_gk20a_op(get_zcull_info)
+};
+
 void gk20a_init_gr_ops(struct gpu_ops *gops)
 {
-	gops->gr.access_smpc_reg = gr_gk20a_access_smpc_reg;
-	gops->gr.bundle_cb_defaults = gr_gk20a_bundle_cb_defaults;
-	gops->gr.cb_size_default = gr_gk20a_cb_size_default;
-	gops->gr.calc_global_ctx_buffer_size =
-		gr_gk20a_calc_global_ctx_buffer_size;
-	gops->gr.commit_global_attrib_cb = gr_gk20a_commit_global_attrib_cb;
-	gops->gr.commit_global_bundle_cb = gr_gk20a_commit_global_bundle_cb;
-	gops->gr.commit_global_cb_manager = gr_gk20a_commit_global_cb_manager;
-	gops->gr.commit_global_pagepool = gr_gk20a_commit_global_pagepool;
-	gops->gr.handle_sw_method = gr_gk20a_handle_sw_method;
-	gops->gr.set_alpha_circular_buffer_size =
-		gk20a_gr_set_circular_buffer_size;
-	gops->gr.set_circular_buffer_size =
-		gk20a_gr_set_alpha_circular_buffer_size;
-	gops->gr.enable_hww_exceptions = gr_gk20a_enable_hww_exceptions;
-	gops->gr.is_valid_class = gr_gk20a_is_valid_class;
-	gops->gr.get_sm_dsm_perf_regs = gr_gk20a_get_sm_dsm_perf_regs;
-	gops->gr.get_sm_dsm_perf_ctrl_regs = gr_gk20a_get_sm_dsm_perf_ctrl_regs;
-	gops->gr.init_fs_state = gr_gk20a_ctx_state_floorsweep;
-	gops->gr.set_hww_esr_report_mask = gr_gk20a_set_hww_esr_report_mask;
-	gops->gr.setup_alpha_beta_tables = gr_gk20a_setup_alpha_beta_tables;
-	gops->gr.falcon_load_ucode = gr_gk20a_load_ctxsw_ucode_segments;
-	gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
-	gops->gr.get_gpc_tpc_mask = gr_gk20a_get_gpc_tpc_mask;
-	gops->gr.free_channel_ctx = gk20a_free_channel_ctx;
-	gops->gr.alloc_obj_ctx = gk20a_alloc_obj_ctx;
-	gops->gr.free_obj_ctx = gk20a_free_obj_ctx;
-	gops->gr.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull;
-	gops->gr.get_zcull_info = gr_gk20a_get_zcull_info;
+	gops->gr = &gk20a_gr_ops;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 7db6bccf..508edc79 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -350,11 +350,11 @@ int gr_gk20a_init_ctx_vars(struct gk20a *g, struct gr_gk20a *gr);
 struct nvgpu_alloc_obj_ctx_args;
 struct nvgpu_free_obj_ctx_args;
 
-int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
+int gr_gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_alloc_obj_ctx_args *args);
-int gk20a_free_obj_ctx(struct channel_gk20a *c,
+int gr_gk20a_free_obj_ctx(struct channel_gk20a *c,
 			struct nvgpu_free_obj_ctx_args *args);
-void gk20a_free_channel_ctx(struct channel_gk20a *c);
+void gr_gk20a_free_channel_ctx(struct channel_gk20a *c);
 
 int gk20a_gr_isr(struct gk20a *g);
 int gk20a_gr_nonstall_isr(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/gr_ops_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_ops_gk20a.h
new file mode 100644
index 00000000..df0cf020
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/gr_ops_gk20a.h
@@ -0,0 +1,62 @@
+/*
+ * GPK20A GPU graphics ops
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _GR_OPS_GK20A_H_
+#define _GR_OPS_GK20A_H_
+
+#include "gr_ops.h"
+
+#define __gr_gk20a_op(X)	gr_gk20a_ ## X
+#define __set_gr_gk20a_op(X)	.X = gr_gk20a_ ## X
+
+int __gr_gk20a_op(init_fs_state)(struct gk20a *);
+void __gr_gk20a_op(access_smpc_reg)(struct gk20a *, u32, u32);
+void __gr_gk20a_op(bundle_cb_defaults)(struct gk20a *);
+void __gr_gk20a_op(cb_size_default)(struct gk20a *);
+int __gr_gk20a_op(calc_global_ctx_buffer_size)(struct gk20a *);
+void __gr_gk20a_op(commit_global_attrib_cb)(struct gk20a *,
+		struct channel_ctx_gk20a *, u64, bool);
+void __gr_gk20a_op(commit_global_bundle_cb)(struct gk20a *,
+		struct channel_ctx_gk20a *, u64, u64, bool);
+int __gr_gk20a_op(commit_global_cb_manager)(struct gk20a *,
+		struct channel_gk20a *, bool);
+void __gr_gk20a_op(commit_global_pagepool)(struct gk20a *,
+		struct channel_ctx_gk20a *, u64, u32, bool);
+void __gr_gk20a_op(init_gpc_mmu)(struct gk20a *);
+int __gr_gk20a_op(handle_sw_method)(struct gk20a *, u32, u32, u32, u32);
+void __gr_gk20a_op(set_alpha_circular_buffer_size)(struct gk20a *, u32);
+void __gr_gk20a_op(set_circular_buffer_size)(struct gk20a *, u32);
+void __gr_gk20a_op(enable_hww_exceptions)(struct gk20a *);
+bool __gr_gk20a_op(is_valid_class)(struct gk20a *, u32);
+void __gr_gk20a_op(get_sm_dsm_perf_regs)(struct gk20a *, u32 *, u32 **, u32 *);
+void __gr_gk20a_op(get_sm_dsm_perf_ctrl_regs)(struct gk20a *,
+		u32 *, u32 **, u32 *);
+void __gr_gk20a_op(set_hww_esr_report_mask)(struct gk20a *);
+int __gr_gk20a_op(setup_alpha_beta_tables)(struct gk20a *, struct gr_gk20a *);
+int __gr_gk20a_op(falcon_load_ucode)(struct gk20a *, u64,
+		struct gk20a_ctxsw_ucode_segments *, u32);
+int __gr_gk20a_op(load_ctxsw_ucode)(struct gk20a *);
+u32 __gr_gk20a_op(get_gpc_tpc_mask)(struct gk20a *, u32);
+void __gr_gk20a_op(free_channel_ctx)(struct channel_gk20a *);
+int __gr_gk20a_op(alloc_obj_ctx)(struct channel_gk20a *c,
+		struct nvgpu_alloc_obj_ctx_args *);
+int __gr_gk20a_op(free_obj_ctx)(struct channel_gk20a *c,
+		struct nvgpu_free_obj_ctx_args *);
+int __gr_gk20a_op(bind_ctxsw_zcull)(struct gk20a *,
+		struct gr_gk20a *, struct channel_gk20a *, u64, u32);
+int __gr_gk20a_op(get_zcull_info)(struct gk20a *,
+		struct gr_gk20a *, struct gr_zcull_info *);
+
+#endif
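The two helper macros in this new header work by token pasting: __gr_gk20a_op() forms a function name for prototypes, and __set_gr_gk20a_op() forms a designated initializer for the ops table in gr_gk20a.c. An illustrative expansion (not part of the patch):

/* prototype, as written in gr_ops_gk20a.h: */
int __gr_gk20a_op(init_fs_state)(struct gk20a *);
/* after preprocessing: */
int gr_gk20a_init_fs_state(struct gk20a *);

/* table entry, as written in gr_gk20a.c: */
__set_gr_gk20a_op(init_fs_state),
/* after preprocessing: */
.init_fs_state = gr_gk20a_init_fs_state,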
diff --git a/drivers/gpu/nvgpu/gk20a/hal.c b/drivers/gpu/nvgpu/gk20a/hal.c
index 8d1a29dd..84b8d819 100644
--- a/drivers/gpu/nvgpu/gk20a/hal.c
+++ b/drivers/gpu/nvgpu/gk20a/hal.c
@@ -20,13 +20,13 @@
 int gpu_init_hal(struct gk20a *g)
 {
 	u32 ver = g->gpu_characteristics.arch + g->gpu_characteristics.impl;
+	gk20a_dbg_fn("ver=0x%x", ver);
 	switch (ver) {
-	case GK20A_GPUID_GK20A:
-		gk20a_dbg_info("gk20a detected");
-		gk20a_init_hal(&g->ops);
+	case NVGPU_GPUID_GK20A:
+		if (gk20a_init_hal(&g->ops))
+			return -ENODEV;
 		break;
-	case GK20A_GPUID_GM20B:
-		gk20a_dbg_info("gm20b detected");
+	case NVGPU_GPUID_GM20B:
 		if (gm20b_init_hal(&g->ops))
 			return -ENODEV;
 		break;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index e0ab3f9b..badf640e 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -189,9 +189,9 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 		compbit_base_post_divide++;
 
 	/* Bug 1477079 indicates sw adjustment on the posted divided base. */
-	if (g->ops.ltc.cbc_fix_config)
+	if (g->ops.ltc->cbc_fix_config)
 		compbit_base_post_divide =
-			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
+			g->ops.ltc->cbc_fix_config(g, compbit_base_post_divide);
 
 	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
 		     compbit_base_post_divide);
@@ -204,7 +204,7 @@ static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 
 	gr->compbit_store.base_hw = compbit_base_post_divide;
 
-	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
+	g->ops.ltc->cbc_ctrl(g, gk20a_cbc_op_invalidate,
 			    0, max_comptag_lines - 1);
 
 }
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
index aa094dc7..92dac449 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_gk20a.c
@@ -265,19 +265,24 @@ static int gk20a_determine_L2_size_bytes(struct gk20a *g)
 	return cache_size;
 }
 
-void gk20a_init_ltc(struct gpu_ops *gops)
-{
-	gops->ltc.determine_L2_size_bytes = gk20a_determine_L2_size_bytes;
-	gops->ltc.set_max_ways_evict_last = gk20a_ltc_set_max_ways_evict_last;
-	gops->ltc.init_comptags = gk20a_ltc_init_comptags;
-	gops->ltc.cbc_ctrl = gk20a_ltc_cbc_ctrl;
-	gops->ltc.set_zbc_color_entry = gk20a_ltc_set_zbc_color_entry;
-	gops->ltc.set_zbc_depth_entry = gk20a_ltc_set_zbc_depth_entry;
-	gops->ltc.init_cbc = gk20a_ltc_init_cbc;
+static const struct gpu_ltc_ops gk20a_ltc_ops = {
+	.determine_L2_size_bytes = gk20a_determine_L2_size_bytes,
+	.set_max_ways_evict_last = gk20a_ltc_set_max_ways_evict_last,
+	.init_comptags = gk20a_ltc_init_comptags,
+	.cbc_ctrl = gk20a_ltc_cbc_ctrl,
+	.set_zbc_color_entry = gk20a_ltc_set_zbc_color_entry,
+	.set_zbc_depth_entry = gk20a_ltc_set_zbc_depth_entry,
+	.init_cbc = gk20a_ltc_init_cbc,
 #ifdef CONFIG_DEBUG_FS
-	gops->ltc.sync_debugfs = gk20a_ltc_sync_debugfs;
+	.sync_debugfs = gk20a_ltc_sync_debugfs,
 #endif
-	gops->ltc.elpg_flush = gk20a_mm_g_elpg_flush_locked;
-	gops->ltc.init_fs_state = gk20a_ltc_init_fs_state;
-	gops->ltc.isr = gk20a_ltc_isr;
+	.elpg_flush = gk20a_mm_g_elpg_flush_locked,
+	.init_fs_state = gk20a_ltc_init_fs_state,
+	.isr = gk20a_ltc_isr
+
+};
+
+void gk20a_init_ltc(struct gpu_ops *gops)
+{
+	gops->ltc = &gk20a_ltc_ops;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 9a8e11af..e3046177 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1100,7 +1100,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 				COMP_TAG_LINE_SIZE_SHIFT;
 
 		/* init/clear the ctag buffer */
-		g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_clear,
+		g->ops.ltc->cbc_ctrl(g, gk20a_cbc_op_clear,
 			ctag_offset, ctag_offset + ctag_lines - 1);
 	}
 
@@ -3079,7 +3079,7 @@ int gk20a_mm_suspend(struct gk20a *g)
 {
 	gk20a_dbg_fn("");
 
-	g->ops.ltc.elpg_flush(g);
+	g->ops.ltc->elpg_flush(g);
 
 	gk20a_dbg_fn("done");
 	return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 0580f19d..1bf4bea0 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -1818,7 +1818,7 @@ int gk20a_init_pmu_reset_enable_hw(struct gk20a *g)
 	return 0;
 }
 
-static int gk20a_prepare_ucode(struct gk20a *g)
+static int gk20a_prepare_pmu_ucode(struct gk20a *g)
 {
 	struct pmu_gk20a *pmu = &g->pmu;
 	int i, err = 0;
@@ -2259,7 +2259,7 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
 
 void gk20a_init_pmu_ops(struct gpu_ops *gops)
 {
-	gops->pmu.prepare_ucode = gk20a_prepare_ucode;
+	gops->pmu.prepare_ucode = gk20a_prepare_pmu_ucode;
 	gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1;
 	gops->pmu.pmu_setup_elpg = NULL;
 	gops->pmu.init_wpr_region = NULL;