author     Terje Bergstrom <tbergstrom@nvidia.com>    2014-10-27 05:06:59 -0400
committer  Dan Willemsen <dwillemsen@nvidia.com>      2015-03-18 15:11:54 -0400
commit     2d5ff668cbc6a932df2c9cf79627d1d340e5c2c0
tree       1d9bc4b774a9c2cea339891eaef3af5b87ee354d /drivers/gpu/nvgpu/gm20b
parent     23a182aaa61d120c965f1bce09609cc14d4e14eb
gpu: nvgpu: GR and LTC HAL to use const structs
Convert GR and LTC HALs to use const structs, and initialize them
with macros.

Bug 1567274

Change-Id: Ia3f24a5eccb27578d9cba69755f636818d11275c
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/590371
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b')
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.c      91
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_gm20b.h       8
-rw-r--r--  drivers/gpu/nvgpu/gm20b/gr_ops_gm20b.h  79
-rw-r--r--  drivers/gpu/nvgpu/gm20b/ltc_gm20b.c     38
4 files changed, 144 insertions(+), 72 deletions(-)
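
The conversion pattern applied to both HALs below can be sketched in isolation. A minimal sketch with stand-in types (the gpu_ops structs here are illustrative, not the real nvgpu definitions): per-field runtime assignment is replaced by a statically initialized ops table, and the per-chip init function shrinks to installing a pointer to that table, which is what lets one table be shared across instances and eventually const-qualified.

/* Minimal sketch of the conversion, with simplified stand-in types. */
struct gk20a;                                    /* opaque device struct */

struct gpu_ltc_ops {
        int (*determine_L2_size_bytes)(struct gk20a *g);
};

struct gpu_ops_old { struct gpu_ltc_ops ltc; };  /* ops embedded by value */
struct gpu_ops_new { struct gpu_ltc_ops *ltc; }; /* ops behind a pointer */

static int gm20b_determine_L2_size_bytes(struct gk20a *g)
{
        (void)g;
        return 0;        /* stands in for the real register-based query */
}

/* Before: every hook assigned at runtime, one store per field. */
static void gm20b_init_ltc_old(struct gpu_ops_old *gops)
{
        gops->ltc.determine_L2_size_bytes = gm20b_determine_L2_size_bytes;
}

/* After: one statically initialized table; init just installs a pointer. */
static struct gpu_ltc_ops gm20b_ltc_ops = {
        .determine_L2_size_bytes = gm20b_determine_L2_size_bytes,
};

static void gm20b_init_ltc_new(struct gpu_ops_new *gops)
{
        gops->ltc = &gm20b_ltc_ops;
}

The ltc_gm20b.c hunk at the end of this page shows the real conversion of exactly this kind.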
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
index 8a3de4e8..7b69c5c8 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.c
@@ -1,5 +1,5 @@
 /*
- * GM20B GPC MMU
+ * GM20B GPU GR
  *
  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
  *
@@ -16,6 +16,7 @@
 #include <linux/types.h>
 #include <linux/delay.h>        /* for mdelay */
 
+#include "gr_ops.h"
 #include "gk20a/gk20a.h"
 #include "gk20a/gr_gk20a.h"
 
@@ -28,7 +29,7 @@
 #include "pmu_gm20b.h"
 #include "acr_gm20b.h"
 
-static void gr_gm20b_init_gpc_mmu(struct gk20a *g)
+void gr_gm20b_init_gpc_mmu(struct gk20a *g)
 {
         u32 temp;
 
@@ -64,7 +65,7 @@ static void gr_gm20b_init_gpc_mmu(struct gk20a *g)
                 gk20a_readl(g, fb_fbhub_num_active_ltcs_r()));
 }
 
-static void gr_gm20b_bundle_cb_defaults(struct gk20a *g)
+void gr_gm20b_bundle_cb_defaults(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
 
@@ -76,7 +77,7 @@ static void gr_gm20b_bundle_cb_defaults(struct gk20a *g)
                 gr_pd_ab_dist_cfg2_token_limit_init_v();
 }
 
-static void gr_gm20b_cb_size_default(struct gk20a *g)
+void gr_gm20b_cb_size_default(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
 
@@ -86,7 +87,7 @@ static void gr_gm20b_cb_size_default(struct gk20a *g)
                 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v();
 }
 
-static int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
+int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
         int size;
@@ -107,7 +108,7 @@ static int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g)
         return size;
 }
 
-static void gr_gk20a_commit_global_attrib_cb(struct gk20a *g,
+void gr_gm20b_commit_global_attrib_cb(struct gk20a *g,
                         struct channel_ctx_gk20a *ch_ctx,
                         u64 addr, bool patch)
 {
@@ -124,7 +125,7 @@ static void gr_gk20a_commit_global_attrib_cb(struct gk20a *g,
                 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(), patch);
 }
 
-static void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
+void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
                         struct channel_ctx_gk20a *ch_ctx,
                         u64 addr, u64 size, bool patch)
 {
@@ -160,7 +161,7 @@ static void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
 
 }
 
-static int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
+int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
                         struct channel_gk20a *c, bool patch)
 {
         struct gr_gk20a *gr = &g->gr;
@@ -247,7 +248,7 @@ static int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
         return 0;
 }
 
-static void gr_gm20b_commit_global_pagepool(struct gk20a *g,
+void gr_gm20b_commit_global_pagepool(struct gk20a *g,
                         struct channel_ctx_gk20a *ch_ctx,
                         u64 addr, u32 size, bool patch)
 {
@@ -259,7 +260,7 @@ static void gr_gm20b_commit_global_pagepool(struct gk20a *g,
 
 }
 
-static int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
+int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
                         u32 class_num, u32 offset, u32 data)
 {
         gk20a_dbg_fn("");
@@ -280,10 +281,10 @@ static int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
                 gk20a_gr_set_shader_exceptions(g, data);
                 break;
         case NVB197_SET_CIRCULAR_BUFFER_SIZE:
-                g->ops.gr.set_circular_buffer_size(g, data);
+                g->ops.gr->set_circular_buffer_size(g, data);
                 break;
         case NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
-                g->ops.gr.set_alpha_circular_buffer_size(g, data);
+                g->ops.gr->set_alpha_circular_buffer_size(g, data);
                 break;
         default:
                 goto fail;
@@ -295,7 +296,7 @@ fail:
         return -EINVAL;
 }
 
-static void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
+void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data)
 {
         struct gr_gk20a *gr = &g->gr;
         u32 gpc_index, ppc_index, stride, val;
@@ -395,7 +396,7 @@ void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data)
         }
 }
 
-static void gr_gm20b_enable_hww_exceptions(struct gk20a *g)
+void gr_gm20b_enable_hww_exceptions(struct gk20a *g)
 {
         gr_gk20a_enable_hww_exceptions(g);
 
@@ -406,7 +407,7 @@ static void gr_gm20b_enable_hww_exceptions(struct gk20a *g)
                 gr_ds_hww_report_mask_2_sph24_err_report_f());
 }
 
-static void gr_gm20b_set_hww_esr_report_mask(struct gk20a *g)
+void gr_gm20b_set_hww_esr_report_mask(struct gk20a *g)
 {
         /* setup sm warp esr report masks */
         gk20a_writel(g, gr_gpcs_tpcs_sm_hww_warp_esr_report_mask_r(),
@@ -439,7 +440,7 @@ static void gr_gm20b_set_hww_esr_report_mask(struct gk20a *g)
                 gr_gpcs_tpcs_sm_hww_global_esr_report_mask_multiple_warp_errors_report_f());
 }
 
-static bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num)
+bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num)
 {
         bool valid = false;
 
@@ -459,7 +460,7 @@ static bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num)
         return valid;
 }
 
-static void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
+void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
                         u32 *num_sm_dsm_perf_regs,
                         u32 **sm_dsm_perf_regs,
                         u32 *perf_register_stride)
@@ -470,7 +471,7 @@ static void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
         *perf_register_stride = ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
 }
 
-static void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
+void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
                         u32 *num_sm_dsm_perf_regs,
                         u32 **sm_dsm_perf_regs,
                         u32 *ctrl_register_stride)
@@ -481,7 +482,7 @@ static void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
         *ctrl_register_stride = ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v();
 }
 
-static u32 gr_gm20b_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
+u32 gr_gm20b_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
 {
         u32 val;
         struct gr_gk20a *gr = &g->gr;
@@ -492,7 +493,7 @@ static u32 gr_gm20b_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index)
         return (~val) & ((0x1 << gr->max_tpc_per_gpc_count) - 1);
 }
 
-static int gr_gm20b_ctx_state_floorsweep(struct gk20a *g)
+int gr_gm20b_init_fs_state(struct gk20a *g)
 {
         struct gr_gk20a *gr = &g->gr;
         u32 tpc_index, gpc_index;
@@ -595,7 +596,7 @@ static int gr_gm20b_ctx_state_floorsweep(struct gk20a *g)
         return 0;
 }
 
-static int gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
+int gr_gm20b_falcon_load_ucode(struct gk20a *g, u64 addr_base,
         struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset)
 {
         gk20a_writel(g, reg_offset + gr_fecs_dmactl_r(),
@@ -622,7 +623,7 @@ static void gr_gm20b_load_gpccs_with_bootloader(struct gk20a *g)
 
         gr_gk20a_load_falcon_bind_instblk(g);
 
-        g->ops.gr.falcon_load_ucode(g, addr_base,
+        g->ops.gr->falcon_load_ucode(g, addr_base,
                         &g->ctxsw_ucode_info.gpccs,
                         gr_gpcs_gpccs_falcon_hwcfg_r() -
                         gr_fecs_falcon_hwcfg_r());
@@ -648,7 +649,7 @@ static int gr_gm20b_ctx_wait_lsf_ready(struct gk20a *g, u32 timeout, u32 val)
         return -ETIMEDOUT;
 }
 
-static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
+int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 {
         u32 err;
         gk20a_dbg_fn("");
@@ -710,42 +711,30 @@ static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 }
 #else
 
-static int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
+int gr_gm20b_load_ctxsw_ucode(struct gk20a *g)
 {
         return -EPERM;
 }
 
 #endif
 
+#include "gk20a/gr_ops_gk20a.h"
+#include "gr_ops_gm20b.h"
+
+static struct gpu_gr_ops gm20b_gr_ops = {
+        __set_gr_gm20b_ops(),
+        __set_gr_gk20a_op(load_ctxsw_ucode)
+};
+
+static struct gpu_gr_ops gm20b_gr_privsecurity_ops = {
+        __set_gr_gm20b_ops(),
+        __set_gr_gm20b_op(load_ctxsw_ucode)
+};
+
 void gm20b_init_gr(struct gpu_ops *gops)
 {
-        gops->gr.init_gpc_mmu = gr_gm20b_init_gpc_mmu;
-        gops->gr.bundle_cb_defaults = gr_gm20b_bundle_cb_defaults;
-        gops->gr.cb_size_default = gr_gm20b_cb_size_default;
-        gops->gr.calc_global_ctx_buffer_size =
-                gr_gm20b_calc_global_ctx_buffer_size;
-        gops->gr.commit_global_attrib_cb = gr_gk20a_commit_global_attrib_cb;
-        gops->gr.commit_global_bundle_cb = gr_gm20b_commit_global_bundle_cb;
-        gops->gr.commit_global_cb_manager = gr_gm20b_commit_global_cb_manager;
-        gops->gr.commit_global_pagepool = gr_gm20b_commit_global_pagepool;
-        gops->gr.handle_sw_method = gr_gm20b_handle_sw_method;
-        gops->gr.set_alpha_circular_buffer_size = gr_gm20b_set_alpha_circular_buffer_size;
-        gops->gr.set_circular_buffer_size = gr_gm20b_set_circular_buffer_size;
-        gops->gr.enable_hww_exceptions = gr_gm20b_enable_hww_exceptions;
-        gops->gr.is_valid_class = gr_gm20b_is_valid_class;
-        gops->gr.get_sm_dsm_perf_regs = gr_gm20b_get_sm_dsm_perf_regs;
-        gops->gr.get_sm_dsm_perf_ctrl_regs = gr_gm20b_get_sm_dsm_perf_ctrl_regs;
-        gops->gr.init_fs_state = gr_gm20b_ctx_state_floorsweep;
-        gops->gr.set_hww_esr_report_mask = gr_gm20b_set_hww_esr_report_mask;
-        gops->gr.falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments;
         if (gops->privsecurity)
-                gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode;
+                gops->gr = &gm20b_gr_privsecurity_ops;
         else
-                gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode;
-        gops->gr.get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask;
-        gops->gr.free_channel_ctx = gk20a_free_channel_ctx;
-        gops->gr.alloc_obj_ctx = gk20a_alloc_obj_ctx;
-        gops->gr.free_obj_ctx = gk20a_free_obj_ctx;
-        gops->gr.bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull;
-        gops->gr.get_zcull_info = gr_gk20a_get_zcull_info;
+                gops->gr = &gm20b_gr_ops;
 }
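
A knock-on change visible throughout the hunks above: since gops->gr now holds a pointer to an ops table rather than an embedded struct, every dispatch site switches from '.' to '->' (see handle_sw_method and load_gpccs_with_bootloader). A hypothetical call site, again with simplified stand-in types rather than the real nvgpu definitions:

typedef unsigned int u32;

struct gk20a;

struct gpu_gr_ops {
        void (*set_circular_buffer_size)(struct gk20a *g, u32 data);
};

struct gpu_ops {
        struct gpu_gr_ops *gr;  /* previously an embedded struct member */
};

static void demo_dispatch(struct gpu_ops *ops, struct gk20a *g, u32 data)
{
        /* before the patch: ops->gr.set_circular_buffer_size(g, data); */
        ops->gr->set_circular_buffer_size(g, data);
}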
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
index 8348b9d9..e822b33c 100644
--- a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
@@ -1,5 +1,5 @@
 /*
- * GM20B GPC MMU
+ * GM20B GPU GR
  *
  * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
  *
@@ -13,8 +13,8 @@
  * more details.
  */
 
-#ifndef _NVHOST_GM20B_GR_MMU_H
-#define _NVHOST_GM20B_GR_MMU_H
+#ifndef _NVGPU_GR_GM20B_H_
+#define _NVGPU_GR_GM20B_H_
 struct gk20a;
 
 enum {
@@ -29,5 +29,7 @@ enum {
 #define NVB1C0_SET_SHADER_EXCEPTIONS    0x1528
 
 #define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE       0
+
+struct gpu_ops;
 void gm20b_init_gr(struct gpu_ops *gops);
 #endif
diff --git a/drivers/gpu/nvgpu/gm20b/gr_ops_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_ops_gm20b.h
new file mode 100644
index 00000000..9477da75
--- /dev/null
+++ b/drivers/gpu/nvgpu/gm20b/gr_ops_gm20b.h
@@ -0,0 +1,79 @@
+/*
+ * GM20B GPU graphics ops
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _GR_OPS_GM20B_H_
+#define _GR_OPS_GM20B_H_
+
+#include "gr_ops.h"
+
+#define __gr_gm20b_op(X)        gr_gm20b_ ## X
+#define __set_gr_gm20b_op(X)    . X = gr_gm20b_ ## X
+
+void __gr_gm20b_op(init_gpc_mmu)(struct gk20a *);
+void __gr_gm20b_op(bundle_cb_defaults)(struct gk20a *);
+void __gr_gm20b_op(cb_size_default)(struct gk20a *);
+int __gr_gm20b_op(calc_global_ctx_buffer_size)(struct gk20a *);
+void __gr_gm20b_op(commit_global_bundle_cb)(struct gk20a *,
+                struct channel_ctx_gk20a *, u64, u64, bool);
+int __gr_gm20b_op(commit_global_cb_manager)(struct gk20a *,
+                struct channel_gk20a *, bool);
+void __gr_gm20b_op(commit_global_pagepool)(struct gk20a *,
+                struct channel_ctx_gk20a *, u64, u32, bool);
+int __gr_gm20b_op(handle_sw_method)(struct gk20a *, u32, u32, u32, u32);
+void __gr_gm20b_op(set_alpha_circular_buffer_size)(struct gk20a *, u32);
+void __gr_gm20b_op(set_circular_buffer_size)(struct gk20a *, u32);
+void __gr_gm20b_op(enable_hww_exceptions)(struct gk20a *);
+bool __gr_gm20b_op(is_valid_class)(struct gk20a *, u32);
+void __gr_gm20b_op(get_sm_dsm_perf_regs)(struct gk20a *, u32 *, u32 **, u32 *);
+void __gr_gm20b_op(get_sm_dsm_perf_ctrl_regs)(struct gk20a *,
+                u32 *, u32 **, u32 *);
+int __gr_gm20b_op(init_fs_state)(struct gk20a *);
+void __gr_gm20b_op(set_hww_esr_report_mask)(struct gk20a *);
+int __gr_gm20b_op(falcon_load_ucode)(struct gk20a *,
+                u64, struct gk20a_ctxsw_ucode_segments *, u32);
+u32 __gr_gm20b_op(get_gpc_tpc_mask)(struct gk20a *, u32);
+int __gr_gm20b_op(load_ctxsw_ucode)(struct gk20a *);
+
+#define __set_gr_gm20b_ops() \
+        /* newly defined for gm20b */ \
+        __set_gr_gm20b_op(init_gpc_mmu), \
+        __set_gr_gm20b_op(bundle_cb_defaults), \
+        __set_gr_gm20b_op(cb_size_default), \
+        __set_gr_gm20b_op(calc_global_ctx_buffer_size), \
+        __set_gr_gm20b_op(commit_global_bundle_cb), \
+        __set_gr_gm20b_op(commit_global_cb_manager), \
+        __set_gr_gm20b_op(commit_global_pagepool), \
+        __set_gr_gm20b_op(handle_sw_method), \
+        __set_gr_gm20b_op(set_alpha_circular_buffer_size), \
+        __set_gr_gm20b_op(set_circular_buffer_size), \
+        __set_gr_gm20b_op(enable_hww_exceptions), \
+        __set_gr_gm20b_op(is_valid_class), \
+        __set_gr_gm20b_op(get_sm_dsm_perf_regs), \
+        __set_gr_gm20b_op(get_sm_dsm_perf_ctrl_regs), \
+        __set_gr_gm20b_op(init_fs_state), \
+        __set_gr_gm20b_op(set_hww_esr_report_mask), \
+        __set_gr_gm20b_op(falcon_load_ucode), \
+        __set_gr_gm20b_op(get_gpc_tpc_mask), \
+        \
+        /* reused from gk20a */ \
+        __set_gr_gk20a_op(access_smpc_reg), \
+        __set_gr_gk20a_op(commit_global_attrib_cb), \
+        __set_gr_gk20a_op(free_channel_ctx), \
+        __set_gr_gk20a_op(alloc_obj_ctx), \
+        __set_gr_gk20a_op(free_obj_ctx), \
+        __set_gr_gk20a_op(bind_ctxsw_zcull), \
+        __set_gr_gk20a_op(get_zcull_info)
+
+#endif
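
To make the macro mechanics concrete: __gr_gm20b_op(X) token-pastes the gr_gm20b_ prefix onto X, and __set_gr_gm20b_op(X) emits a designated initializer for the field of the same name, so __set_gr_gm20b_ops() expands to the full initializer list used by the two gr tables in gr_gm20b.c. A compilable sketch (the two macros are copied verbatim from the header above; demo_gr_ops is a stand-in for the real struct gpu_gr_ops):

struct gk20a;

#define __gr_gm20b_op(X)        gr_gm20b_ ## X
#define __set_gr_gm20b_op(X)    . X = gr_gm20b_ ## X

/* __gr_gm20b_op(init_gpc_mmu) expands to gr_gm20b_init_gpc_mmu: */
void __gr_gm20b_op(init_gpc_mmu)(struct gk20a *g) { (void)g; }

struct demo_gr_ops {
        void (*init_gpc_mmu)(struct gk20a *g);
};

/* __set_gr_gm20b_op(init_gpc_mmu) expands to
 * .init_gpc_mmu = gr_gm20b_init_gpc_mmu */
static struct demo_gr_ops demo_ops = {
        __set_gr_gm20b_op(init_gpc_mmu)
};

Because every entry is spelled once inside __set_gr_gm20b_ops(), gm20b_gr_ops and gm20b_gr_privsecurity_ops stay in sync automatically and differ only in their load_ctxsw_ucode entry.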
diff --git a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
index a089b59c..2a888e88 100644
--- a/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/ltc_gm20b.c
@@ -388,24 +388,26 @@ static int gm20b_determine_L2_size_bytes(struct gk20a *g)
         return cache_size;
 }
 
-void gm20b_init_ltc(struct gpu_ops *gops)
-{
-        /* Gk20a reused ops. */
-        gops->ltc.determine_L2_size_bytes = gm20b_determine_L2_size_bytes;
-        gops->ltc.set_max_ways_evict_last = gk20a_ltc_set_max_ways_evict_last;
-        gops->ltc.set_zbc_color_entry = gk20a_ltc_set_zbc_color_entry;
-        gops->ltc.set_zbc_depth_entry = gk20a_ltc_set_zbc_depth_entry;
-        gops->ltc.init_cbc = gk20a_ltc_init_cbc;
-
-        /* GM20b specific ops. */
-        gops->ltc.init_fs_state = gm20b_ltc_init_fs_state;
-        gops->ltc.init_comptags = gm20b_ltc_init_comptags;
-        gops->ltc.cbc_ctrl = gm20b_ltc_cbc_ctrl;
-        gops->ltc.elpg_flush = gm20b_ltc_g_elpg_flush_locked;
-        gops->ltc.isr = gm20b_ltc_isr;
-        gops->ltc.cbc_fix_config = gm20b_ltc_cbc_fix_config;
-        gops->ltc.flush = gm20b_flush_ltc;
+static struct gpu_ltc_ops gm20b_ltc_ops = {
+        .determine_L2_size_bytes = gm20b_determine_L2_size_bytes,
+        .set_max_ways_evict_last = gk20a_ltc_set_max_ways_evict_last,
+        .set_zbc_color_entry = gk20a_ltc_set_zbc_color_entry,
+        .set_zbc_depth_entry = gk20a_ltc_set_zbc_depth_entry,
+        .init_cbc = gk20a_ltc_init_cbc,
 #ifdef CONFIG_DEBUG_FS
-        gops->ltc.sync_debugfs = gk20a_ltc_sync_debugfs;
+        .sync_debugfs = gk20a_ltc_sync_debugfs,
 #endif
+        /* GM20b specific ops. */
+        .init_fs_state = gm20b_ltc_init_fs_state,
+        .init_comptags = gm20b_ltc_init_comptags,
+        .cbc_ctrl = gm20b_ltc_cbc_ctrl,
+        .elpg_flush = gm20b_ltc_g_elpg_flush_locked,
+        .isr = gm20b_ltc_isr,
+        .cbc_fix_config = gm20b_ltc_cbc_fix_config,
+        .flush = gm20b_flush_ltc
+};
+
+void gm20b_init_ltc(struct gpu_ops *gops)
+{
+        gops->ltc = &gm20b_ltc_ops;
 }