Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/gr_gm20b.h')
-rw-r--r--   drivers/gpu/nvgpu/gm20b/gr_gm20b.h   137
1 file changed, 137 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/gr_gm20b.h b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
new file mode 100644
index 00000000..18e6b032
--- /dev/null
+++ b/drivers/gpu/nvgpu/gm20b/gr_gm20b.h
@@ -0,0 +1,137 @@
/*
 * GM20B GPC MMU
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _NVHOST_GM20B_GR_MMU_H
#define _NVHOST_GM20B_GR_MMU_H

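/* Forward declarations of types defined elsewhere in the nvgpu driver. */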
struct gk20a;
struct nvgpu_warpstate;

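/* Hardware class IDs handled by the GM20B (Maxwell) graphics HAL. */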
enum {
        MAXWELL_B                 = 0xB197,
        MAXWELL_COMPUTE_B         = 0xB1C0,
        KEPLER_INLINE_TO_MEMORY_B = 0xA140,
        MAXWELL_DMA_COPY_A        = 0xB0B5,
        MAXWELL_CHANNEL_GPFIFO_A  = 0xB06F,
};

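/*
 * Method offsets treated as SW methods for the MAXWELL_B (NVB197) and
 * MAXWELL_COMPUTE_B (NVB1C0) classes; see gr_gm20b_handle_sw_method() below.
 */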
#define NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE   0x02dc
#define NVB197_SET_CIRCULAR_BUFFER_SIZE         0x1280
#define NVB197_SET_SHADER_EXCEPTIONS            0x1528
#define NVB197_SET_RD_COALESCE                  0x102c
#define NVB1C0_SET_SHADER_EXCEPTIONS            0x1528
#define NVB1C0_SET_RD_COALESCE                  0x0228

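/* Data value that disables shader exception reporting via SET_SHADER_EXCEPTIONS. */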
#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0

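/*
 * GM20B graphics HAL entry points, wired into the per-chip gpu_ops table
 * during GM20B HAL initialization (see hal_gm20b.c).
 */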
void gr_gm20b_commit_global_attrib_cb(struct gk20a *g,
        struct channel_ctx_gk20a *ch_ctx,
        u64 addr, bool patch);
int gr_gm20b_init_fs_state(struct gk20a *g);
int gm20b_gr_tpc_disable_override(struct gk20a *g, u32 mask);
void gr_gm20b_set_rd_coalesce(struct gk20a *g, u32 data);
void gm20a_gr_disable_rd_coalesce(struct gk20a *g);
void gr_gm20b_init_gpc_mmu(struct gk20a *g);
void gr_gm20b_bundle_cb_defaults(struct gk20a *g);
void gr_gm20b_cb_size_default(struct gk20a *g);
int gr_gm20b_calc_global_ctx_buffer_size(struct gk20a *g);
void gr_gm20b_commit_global_bundle_cb(struct gk20a *g,
        struct channel_ctx_gk20a *ch_ctx,
        u64 addr, u64 size, bool patch);
int gr_gm20b_commit_global_cb_manager(struct gk20a *g,
        struct channel_gk20a *c, bool patch);
void gr_gm20b_commit_global_pagepool(struct gk20a *g,
        struct channel_ctx_gk20a *ch_ctx,
        u64 addr, u32 size, bool patch);
int gr_gm20b_handle_sw_method(struct gk20a *g, u32 addr,
        u32 class_num, u32 offset, u32 data);
void gr_gm20b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gm20b_set_circular_buffer_size(struct gk20a *g, u32 data);
void gr_gm20b_set_hww_esr_report_mask(struct gk20a *g);
bool gr_gm20b_is_valid_class(struct gk20a *g, u32 class_num);
bool gr_gm20b_is_valid_gfx_class(struct gk20a *g, u32 class_num);
bool gr_gm20b_is_valid_compute_class(struct gk20a *g, u32 class_num);
void gr_gm20b_init_sm_dsm_reg_info(void);
void gr_gm20b_get_sm_dsm_perf_regs(struct gk20a *g,
        u32 *num_sm_dsm_perf_regs,
        u32 **sm_dsm_perf_regs,
        u32 *perf_register_stride);
void gr_gm20b_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
        u32 *num_sm_dsm_perf_ctrl_regs,
        u32 **sm_dsm_perf_ctrl_regs,
        u32 *ctrl_register_stride);
u32 gr_gm20b_get_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
void gr_gm20b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index);
void gr_gm20b_load_tpc_mask(struct gk20a *g);
void gr_gm20b_program_sm_id_numbering(struct gk20a *g,
        u32 gpc, u32 tpc, u32 smid);
int gr_gm20b_load_smid_config(struct gk20a *g);
int gr_gm20b_load_ctxsw_ucode_segments(struct gk20a *g, u64 addr_base,
        struct gk20a_ctxsw_ucode_segments *segments, u32 reg_offset);
bool gr_gm20b_is_tpc_addr(struct gk20a *g, u32 addr);
u32 gr_gm20b_get_tpc_num(struct gk20a *g, u32 addr);
int gr_gm20b_load_ctxsw_ucode(struct gk20a *g);
void gr_gm20b_detect_sm_arch(struct gk20a *g);
u32 gr_gm20b_pagepool_default_size(struct gk20a *g);
int gr_gm20b_alloc_gr_ctx(struct gk20a *g,
        struct gr_ctx_desc **gr_ctx, struct vm_gk20a *vm,
        u32 class,
        u32 flags);
void gr_gm20b_update_ctxsw_preemption_mode(struct gk20a *g,
        struct channel_ctx_gk20a *ch_ctx,
        struct nvgpu_mem *mem);
int gr_gm20b_dump_gr_status_regs(struct gk20a *g,
        struct gk20a_debug_output *o);
int gr_gm20b_update_pc_sampling(struct channel_gk20a *c,
        bool enable);
u32 gr_gm20b_get_fbp_en_mask(struct gk20a *g);
u32 gr_gm20b_get_max_ltc_per_fbp(struct gk20a *g);
u32 gr_gm20b_get_max_lts_per_ltc(struct gk20a *g);
u32 *gr_gm20b_rop_l2_en_mask(struct gk20a *g);
u32 gr_gm20b_get_max_fbps_count(struct gk20a *g);
void gr_gm20b_init_cyclestats(struct gk20a *g);
void gr_gm20b_enable_cde_in_fecs(struct gk20a *g, struct nvgpu_mem *mem);
void gr_gm20b_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state);
void gr_gm20b_get_access_map(struct gk20a *g,
        u32 **whitelist, int *num_entries);
int gm20b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc);
int gm20b_gr_update_sm_error_state(struct gk20a *g,
        struct channel_gk20a *ch, u32 sm_id,
        struct nvgpu_gr_sm_error_state *sm_error_state);
int gm20b_gr_clear_sm_error_state(struct gk20a *g,
        struct channel_gk20a *ch, u32 sm_id);
int gr_gm20b_get_preemption_mode_flags(struct gk20a *g,
        struct nvgpu_preemption_modes_rec *preemption_modes_rec);
bool gr_gm20b_is_ltcs_ltss_addr(struct gk20a *g, u32 addr);
bool gr_gm20b_is_ltcn_ltss_addr(struct gk20a *g, u32 addr);
void gr_gm20b_split_lts_broadcast_addr(struct gk20a *g, u32 addr,
        u32 *priv_addr_table,
        u32 *priv_addr_table_index);
void gr_gm20b_split_ltc_broadcast_addr(struct gk20a *g, u32 addr,
        u32 *priv_addr_table,
        u32 *priv_addr_table_index);
void gm20b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
        u32 global_esr);
#endif /* _NVHOST_GM20B_GR_MMU_H */
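
For context on how the class IDs and SW-method offsets above fit together, the sketch below shows the general dispatch shape of a handler like gr_gm20b_handle_sw_method(): select on the hardware class first, then on the method offset, and route to the setters declared in this header. It is illustrative only; example_handle_sw_method() is a hypothetical name, the "offset << 2" word-to-byte conversion is an assumption about the caller's convention, and the in-tree handler in gr_gm20b.c differs in detail.

/*
 * Minimal sketch, not the driver's implementation. Assumes the nvgpu build
 * environment so that gr_gm20b.h and its dependencies resolve.
 */
#include <linux/errno.h>
#include <linux/types.h>

#include "gr_gm20b.h"

static int example_handle_sw_method(struct gk20a *g, u32 class_num,
                                    u32 offset, u32 data)
{
        switch (class_num) {
        case MAXWELL_COMPUTE_B:
                /* offset is assumed to be a method word index here. */
                switch (offset << 2) {
                case NVB1C0_SET_RD_COALESCE:
                        gr_gm20b_set_rd_coalesce(g, data);
                        return 0;
                }
                break;
        case MAXWELL_B:
                switch (offset << 2) {
                case NVB197_SET_CIRCULAR_BUFFER_SIZE:
                        gr_gm20b_set_circular_buffer_size(g, data);
                        return 0;
                case NVB197_SET_ALPHA_CIRCULAR_BUFFER_SIZE:
                        gr_gm20b_set_alpha_circular_buffer_size(g, data);
                        return 0;
                case NVB197_SET_RD_COALESCE:
                        gr_gm20b_set_rd_coalesce(g, data);
                        return 0;
                }
                break;
        }

        /* Unrecognized class/method combination: report it to the caller. */
        return -EINVAL;
}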