summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2016-04-06 16:10:32 -0400
committerTerje Bergstrom <tbergstrom@nvidia.com>2016-04-15 11:48:20 -0400
commit6839341bf8ffafa115cfc0427bba694ee1d131f3 (patch)
tree1f9369a3bacf0f1a2cc23371f5de988efdc07c31 /drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
parent61e009c0f8874898335e6c47a610233c3382be47 (diff)
gpu: nvgpu: Add litter values HAL
Move per-chip constants to be returned by a chip specific function. Implement get_litter_value() for each chip. Change-Id: I2a2730fce14010924d2507f6fa15cc2ea0795113 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: http://git-master/r/1121383
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h  101
1 file changed, 63 insertions, 38 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
index 0f70e8aa..248fa291 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_pri_gk20a.h
@@ -34,30 +34,37 @@ static inline u32 pri_gpccs_addr_mask(u32 addr)
34{ 34{
35 return addr & ((1 << pri_gpccs_addr_width()) - 1); 35 return addr & ((1 << pri_gpccs_addr_width()) - 1);
36} 36}
37static inline u32 pri_gpc_addr(u32 addr, u32 gpc) 37static inline u32 pri_gpc_addr(struct gk20a *g, u32 addr, u32 gpc)
38{ 38{
39 return proj_gpc_base_v() + (gpc * proj_gpc_stride_v()) + addr; 39 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
40 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
41 return gpc_base + (gpc * gpc_stride) + addr;
40} 42}
41static inline bool pri_is_gpc_addr_shared(u32 addr) 43static inline bool pri_is_gpc_addr_shared(struct gk20a *g, u32 addr)
42{ 44{
43 return (addr >= proj_gpc_shared_base_v()) && 45 u32 gpc_shared_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_SHARED_BASE);
44 (addr < proj_gpc_shared_base_v() + proj_gpc_stride_v()); 46 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
47 return (addr >= gpc_shared_base) &&
48 (addr < gpc_shared_base + gpc_stride);
45} 49}
46static inline bool pri_is_gpc_addr(u32 addr) 50static inline bool pri_is_gpc_addr(struct gk20a *g, u32 addr)
47{ 51{
48 return ((addr >= proj_gpc_base_v()) && 52 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
49 (addr < proj_gpc_base_v() + 53 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
50 proj_scal_litter_num_gpcs_v() * proj_gpc_stride_v())) || 54 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
51 pri_is_gpc_addr_shared(addr); 55 return ((addr >= gpc_base) &&
56 (addr < gpc_base) + num_gpcs * gpc_stride) ||
57 pri_is_gpc_addr_shared(g, addr);
52} 58}
53static inline u32 pri_get_gpc_num(u32 addr) 59static inline u32 pri_get_gpc_num(struct gk20a *g, u32 addr)
54{ 60{
55 u32 i, start; 61 u32 i, start;
56 u32 num_gpcs = proj_scal_litter_num_gpcs_v(); 62 u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);
57 63 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
64 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
58 for (i = 0; i < num_gpcs; i++) { 65 for (i = 0; i < num_gpcs; i++) {
59 start = proj_gpc_base_v() + (i * proj_gpc_stride_v()); 66 start = gpc_base + (i * gpc_stride);
60 if ((addr >= start) && (addr < (start + proj_gpc_stride_v()))) 67 if ((addr >= start) && (addr < (start + gpc_stride)))
61 return i; 68 return i;
62 } 69 }
63 return 0; 70 return 0;
@@ -73,17 +80,23 @@ static inline u32 pri_tpccs_addr_mask(u32 addr)
73{ 80{
74 return addr & ((1 << pri_tpccs_addr_width()) - 1); 81 return addr & ((1 << pri_tpccs_addr_width()) - 1);
75} 82}
76static inline u32 pri_tpc_addr(u32 addr, u32 gpc, u32 tpc) 83static inline u32 pri_tpc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 tpc)
77{ 84{
78 return proj_gpc_base_v() + (gpc * proj_gpc_stride_v()) + 85 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
79 proj_tpc_in_gpc_base_v() + (tpc * proj_tpc_in_gpc_stride_v()) + 86 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
87 u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_BASE);
88 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
89 return gpc_base + (gpc * gpc_stride) +
90 tpc_in_gpc_base + (tpc * tpc_in_gpc_stride) +
80 addr; 91 addr;
81} 92}
82static inline bool pri_is_tpc_addr_shared(u32 addr) 93static inline bool pri_is_tpc_addr_shared(struct gk20a *g, u32 addr)
83{ 94{
84 return (addr >= proj_tpc_in_gpc_shared_base_v()) && 95 u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE);
85 (addr < (proj_tpc_in_gpc_shared_base_v() + 96 u32 tpc_in_gpc_shared_base = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_SHARED_BASE);
86 proj_tpc_in_gpc_stride_v())); 97 return (addr >= tpc_in_gpc_shared_base) &&
98 (addr < (tpc_in_gpc_shared_base +
99 tpc_in_gpc_stride));
87} 100}
88 101
89/* 102/*
@@ -97,29 +110,37 @@ static inline u32 pri_becs_addr_mask(u32 addr)
97{ 110{
98 return addr & ((1 << pri_becs_addr_width()) - 1); 111 return addr & ((1 << pri_becs_addr_width()) - 1);
99} 112}
100static inline bool pri_is_be_addr_shared(u32 addr) 113static inline bool pri_is_be_addr_shared(struct gk20a *g, u32 addr)
101{ 114{
102 return (addr >= proj_rop_shared_base_v()) && 115 u32 rop_shared_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_SHARED_BASE);
103 (addr < proj_rop_shared_base_v() + proj_rop_stride_v()); 116 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE);
117 return (addr >= rop_shared_base) &&
118 (addr < rop_shared_base + rop_stride);
104} 119}
105static inline u32 pri_be_shared_addr(u32 addr) 120static inline u32 pri_be_shared_addr(struct gk20a *g, u32 addr)
106{ 121{
107 return proj_rop_shared_base_v() + pri_becs_addr_mask(addr); 122 u32 rop_shared_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_SHARED_BASE);
123 return rop_shared_base + pri_becs_addr_mask(addr);
108} 124}
109static inline bool pri_is_be_addr(u32 addr) 125static inline bool pri_is_be_addr(struct gk20a *g, u32 addr)
110{ 126{
111 return ((addr >= proj_rop_base_v()) && 127 u32 num_fbps = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPS);
112 (addr < proj_rop_base_v()+proj_scal_litter_num_fbps_v() * proj_rop_stride_v())) || 128 u32 rop_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_BASE);
113 pri_is_be_addr_shared(addr); 129 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE);
130 return ((addr >= rop_base) &&
131 (addr < rop_base + num_fbps * rop_stride)) ||
132 pri_is_be_addr_shared(g, addr);
114} 133}
115 134
116static inline u32 pri_get_be_num(u32 addr) 135static inline u32 pri_get_be_num(struct gk20a *g, u32 addr)
117{ 136{
118 u32 i, start; 137 u32 i, start;
119 u32 num_fbps = proj_scal_litter_num_fbps_v(); 138 u32 num_fbps = nvgpu_get_litter_value(g, GPU_LIT_NUM_FBPS);
139 u32 rop_base = nvgpu_get_litter_value(g, GPU_LIT_ROP_BASE);
140 u32 rop_stride = nvgpu_get_litter_value(g, GPU_LIT_ROP_STRIDE);
120 for (i = 0; i < num_fbps; i++) { 141 for (i = 0; i < num_fbps; i++) {
121 start = proj_rop_base_v() + (i * proj_rop_stride_v()); 142 start = rop_base + (i * rop_stride);
122 if ((addr >= start) && (addr < (start + proj_rop_stride_v()))) 143 if ((addr >= start) && (addr < (start + rop_stride)))
123 return i; 144 return i;
124 } 145 }
125 return 0; 146 return 0;
@@ -136,10 +157,14 @@ static inline u32 pri_ppccs_addr_mask(u32 addr)
136{ 157{
137 return addr & ((1 << pri_ppccs_addr_width()) - 1); 158 return addr & ((1 << pri_ppccs_addr_width()) - 1);
138} 159}
139static inline u32 pri_ppc_addr(u32 addr, u32 gpc, u32 ppc) 160static inline u32 pri_ppc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 ppc)
140{ 161{
141 return proj_gpc_base_v() + (gpc * proj_gpc_stride_v()) + 162 u32 gpc_base = nvgpu_get_litter_value(g, GPU_LIT_GPC_BASE);
142 proj_ppc_in_gpc_base_v() + (ppc * proj_ppc_in_gpc_stride_v()) + addr; 163 u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
164 u32 ppc_in_gpc_base = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_BASE);
165 u32 ppc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_PPC_IN_GPC_STRIDE);
166 return gpc_base + (gpc * gpc_stride) +
167 ppc_in_gpc_base + (ppc * ppc_in_gpc_stride) + addr;
143} 168}
144 169
145enum ctxsw_addr_type { 170enum ctxsw_addr_type {