path: root/drivers/gpu/nvgpu/gv100
author    Deepak Nibade <dnibade@nvidia.com>    2018-04-13 03:48:28 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-04-16 01:53:29 -0400
commit    a0dfb2b91112a766fb4b3e2aaafa99167151c3da (patch)
tree      03c40e6c819227860204ccd8ec8b629727ac315c /drivers/gpu/nvgpu/gv100
parent    b64dfdcf9edfd50a8e10aed8a8c96f85c25d59d9 (diff)
gpu: nvgpu: gv100: consider floorswept FBPA for getting unicast list
In gr_gv11b/gk20a_create_priv_addr_table() we do not consider floorswept
FBPAs and just calculate the unicast list assuming all FBPAs are present.
This generates an incorrect list of unicast addresses.

Fix this by introducing a new HAL op, ops.gr.split_fbpa_broadcast_addr:
set gr_gv100_split_fbpa_broadcast_addr() for GV100 and
gr_gk20a_split_fbpa_broadcast_addr() for the rest of the chips.

gr_gv100_split_fbpa_broadcast_addr() uses the new helper
gr_gv100_get_active_fpba_mask(), which first gets the active FBPA mask,
and then generates the unicast list only for active FBPAs.

Bug 200398811
Jira NVGPU-556

Change-Id: Idd11d6e7ad7b6836525fe41509aeccf52038321f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1694444
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
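As a rough standalone illustration of the approach taken by this change (not the driver code itself), the sketch below derives an active-FBPA mask from a fuse-style value in which 0 means enabled and 1 means floorswept, then expands a broadcast register offset into unicast offsets only for the FBPAs that are actually present. The base/stride constants and the fuse value are made-up example numbers; the real driver reads the mask via nvgpu_readl(g, fuse_status_opt_fbio_r()) and builds the addresses with pri_fbpa_addr(), as shown in the diff below.

/*
 * Minimal sketch of floorswept-FBPA handling, assuming made-up
 * base/stride values. Not the nvgpu implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_FBPA_BASE   0x009A0000u  /* hypothetical unicast base      */
#define EXAMPLE_FBPA_STRIDE 0x00004000u  /* hypothetical per-FBPA stride   */

static uint32_t active_fbpa_mask(uint32_t fuse_val, uint32_t num_fbpas)
{
	/* The fuse reads back 1 for a disabled FBPA, so invert it, then
	 * clear any bits above the number of FBPAs on this chip. */
	return ~fuse_val & ((1u << num_fbpas) - 1u);
}

static void split_broadcast_addr(uint32_t offset, uint32_t mask,
				 uint32_t num_fbpas)
{
	/* Emit one unicast offset per FBPA that is still present. */
	for (uint32_t id = 0; id < num_fbpas; id++) {
		if (mask & (1u << id))
			printf("FBPA %u -> 0x%08x\n", (unsigned)id,
			       (unsigned)(EXAMPLE_FBPA_BASE +
					  id * EXAMPLE_FBPA_STRIDE + offset));
	}
}

int main(void)
{
	/* Pretend the fuse says FBPA 2 is floorswept on a 4-FBPA chip. */
	uint32_t mask = active_fbpa_mask(0x4u, 4);

	split_broadcast_addr(0x123u, mask, 4);  /* prints FBPAs 0, 1 and 3 */
	return 0;
}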
Diffstat (limited to 'drivers/gpu/nvgpu/gv100')
-rw-r--r--  drivers/gpu/nvgpu/gv100/gr_gv100.c   45
-rw-r--r--  drivers/gpu/nvgpu/gv100/gr_gv100.h    3
-rw-r--r--  drivers/gpu/nvgpu/gv100/hal_gv100.c   1
3 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.c b/drivers/gpu/nvgpu/gv100/gr_gv100.c
index c6273733..98e61eb0 100644
--- a/drivers/gpu/nvgpu/gv100/gr_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/gr_gv100.c
@@ -28,6 +28,7 @@
 
 #include "gk20a/gk20a.h"
 #include "gk20a/gr_gk20a.h"
+#include "gk20a/gr_pri_gk20a.h"
 
 #include "gv100/gr_gv100.h"
 #include "gv11b/subctx_gv11b.h"
@@ -332,6 +333,23 @@ u32 gr_gv100_get_patch_slots(struct gk20a *g)
 	return size;
 }
 
+static u32 gr_gv100_get_active_fpba_mask(struct gk20a *g, u32 num_fbpas)
+{
+	u32 active_fbpa_mask;
+
+	/*
+	 * Read active fbpa mask from fuse
+	 * Note that 0:enable and 1:disable in value read from fuse so we've to
+	 * flip the bits.
+	 * Also set unused bits to zero
+	 */
+	active_fbpa_mask = nvgpu_readl(g, fuse_status_opt_fbio_r());
+	active_fbpa_mask = ~active_fbpa_mask;
+	active_fbpa_mask = active_fbpa_mask & ((1 << num_fbpas) - 1);
+
+	return active_fbpa_mask;
+}
+
 int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
 		struct ctxsw_buf_offset_map_entry *map,
 		struct aiv_list_gk20a *regs,
@@ -348,15 +366,7 @@ int gr_gv100_add_ctxsw_reg_pm_fbpa(struct gk20a *g,
 	if ((cnt + (regs->count * num_fbpas)) > max_cnt)
 		return -EINVAL;
 
-	/*
-	 * Read active fbpa mask from fuse
-	 * Note that 0:enable and 1:disable in value read from fuse so we've to
-	 * flip the bits.
-	 * Also set unused bits to zero
-	 */
-	active_fbpa_mask = nvgpu_readl(g, fuse_status_opt_fbio_r());
-	active_fbpa_mask = ~active_fbpa_mask;
-	active_fbpa_mask = active_fbpa_mask & ((1 << num_fbpas) - 1);
+	active_fbpa_mask = gr_gv100_get_active_fpba_mask(g, num_fbpas);
 
 	for (idx = 0; idx < regs->count; idx++) {
 		for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
@@ -383,3 +393,20 @@ int gr_gv100_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map,
 	return gr_gk20a_add_ctxsw_reg_perf_pma(map, regs,
 			count, offset, max_cnt, base, mask);
 }
+
+void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
+	u32 num_fbpas,
+	u32 *priv_addr_table, u32 *t)
+{
+	u32 active_fbpa_mask;
+	u32 fbpa_id;
+
+	active_fbpa_mask = gr_gv100_get_active_fpba_mask(g, num_fbpas);
+
+	for (fbpa_id = 0; fbpa_id < num_fbpas; fbpa_id++) {
+		if (active_fbpa_mask & BIT(fbpa_id)) {
+			priv_addr_table[(*t)++] = pri_fbpa_addr(g,
+				pri_fbpa_addr_mask(g, addr), fbpa_id);
+		}
+	}
+}
diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.h b/drivers/gpu/nvgpu/gv100/gr_gv100.h
index 7b107db2..ccc73e28 100644
--- a/drivers/gpu/nvgpu/gv100/gr_gv100.h
+++ b/drivers/gpu/nvgpu/gv100/gr_gv100.h
@@ -43,4 +43,7 @@ int gr_gv100_add_ctxsw_reg_perf_pma(struct ctxsw_buf_offset_map_entry *map,
 		struct aiv_list_gk20a *regs,
 		u32 *count, u32 *offset,
 		u32 max_cnt, u32 base, u32 mask);
+void gr_gv100_split_fbpa_broadcast_addr(struct gk20a *g, u32 addr,
+	u32 num_fbpas,
+	u32 *priv_addr_table, u32 *t);
 #endif
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index fef2fb94..fc303e70 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -434,6 +434,7 @@ static const struct gpu_ops gv100_ops = {
 		.create_priv_addr_table = gr_gv11b_create_priv_addr_table,
 		.get_pmm_per_chiplet_offset =
 			gr_gv11b_get_pmm_per_chiplet_offset,
+		.split_fbpa_broadcast_addr = gr_gv100_split_fbpa_broadcast_addr,
 	},
 	.fb = {
 		.reset = gv100_fb_reset,