author	Sami Kiminki <skiminki@nvidia.com>	2017-11-28 11:12:12 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-30 21:57:19 -0500
commit	d73ad6c07da23636c00c60effeeb53ea35847ee8 (patch)
tree	fc92c533909c84af308b10eb91a87e455e82dae4 /drivers/gpu/nvgpu/gk20a
parent	86a94230c6d57803d572bfba726e1b02db0e3dc3 (diff)
gpu: nvgpu: Alignment check for compressible fixed-address mappings
Add an alignment check for compressible-kind fixed-address mappings.
If we're using a page size smaller than the comptag line coverage
window, the GPU VA and the physical buffer offset must be aligned
with respect to that window.

Bug 1995897
Bug 2011640
Bug 2011668

Change-Id: If68043ee2828d54b9398d77553d10d35cc319236
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1606439
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a.h	17
1 files changed, 17 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index a361648f..c6bc129f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -446,8 +446,25 @@ struct gpu_ops {
 		void (*init_kind_attr)(struct gk20a *g);
 		void (*set_mmu_page_size)(struct gk20a *g);
 		bool (*set_use_full_comp_tag_line)(struct gk20a *g);
+
+		/*
+		 * Compression tag line coverage. When mapping a compressible
+		 * buffer, ctagline is increased when the virtual address
+		 * crosses over the compression page boundary.
+		 */
 		unsigned int (*compression_page_size)(struct gk20a *g);
+
+		/*
+		 * Minimum page size that can be used for compressible kinds.
+		 */
 		unsigned int (*compressible_page_size)(struct gk20a *g);
+
+		/*
+		 * Compressible kind mappings: Mask for the virtual and physical
+		 * address bits that must match.
+		 */
+		u32 (*compression_align_mask)(struct gk20a *g);
+
 		void (*dump_vpr_wpr_info)(struct gk20a *g);
 		int (*vpr_info_fetch)(struct gk20a *g);
 		void (*read_wpr_info)(struct gk20a *g,
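
For reference, a minimal sketch of the alignment check described in the commit message, using the new compression_align_mask hook's return value. This is not the actual nvgpu mapping path; the helper name, the standalone typedefs, and the example mask value are assumptions for illustration only.

/*
 * Hypothetical helper (not nvgpu code): for a compressible-kind
 * fixed-address mapping with a page size smaller than the comptag
 * line coverage window, the GPU VA and the physical buffer offset
 * must land at the same offset within that window, i.e. the bits
 * selected by the alignment mask must match.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

static bool compressible_mapping_aligned(u64 map_addr, u64 phys_offset,
					 u32 align_mask)
{
	/* The masked low bits of both addresses must agree. */
	return ((map_addr ^ phys_offset) & align_mask) == 0;
}

/*
 * Example (assumed 128 KiB coverage window, mask 0x1ffff): a GPU VA of
 * 0x100020000 with a physical offset of 0x20000 passes the check, while
 * the same VA with a physical offset of 0x21000 does not.
 */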