summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/gmmu.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-10-18 16:24:53 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-11-10 18:46:54 -0500
commitee4970a33f41b56f2ada6a0b5ab6f9c400e39d88 (patch)
tree65d26ac7fd8667ac10cee8330a7647e9e72a745c /drivers/gpu/nvgpu/common/mm/gmmu.c
parent6911b4d48c414279731580f1212e29e4b691b04c (diff)
gpu: nvgpu: Make buf alignment generic
Drastically simplify and move the alignment computation for buffers getting mapped into the SGT code. An SGT is all that is needed for computing the alignment. However, this did require that a new SGT op was added: nvgpu_sgt_iommuable() This function returns true if the passed SGT is IOMMU'able and must be implemented by an SGT implementation that has IOMMU'able buffers. If this function is left as NULL then it is assumed that the buffer is not IOMMU'able. Also cleanup the parameter ordering convention among all nvgpu_sgt functions. Previously there was a mishmash of different parameter orderings. This patch now standardizes on the gk20a first approach seen everywhere else in the driver. JIRA NVGPU-30 JIRA NVGPU-246 JIRA NVGPU-71 Change-Id: Ic4ab7b752847cf795c7cfafed5a07818217bba86 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1583985 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c4
1 file changed, 2 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 875bcc4e..4289104d 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -93,7 +93,7 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
93 aperture); 93 aperture);
94 nvgpu_mutex_release(&vm->update_gmmu_lock); 94 nvgpu_mutex_release(&vm->update_gmmu_lock);
95 95
96 nvgpu_sgt_free(sgt, g); 96 nvgpu_sgt_free(g, sgt);
97 97
98 if (!vaddr) { 98 if (!vaddr) {
99 nvgpu_err(g, "failed to map buffer!"); 99 nvgpu_err(g, "failed to map buffer!");
@@ -500,7 +500,7 @@ static int __nvgpu_gmmu_do_update_page_table(struct vm_gk20a *vm,
500 * IO address and will be contiguous. 500 * IO address and will be contiguous.
501 */ 501 */
502 if (attrs->aperture == APERTURE_SYSMEM && !g->mm.bypass_smmu) { 502 if (attrs->aperture == APERTURE_SYSMEM && !g->mm.bypass_smmu) {
503 u64 io_addr = nvgpu_sgt_get_gpu_addr(sgt, g, sgt->sgl, attrs); 503 u64 io_addr = nvgpu_sgt_get_gpu_addr(g, sgt, sgt->sgl, attrs);
504 504
505 io_addr += space_to_skip; 505 io_addr += space_to_skip;
506 506