summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c | 19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index ee9b791a..d9324363 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -40,7 +40,7 @@ static inline int add_mem_desc(struct tegra_vgpu_mem_desc *mem_desc,
40 40
41static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm, 41static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
42 u64 map_offset, 42 u64 map_offset,
43 struct nvgpu_mem_sgl *sgl, 43 struct nvgpu_sgt *sgt,
44 u64 buffer_offset, 44 u64 buffer_offset,
45 u64 size, 45 u64 size,
46 int pgsz_idx, 46 int pgsz_idx,
@@ -66,12 +66,13 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
66 void *handle = NULL; 66 void *handle = NULL;
67 size_t oob_size; 67 size_t oob_size;
68 u8 prot; 68 u8 prot;
69 void *sgl;
69 70
70 gk20a_dbg_fn(""); 71 gk20a_dbg_fn("");
71 72
72 /* FIXME: add support for sparse mappings */ 73 /* FIXME: add support for sparse mappings */
73 74
74 if (WARN_ON(!sgl) || WARN_ON(!g->mm.bypass_smmu)) 75 if (WARN_ON(!sgt) || WARN_ON(!g->mm.bypass_smmu))
75 return 0; 76 return 0;
76 77
77 if (space_to_skip & (page_size - 1)) 78 if (space_to_skip & (page_size - 1))
@@ -97,7 +98,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
97 err = -EINVAL; 98 err = -EINVAL;
98 goto fail; 99 goto fail;
99 } 100 }
100 101 sgl = sgt->sgl;
101 while (sgl) { 102 while (sgl) {
102 u64 phys_addr; 103 u64 phys_addr;
103 u64 chunk_length; 104 u64 chunk_length;
@@ -106,15 +107,15 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
106 * Cut out sgl ents for space_to_skip. 107 * Cut out sgl ents for space_to_skip.
107 */ 108 */
108 if (space_to_skip && 109 if (space_to_skip &&
109 space_to_skip >= nvgpu_mem_sgl_length(sgl)) { 110 space_to_skip >= nvgpu_sgt_get_length(sgt, sgl)) {
110 space_to_skip -= nvgpu_mem_sgl_length(sgl); 111 space_to_skip -= nvgpu_sgt_get_length(sgt, sgl);
111 sgl = nvgpu_mem_sgl_next(sgl); 112 sgl = nvgpu_sgt_get_next(sgt, sgl);
112 continue; 113 continue;
113 } 114 }
114 115
115 phys_addr = nvgpu_mem_sgl_phys(sgl) + space_to_skip; 116 phys_addr = nvgpu_sgt_get_phys(sgt, sgl) + space_to_skip;
116 chunk_length = min(size, 117 chunk_length = min(size,
117 nvgpu_mem_sgl_length(sgl) - space_to_skip); 118 nvgpu_sgt_get_length(sgt, sgl) - space_to_skip);
118 119
119 if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr, 120 if (add_mem_desc(&mem_desc[mem_desc_count++], phys_addr,
120 chunk_length, &oob_size)) { 121 chunk_length, &oob_size)) {
@@ -124,7 +125,7 @@ static u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
124 125
125 space_to_skip = 0; 126 space_to_skip = 0;
126 size -= chunk_length; 127 size -= chunk_length;
127 sgl = nvgpu_mem_sgl_next(sgl); 128 sgl = nvgpu_sgt_get_next(sgt, sgl);
128 129
129 if (size == 0) 130 if (size == 0)
130 break; 131 break;