diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b')
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c index 0c00feb4..c9a47d70 100644 --- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c | |||
@@ -147,16 +147,16 @@ static u64 gp10b_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl, | |||
147 | 147 | ||
148 | static u32 *pde3_from_index(struct gk20a_mm_entry *entry, u32 i) | 148 | static u32 *pde3_from_index(struct gk20a_mm_entry *entry, u32 i) |
149 | { | 149 | { |
150 | return (u32 *) (((u8 *)entry->cpu_va) + i*gmmu_new_pde__size_v()); | 150 | return (u32 *) (((u8 *)entry->mem.cpu_va) + i*gmmu_new_pde__size_v()); |
151 | } | 151 | } |
152 | 152 | ||
153 | static u64 entry_addr(struct gk20a *g, struct gk20a_mm_entry *entry) | 153 | static u64 entry_addr(struct gk20a *g, struct gk20a_mm_entry *entry) |
154 | { | 154 | { |
155 | u64 addr; | 155 | u64 addr; |
156 | if (g->mm.has_physical_mode) | 156 | if (g->mm.has_physical_mode) |
157 | addr = sg_phys(entry->sgt->sgl); | 157 | addr = sg_phys(entry->mem.sgt->sgl); |
158 | else | 158 | else |
159 | addr = g->ops.mm.get_iova_addr(g, entry->sgt->sgl, 0); | 159 | addr = g->ops.mm.get_iova_addr(g, entry->mem.sgt->sgl, 0); |
160 | 160 | ||
161 | return addr; | 161 | return addr; |
162 | } | 162 | } |
@@ -202,7 +202,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm, | |||
202 | 202 | ||
203 | static u32 *pde0_from_index(struct gk20a_mm_entry *entry, u32 i) | 203 | static u32 *pde0_from_index(struct gk20a_mm_entry *entry, u32 i) |
204 | { | 204 | { |
205 | return (u32 *) (((u8 *)entry->cpu_va) + i*gmmu_new_dual_pde__size_v()); | 205 | return (u32 *) (((u8 *)entry->mem.cpu_va) + i*gmmu_new_dual_pde__size_v()); |
206 | } | 206 | } |
207 | 207 | ||
208 | static int update_gmmu_pde0_locked(struct vm_gk20a *vm, | 208 | static int update_gmmu_pde0_locked(struct vm_gk20a *vm, |
@@ -224,8 +224,8 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm, | |||
224 | 224 | ||
225 | gk20a_dbg_fn(""); | 225 | gk20a_dbg_fn(""); |
226 | 226 | ||
227 | small_valid = entry->size && entry->pgsz == gmmu_page_size_small; | 227 | small_valid = entry->mem.size && entry->pgsz == gmmu_page_size_small; |
228 | big_valid = entry->size && entry->pgsz == gmmu_page_size_big; | 228 | big_valid = entry->mem.size && entry->pgsz == gmmu_page_size_big; |
229 | 229 | ||
230 | if (small_valid) { | 230 | if (small_valid) { |
231 | pte_addr_small = entry_addr(g, entry) | 231 | pte_addr_small = entry_addr(g, entry) |
@@ -325,8 +325,8 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm, | |||
325 | gk20a_dbg(gpu_dbg_pte, "pte_cur=%d [0x0,0x0]", i); | 325 | gk20a_dbg(gpu_dbg_pte, "pte_cur=%d [0x0,0x0]", i); |
326 | } | 326 | } |
327 | 327 | ||
328 | gk20a_mem_wr32(pte->cpu_va + i*8, 0, pte_w[0]); | 328 | gk20a_mem_wr32(pte->mem.cpu_va + i*8, 0, pte_w[0]); |
329 | gk20a_mem_wr32(pte->cpu_va + i*8, 1, pte_w[1]); | 329 | gk20a_mem_wr32(pte->mem.cpu_va + i*8, 1, pte_w[1]); |
330 | 330 | ||
331 | if (*iova) { | 331 | if (*iova) { |
332 | *iova += page_size; | 332 | *iova += page_size; |