path: root/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
author    Konsta Holtta <kholtta@nvidia.com>    2016-05-02 10:43:33 -0400
committer Deepak Nibade <dnibade@nvidia.com>    2016-12-27 04:54:54 -0500
commit    5237f4a2a143a6410cc2eac04a62511a637fd321 (patch)
tree      06bbb7423bfac15c3d2c197086cca01a7b70e1aa /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parent    7be0ee4bb9c1436ecf37984d2e3f5e39a48537fe (diff)
gpu: nvgpu: adapt gk20a_mm_entry for mem_desc
For the upcoming vidmem refactor, replace struct gk20a_mm_entry's contents,
which were identical to struct mem_desc, with a single struct mem_desc
member. This makes it possible to use the page table buffers like the
other buffers too.

JIRA DNVGPU-23
JIRA DNVGPU-20

Change-Id: Ia82da07b5a3bb9fb14a86bcf96a46b3a3c80bf28
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1139696
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Ken Adams <kadams@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
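To make the reshuffle concrete, here is a minimal before/after sketch of the
struct change, assuming abbreviated field lists; the real nvgpu structs carry
additional members (page arrays, child-entry pointers, and so on):

#include <linux/scatterlist.h>
#include <linux/types.h>

/*
 * Before this commit, gk20a_mm_entry duplicated the buffer fields of
 * struct mem_desc (shown in a comment so the sketch stays compilable):
 *
 *	struct gk20a_mm_entry {
 *		void *cpu_va;
 *		struct sg_table *sgt;
 *		size_t size;
 *		int pgsz;
 *	};
 */

/* Abbreviated stand-in for nvgpu's struct mem_desc. */
struct mem_desc {
	void *cpu_va;		/* kernel mapping of the buffer */
	struct sg_table *sgt;	/* scatter-gather list of backing pages */
	size_t size;		/* allocation size in bytes */
};

/* After: the buffer state is an embedded mem_desc, so page table
 * buffers can go through the same mem_desc code paths as every other
 * buffer; callers change entry->cpu_va to entry->mem.cpu_va, etc. */
struct gk20a_mm_entry {
	struct mem_desc mem;
	int pgsz;		/* page size index (small/big) */
};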
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 0c00feb4..c9a47d70 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -147,16 +147,16 @@ static u64 gp10b_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
 
 static u32 *pde3_from_index(struct gk20a_mm_entry *entry, u32 i)
 {
-	return (u32 *) (((u8 *)entry->cpu_va) + i*gmmu_new_pde__size_v());
+	return (u32 *) (((u8 *)entry->mem.cpu_va) + i*gmmu_new_pde__size_v());
 }
 
 static u64 entry_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
 {
 	u64 addr;
 	if (g->mm.has_physical_mode)
-		addr = sg_phys(entry->sgt->sgl);
+		addr = sg_phys(entry->mem.sgt->sgl);
 	else
-		addr = g->ops.mm.get_iova_addr(g, entry->sgt->sgl, 0);
+		addr = g->ops.mm.get_iova_addr(g, entry->mem.sgt->sgl, 0);
 
 	return addr;
 }
@@ -202,7 +202,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 
 static u32 *pde0_from_index(struct gk20a_mm_entry *entry, u32 i)
 {
-	return (u32 *) (((u8 *)entry->cpu_va) + i*gmmu_new_dual_pde__size_v());
+	return (u32 *) (((u8 *)entry->mem.cpu_va) + i*gmmu_new_dual_pde__size_v());
 }
 
 static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
@@ -224,8 +224,8 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 
 	gk20a_dbg_fn("");
 
-	small_valid = entry->size && entry->pgsz == gmmu_page_size_small;
-	big_valid = entry->size && entry->pgsz == gmmu_page_size_big;
+	small_valid = entry->mem.size && entry->pgsz == gmmu_page_size_small;
+	big_valid = entry->mem.size && entry->pgsz == gmmu_page_size_big;
 
 	if (small_valid) {
 		pte_addr_small = entry_addr(g, entry)
@@ -325,8 +325,8 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 		gk20a_dbg(gpu_dbg_pte, "pte_cur=%d [0x0,0x0]", i);
 	}
 
-	gk20a_mem_wr32(pte->cpu_va + i*8, 0, pte_w[0]);
-	gk20a_mem_wr32(pte->cpu_va + i*8, 1, pte_w[1]);
+	gk20a_mem_wr32(pte->mem.cpu_va + i*8, 0, pte_w[0]);
+	gk20a_mem_wr32(pte->mem.cpu_va + i*8, 1, pte_w[1]);
 
 	if (*iova) {
 		*iova += page_size;
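A note on the PTE writes in the last hunk: each PTE is 8 bytes, i.e. two
32-bit words, which is why the code offsets by i*8 bytes and then writes word
indices 0 and 1. A plausible reading of gk20a_mem_wr32() at this point in the
driver's history is a plain word store at a word offset; this is an assumed
sketch, not the verbatim nvgpu implementation:

/* Assumed semantics of gk20a_mem_wr32(): store the 32-bit word `data`
 * at word offset `w` from the CPU-visible base pointer `ptr`. */
static inline void gk20a_mem_wr32(void *ptr, int w, u32 data)
{
	((u32 *)ptr)[w] = data;
}

Under that assumption, gk20a_mem_wr32(pte->mem.cpu_va + i*8, 1, pte_w[1])
writes the upper word of the i-th PTE through the embedded mem_desc mapping.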