path: root/drivers/gpu/nvgpu/common/pramin.c
author    Alex Waterman <alexw@nvidia.com>  2017-04-06 18:30:01 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-20 19:14:32 -0400
commit    e32f62fadfcde413bcd9b5af61ad884e27ba2bf1 (patch)
tree      eff606a0826841eae6ade5906acd9da589d1179a /drivers/gpu/nvgpu/common/pramin.c
parent    52bd58b560d0b3b49c03ef5c2637b67adeac8193 (diff)
gpu: nvgpu: Move Linux nvgpu_mem fields
Hide the Linux specific nvgpu_mem fields so that in subsequent patches core code can use mem_desc instead of struct sg_table. Routines for accessing system specific fields will be added as needed.

This is the first step in a fairly major overhaul of the GMMU mapping routines. There are numerous issues with the current design (or lack thereof): massively coupled code, system dependencies, disorganization, etc.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I2e7d3ae3a07468cfc17c1c642d28ed1b0952474d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464076
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
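A minimal sketch of the idea behind the change, assuming simplified struct layouts (the real nvgpu headers carry more members): the Linux-specific sg_table pointer moves out of struct nvgpu_mem proper and behind a priv member, so OS-agnostic core code stops naming struct sg_table directly.

/*
 * Sketch only; field names other than "priv" and "sgt" are illustrative,
 * not taken verbatim from the nvgpu tree.
 */
#include <linux/scatterlist.h>          /* struct sg_table */

/* Linux-specific backing data, hidden behind "priv". */
struct nvgpu_mem_priv {
	struct sg_table *sgt;           /* previously a direct nvgpu_mem field */
};

struct nvgpu_mem {
	/* ... core (OS-agnostic) fields such as size and aperture ... */
	struct nvgpu_mem_priv priv;     /* Linux-only details live here */
};

/*
 * Call sites that used to dereference mem->sgt->sgl now dereference
 * mem->priv.sgt->sgl, which is exactly the pramin.c hunk below.
 */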
Diffstat (limited to 'drivers/gpu/nvgpu/common/pramin.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 378711fc..688e5ce8 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -87,7 +87,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	struct page_alloc_chunk *chunk = NULL;
 	u32 byteoff, start_reg, until_end, n;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 				  page_alloc_chunk, list_entry) {
 		if (offset >= chunk->length)
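The hunk above is the mechanical part of the patch: the scatterlist is simply reached through mem->priv. The commit message also promises that "routines for accessing system specific fields will be added as needed"; a hedged sketch of such a helper follows. The name nvgpu_mem_get_sgl is hypothetical and not taken from the tree.

/*
 * Hypothetical accessor sketch; the real nvgpu code may name and place
 * such a helper differently. It lets core code obtain the scatterlist
 * without knowing that struct sg_table is a Linux-specific detail.
 */
static inline struct scatterlist *nvgpu_mem_get_sgl(struct nvgpu_mem *mem)
{
	return mem->priv.sgt->sgl;
}

/*
 * The pramin path above could then read:
 *
 *	alloc = get_vidmem_page_alloc(nvgpu_mem_get_sgl(mem));
 */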