path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
author     Alex Waterman <alexw@nvidia.com>                        2017-07-31 15:32:07 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>     2017-10-10 11:01:04 -0400
commit     3c37701377459fbea2b460e1b9c65a863dfb04b2 (patch)
tree       60a57a260dc8bcbd81089af8e9e966074e3bc881 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent     b61306795b53647e8d1d4279417df1e1fe0f4b86 (diff)
gpu: nvgpu: Split VIDMEM support from mm_gk20a.c
Split VIDMEM support into its own code files organized as such:

  common/mm/vidmem.c      - Base vidmem support
  common/linux/vidmem.c   - Linux specific user-space interaction
  include/nvgpu/vidmem.h  - Vidmem API definitions

Also use the config to enable/disable VIDMEM support in the makefile and
remove as many CONFIG_GK20A_VIDMEM preprocessor checks as possible from
the source code.

And lastly update a while-loop that iterated over an SGT to use the new
for_each construct for iterating over SGTs.

Currently this organization is not perfectly adhered to. More patches
will fix that.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: Ic0f4d2cf38b65849c7dc350a69b175421477069c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1540705
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
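[Editor's note] The commit message mentions replacing a while-loop over an SGT with the new for_each construct. The sketch below only illustrates the general shape of such a conversion; the macro and helper names (nvgpu_sgt_for_each_sgl, nvgpu_sgt_get_next(), nvgpu_sgt_get_length()) and the wrapper functions are assumptions, not taken from this diff, and the code is assumed to sit in an nvgpu source file with the usual nvgpu/SGT headers included.

/*
 * Illustrative sketch only: nvgpu_sgt_for_each_sgl, nvgpu_sgt_get_next()
 * and nvgpu_sgt_get_length() are assumed names; the wrapper functions are
 * hypothetical.
 */
static u64 vidmem_buf_size_while(struct nvgpu_sgt *sgt)
{
	void *sgl = sgt->sgl;	/* opaque SGL cursor */
	u64 bytes = 0;

	/* Old style: open-coded while-loop over the SGT. */
	while (sgl) {
		bytes += nvgpu_sgt_get_length(sgt, sgl);
		sgl = nvgpu_sgt_get_next(sgt, sgl);
	}

	return bytes;
}

static u64 vidmem_buf_size_for_each(struct nvgpu_sgt *sgt)
{
	void *sgl;
	u64 bytes = 0;

	/* New style: the for_each construct hides the cursor handling. */
	nvgpu_sgt_for_each_sgl(sgl, sgt) {
		bytes += nvgpu_sgt_get_length(sgt, sgl);
	}

	return bytes;
}

The benefit is purely readability: the iteration boilerplate lives in one macro instead of being open-coded at each call site.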
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 9 ---------
1 file changed, 0 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 9f03a495..7029e0e0 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -320,10 +320,6 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 				   u64 base, u64 size);
 enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);
 
-void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr);
-bool is_vidmem_page_alloc(u64 addr);
-struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);
-
 #if 0 /*related to addr bits above, concern below TBD on which is accurate */
 #define bar1_instance_block_shift_gk20a() (max_physaddr_bits_gk20a() -\
 				bus_bar1_block_ptr_s())
@@ -400,11 +396,6 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 			struct channel_gk20a *ch);
 int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
 
-int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes);
-int gk20a_vidmem_get_space(struct gk20a *g, u64 *space);
-int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
-			void *buffer, u64 offset, u64 size, u32 cmd);
-
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
 			struct gk20a_comptags *comptags);
 dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr);
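
[Editor's note] The declarations removed above are the vidmem entry points that, per the commit message, move into include/nvgpu/vidmem.h. Below is a minimal sketch of how such a header could be laid out, assuming (not confirmed by this diff) that static inline stubs are provided when CONFIG_GK20A_VIDMEM is disabled, so that callers need no preprocessor checks of their own; the include guard name and stub return values are also assumptions.

/* Sketch only: prototypes are taken from the diff above; the guard/stub
 * arrangement is an assumption about include/nvgpu/vidmem.h. */
#ifndef __NVGPU_VIDMEM_H__
#define __NVGPU_VIDMEM_H__

#include <linux/types.h>
#include <linux/errno.h>

struct scatterlist;
struct nvgpu_page_alloc;
struct gk20a;
struct dma_buf;

#ifdef CONFIG_GK20A_VIDMEM

void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr);
bool is_vidmem_page_alloc(u64 addr);
struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);

int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes);
int gk20a_vidmem_get_space(struct gk20a *g, u64 *space);
int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
			       void *buffer, u64 offset, u64 size, u32 cmd);

#else /* !CONFIG_GK20A_VIDMEM */

/* Stubs so that code built without vidmem needs no #ifdefs at call sites. */
static inline void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
{
}
static inline bool is_vidmem_page_alloc(u64 addr)
{
	return false;
}
static inline struct nvgpu_page_alloc *get_vidmem_page_alloc(
						struct scatterlist *sgl)
{
	return NULL;
}
static inline int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
{
	return -ENOSYS;
}
static inline int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
{
	return -ENOSYS;
}
static inline int gk20a_vidbuf_access_memory(struct gk20a *g,
					     struct dma_buf *dmabuf,
					     void *buffer, u64 offset,
					     u64 size, u32 cmd)
{
	return -ENOSYS;
}

#endif /* CONFIG_GK20A_VIDMEM */
#endif /* __NVGPU_VIDMEM_H__ */

The stub pattern matches the commit's stated goal of removing CONFIG_GK20A_VIDMEM preprocessor checks from the source code: the config now selects the implementation in the makefile, while disabled builds compile against inline no-ops.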