From 6ea52c59b0262556edb01835eaf91b3bfcdcdd71 Mon Sep 17 00:00:00 2001
From: Terje Bergstrom
Date: Fri, 15 Jun 2018 09:25:01 -0700
Subject: gpu: nvgpu: Implement common nvgpu_mem_rd* functions

nvgpu_mem_rd*() functions were implemented per OS. They also used
nvgpu_pramin_access_batched() and implemented a big portion of logic
for using PRAMIN in OS specific code.

Make the implementation for the functions generic. Move all PRAMIN
logic to PRAMIN and simplify the interface provided by PRAMIN.

Change-Id: I1acb9e8d7d424325dc73314d5738cb2c9ebf7692
Signed-off-by: Terje Bergstrom
Reviewed-on: https://git-master.nvidia.com/r/1753708
Reviewed-by: Konsta Holtta
GVS: Gerrit_Virtual_Submit
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/nvgpu_mem.c | 109 ++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)

diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 855d455d..9f3b6cfa 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -177,3 +177,112 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 
 	return align;
 }
+
+u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
+{
+	u32 data = 0;
+
+	if (mem->aperture == APERTURE_SYSMEM) {
+		u32 *ptr = mem->cpu_va;
+
+		WARN_ON(!ptr);
+		data = ptr[w];
+	} else if (mem->aperture == APERTURE_VIDMEM) {
+		nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
+	} else {
+		WARN_ON("Accessing unallocated nvgpu_mem");
+	}
+
+	return data;
+}
+
+u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
+{
+	WARN_ON(offset & 3);
+	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
+}
+
+void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
+		u32 offset, void *dest, u32 size)
+{
+	WARN_ON(offset & 3);
+	WARN_ON(size & 3);
+
+	if (mem->aperture == APERTURE_SYSMEM) {
+		u8 *src = (u8 *)mem->cpu_va + offset;
+
+		WARN_ON(!mem->cpu_va);
+		memcpy(dest, src, size);
+	} else if (mem->aperture == APERTURE_VIDMEM) {
+		nvgpu_pramin_rd_n(g, mem, offset, size, dest);
+	} else {
+		WARN_ON("Accessing unallocated nvgpu_mem");
+	}
+}
+
+void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
+{
+	if (mem->aperture == APERTURE_SYSMEM) {
+		u32 *ptr = mem->cpu_va;
+
+		WARN_ON(!ptr);
+		ptr[w] = data;
+	} else if (mem->aperture == APERTURE_VIDMEM) {
+		nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
+		if (!mem->skip_wmb)
+			nvgpu_wmb();
+	} else {
+		WARN_ON("Accessing unallocated nvgpu_mem");
+	}
+}
+
+void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
+{
+	WARN_ON(offset & 3);
+	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
+}
+
+void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
+		void *src, u32 size)
+{
+	WARN_ON(offset & 3);
+	WARN_ON(size & 3);
+
+	if (mem->aperture == APERTURE_SYSMEM) {
+		u8 *dest = (u8 *)mem->cpu_va + offset;
+
+		WARN_ON(!mem->cpu_va);
+		memcpy(dest, src, size);
+	} else if (mem->aperture == APERTURE_VIDMEM) {
+		nvgpu_pramin_wr_n(g, mem, offset, size, src);
+		if (!mem->skip_wmb)
+			nvgpu_wmb();
+	} else {
+		WARN_ON("Accessing unallocated nvgpu_mem");
+	}
+}
+
+void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
+		u32 c, u32 size)
+{
+	WARN_ON(offset & 3);
+	WARN_ON(size & 3);
+	WARN_ON(c & ~0xff);
+
+	c &= 0xff;
+
+	if (mem->aperture == APERTURE_SYSMEM) {
+		u8 *dest = (u8 *)mem->cpu_va + offset;
+
+		WARN_ON(!mem->cpu_va);
+		memset(dest, c, size);
+	} else if (mem->aperture == APERTURE_VIDMEM) {
+		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
+
+		nvgpu_pramin_memset(g, mem, offset, size, repeat_value);
+		if (!mem->skip_wmb)
+			nvgpu_wmb();
+	} else {
+		WARN_ON("Accessing unallocated nvgpu_mem");
+	}
+}
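
Usage sketch (not part of the patch): a minimal caller illustrating that the
accessors added above behave the same whether the buffer sits in SYSMEM
(direct cpu_va access) or VIDMEM (routed through PRAMIN). Only functions
introduced by this patch are called; example_roundtrip() itself, its call
site, and the header path are assumptions for illustration.

	/*
	 * Assumes the caller already holds a live gk20a instance and an
	 * allocated nvgpu_mem of at least 256 bytes. Hypothetical example.
	 */
	#include <nvgpu/nvgpu_mem.h>

	static u32 example_roundtrip(struct gk20a *g, struct nvgpu_mem *mem)
	{
		/* Zero the first 256 bytes; offset and size must be 4-byte aligned. */
		nvgpu_memset(g, mem, 0, 0x00, 256);

		/* Word-indexed write: word 4 lives at byte offset 16. */
		nvgpu_mem_wr32(g, mem, 4, 0xcafe0000);

		/* Byte-offset write to the same word; WARN_ON fires if offset & 3. */
		nvgpu_mem_wr(g, mem, 16, 0xcafe0001);

		/* Read back; VIDMEM goes through nvgpu_pramin_rd_n(), SYSMEM through cpu_va. */
		return nvgpu_mem_rd32(g, mem, 4);
	}

Note the asymmetry preserved from the patch: writes to VIDMEM are followed by
nvgpu_wmb() (unless mem->skip_wmb is set), while SYSMEM writes and all reads
issue no barrier.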