diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2014-10-16 08:15:11 -0400 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:11:46 -0400 |
commit | 2eb6dcb4694c8b83e22c50d9fd4d3fdd85b93c46 (patch) | |
tree | 0a2d10c9873b81fd6a6821959874d4345cc6bfad /drivers/gpu/nvgpu/gm20b/mm_gm20b.c | |
parent | ecc6f27fd13e7560d124faf67d114b93d47b73de (diff) |
gpu: nvgpu: Implement 64k large page support
Implement support for 64kB large page size. Add an API to create an
address space via IOCTL so that we can accept flags, and assign one
flag for enabling 64kB large page size.
Also add APIs to set per-context large page size. This is possible
only on Maxwell, so return error if caller tries to set large page
size on Kepler.
Default large page size is still 128kB.
Change-Id: I20b51c8f6d4a984acae8411ace3de9000c78e82f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gm20b/mm_gm20b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/mm_gm20b.c | 21 |
1 file changed, 21 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c index b4622c0b..13e7859f 100644 --- a/drivers/gpu/nvgpu/gm20b/mm_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/mm_gm20b.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include "hw_gmmu_gm20b.h" | 19 | #include "hw_gmmu_gm20b.h" |
20 | #include "hw_fb_gm20b.h" | 20 | #include "hw_fb_gm20b.h" |
21 | #include "hw_gr_gm20b.h" | 21 | #include "hw_gr_gm20b.h" |
22 | #include "hw_ram_gm20b.h" | ||
22 | 23 | ||
23 | static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm, | 24 | static int allocate_gmmu_ptes_sparse(struct vm_gk20a *vm, |
24 | enum gmmu_pgsz_gk20a pgsz_idx, | 25 | enum gmmu_pgsz_gk20a pgsz_idx, |
@@ -259,6 +260,25 @@ bool gm20b_mm_mmu_debug_mode_enabled(struct gk20a *g) | |||
259 | gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(); | 260 | gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(); |
260 | } | 261 | } |
261 | 262 | ||
263 | void gm20b_mm_set_big_page_size(struct gk20a *g, void *inst_ptr, int size) | ||
264 | { | ||
265 | u32 val; | ||
266 | |||
267 | gk20a_dbg_fn(""); | ||
268 | |||
269 | gk20a_dbg_info("big page size %d\n", size); | ||
270 | val = gk20a_mem_rd32(inst_ptr, ram_in_big_page_size_w()); | ||
271 | val &= ~ram_in_big_page_size_m(); | ||
272 | |||
273 | if (size == SZ_64K) | ||
274 | val |= ram_in_big_page_size_64kb_f(); | ||
275 | else | ||
276 | val |= ram_in_big_page_size_128kb_f(); | ||
277 | |||
278 | gk20a_mem_wr32(inst_ptr, ram_in_big_page_size_w(), val); | ||
279 | gk20a_dbg_fn("done"); | ||
280 | } | ||
281 | |||
262 | void gm20b_init_mm(struct gpu_ops *gops) | 282 | void gm20b_init_mm(struct gpu_ops *gops) |
263 | { | 283 | { |
264 | gops->mm.set_sparse = gm20b_vm_put_sparse; | 284 | gops->mm.set_sparse = gm20b_vm_put_sparse; |
@@ -273,4 +293,5 @@ void gm20b_init_mm(struct gpu_ops *gops) | |||
273 | gops->mm.l2_invalidate = gk20a_mm_l2_invalidate; | 293 | gops->mm.l2_invalidate = gk20a_mm_l2_invalidate; |
274 | gops->mm.l2_flush = gk20a_mm_l2_flush; | 294 | gops->mm.l2_flush = gk20a_mm_l2_flush; |
275 | gops->mm.tlb_invalidate = gk20a_mm_tlb_invalidate; | 295 | gops->mm.tlb_invalidate = gk20a_mm_tlb_invalidate; |
296 | gops->mm.set_big_page_size = gm20b_mm_set_big_page_size; | ||
276 | } | 297 | } |