summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gk20a.h
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2014-10-16 08:15:11 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:11:46 -0400
commit2eb6dcb4694c8b83e22c50d9fd4d3fdd85b93c46 (patch)
tree0a2d10c9873b81fd6a6821959874d4345cc6bfad /drivers/gpu/nvgpu/gk20a/gk20a.h
parentecc6f27fd13e7560d124faf67d114b93d47b73de (diff)
gpu: nvgpu: Implement 64k large page support
Implement support for 64kB large page size. Add an API to create an address space via IOCTL so that we can accept flags, and assign one flag for enabling 64kB large page size. Also adds APIs to set per-context large page size. This is possible only on Maxwell, so return error if caller tries to set large page size on Kepler. Default large page size is still 128kB. Change-Id: I20b51c8f6d4a984acae8411ace3de9000c78e82f Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a.h')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h6
1 file changed, 5 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 8ebf6711..04a4cf66 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -141,6 +141,7 @@ struct gpu_ops {
141 void (*reset)(struct gk20a *g); 141 void (*reset)(struct gk20a *g);
142 void (*init_uncompressed_kind_map)(struct gk20a *g); 142 void (*init_uncompressed_kind_map)(struct gk20a *g);
143 void (*init_kind_attr)(struct gk20a *g); 143 void (*init_kind_attr)(struct gk20a *g);
144 void (*set_mmu_page_size)(struct gk20a *g);
144 } fb; 145 } fb;
145 struct { 146 struct {
146 void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod); 147 void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod);
@@ -291,13 +292,16 @@ struct gpu_ops {
291 bool va_allocated, 292 bool va_allocated,
292 int rw_flag); 293 int rw_flag);
293 void (*vm_remove)(struct vm_gk20a *vm); 294 void (*vm_remove)(struct vm_gk20a *vm);
294 int (*vm_alloc_share)(struct gk20a_as_share *as_share); 295 int (*vm_alloc_share)(struct gk20a_as_share *as_share,
296 u32 flags);
295 int (*vm_bind_channel)(struct gk20a_as_share *as_share, 297 int (*vm_bind_channel)(struct gk20a_as_share *as_share,
296 struct channel_gk20a *ch); 298 struct channel_gk20a *ch);
297 int (*fb_flush)(struct gk20a *g); 299 int (*fb_flush)(struct gk20a *g);
298 void (*l2_invalidate)(struct gk20a *g); 300 void (*l2_invalidate)(struct gk20a *g);
299 void (*l2_flush)(struct gk20a *g, bool invalidate); 301 void (*l2_flush)(struct gk20a *g, bool invalidate);
300 void (*tlb_invalidate)(struct vm_gk20a *vm); 302 void (*tlb_invalidate)(struct vm_gk20a *vm);
303 void (*set_big_page_size)(struct gk20a *g,
304 void *inst_ptr, int size);
301 } mm; 305 } mm;
302 struct { 306 struct {
303 int (*prepare_ucode)(struct gk20a *g); 307 int (*prepare_ucode)(struct gk20a *g);