summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2015-06-24 13:10:57 -0400
committerDeepak Nibade <dnibade@nvidia.com>2016-12-27 04:52:06 -0500
commit6a071e5ad5581e57a5be109d2fc0f44680207783 (patch)
tree2aac79371bd8e7ac76f42671fb257e23327ad7d0 /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parent4c074ba3021e7fd52b10a5e7267b36e07da5660a (diff)
gpu: nvgpu: gp10b: Implement priv pages
Implement support for privileged pages. Use them for kernel allocated buffers.

Change-Id: I24778c2b6063b6bc8a4bfd9d97fa6de01d49569a
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/761920
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r-- drivers/gpu/nvgpu/gp10b/mm_gp10b.c 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 5371605f..9f66c21f 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -153,7 +153,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
153 u64 *iova, 153 u64 *iova,
154 u32 kind_v, u32 *ctag, 154 u32 kind_v, u32 *ctag,
155 bool cacheable, bool unmapped_pte, 155 bool cacheable, bool unmapped_pte,
156 int rw_flag, bool sparse, u32 flags) 156 int rw_flag, bool sparse, bool priv)
157{ 157{
158 u64 pte_addr = 0; 158 u64 pte_addr = 0;
159 u64 pde_addr = 0; 159 u64 pde_addr = 0;
@@ -195,7 +195,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
195 u64 *iova, 195 u64 *iova,
196 u32 kind_v, u32 *ctag, 196 u32 kind_v, u32 *ctag,
197 bool cacheable, bool unmapped_pte, 197 bool cacheable, bool unmapped_pte,
198 int rw_flag, bool sparse, u32 flags) 198 int rw_flag, bool sparse, bool priv)
199{ 199{
200 bool small_valid, big_valid; 200 bool small_valid, big_valid;
201 u32 pte_addr_small = 0, pte_addr_big = 0; 201 u32 pte_addr_small = 0, pte_addr_big = 0;
@@ -251,7 +251,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
251 u64 *iova, 251 u64 *iova,
252 u32 kind_v, u32 *ctag, 252 u32 kind_v, u32 *ctag,
253 bool cacheable, bool unmapped_pte, 253 bool cacheable, bool unmapped_pte,
254 int rw_flag, bool sparse, u32 flags) 254 int rw_flag, bool sparse, bool priv)
255{ 255{
256 struct gk20a *g = vm->mm->g; 256 struct gk20a *g = vm->mm->g;
257 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; 257 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
@@ -269,6 +269,9 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
269 gmmu_new_pte_address_sys_f(*iova 269 gmmu_new_pte_address_sys_f(*iova
270 >> gmmu_new_pte_address_shift_v()); 270 >> gmmu_new_pte_address_shift_v());
271 271
272 if (priv)
273 pte_w[0] |= gmmu_new_pte_privilege_true_f();
274
272 pte_w[1] = *iova >> (24 + gmmu_new_pte_address_shift_v()) | 275 pte_w[1] = *iova >> (24 + gmmu_new_pte_address_shift_v()) |
273 gmmu_new_pte_kind_f(kind_v) | 276 gmmu_new_pte_kind_f(kind_v) |
274 gmmu_new_pte_comptagline_f(*ctag / ctag_granularity); 277 gmmu_new_pte_comptagline_f(*ctag / ctag_granularity);