diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 38 |
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 6fdfacdd..bb32749d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -1594,7 +1594,8 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1594 | bool clear_ctags, | 1594 | bool clear_ctags, |
1595 | bool sparse, | 1595 | bool sparse, |
1596 | bool priv, | 1596 | bool priv, |
1597 | struct vm_gk20a_mapping_batch *batch) | 1597 | struct vm_gk20a_mapping_batch *batch, |
1598 | enum gk20a_aperture aperture) | ||
1598 | { | 1599 | { |
1599 | int err = 0; | 1600 | int err = 0; |
1600 | bool allocated = false; | 1601 | bool allocated = false; |
@@ -1642,7 +1643,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
1642 | rw_flag, | 1643 | rw_flag, |
1643 | sparse, | 1644 | sparse, |
1644 | priv, | 1645 | priv, |
1645 | APERTURE_SYSMEM); /* no vidmem bufs yet */ | 1646 | aperture); |
1646 | if (err) { | 1647 | if (err) { |
1647 | gk20a_err(d, "failed to update ptes on map"); | 1648 | gk20a_err(d, "failed to update ptes on map"); |
1648 | goto fail_validate; | 1649 | goto fail_validate; |
@@ -1998,7 +1999,8 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
1998 | clear_ctags, | 1999 | clear_ctags, |
1999 | false, | 2000 | false, |
2000 | false, | 2001 | false, |
2001 | batch); | 2002 | batch, |
2003 | APERTURE_SYSMEM); /* no vidmem yet */ | ||
2002 | if (!map_offset) | 2004 | if (!map_offset) |
2003 | goto clean_up; | 2005 | goto clean_up; |
2004 | 2006 | ||
@@ -2256,7 +2258,8 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
2256 | false, /* clear_ctags */ | 2258 | false, /* clear_ctags */ |
2257 | false, /* sparse */ | 2259 | false, /* sparse */ |
2258 | false, /* priv */ | 2260 | false, /* priv */ |
2259 | NULL); /* mapping_batch handle */ | 2261 | NULL, /* mapping_batch handle */ |
2262 | g->gr.compbit_store.mem.aperture); | ||
2260 | 2263 | ||
2261 | if (!mapped_buffer->ctag_map_win_addr) { | 2264 | if (!mapped_buffer->ctag_map_win_addr) { |
2262 | mutex_unlock(&vm->update_gmmu_lock); | 2265 | mutex_unlock(&vm->update_gmmu_lock); |
@@ -2295,7 +2298,8 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
2295 | u64 size, | 2298 | u64 size, |
2296 | u32 flags, | 2299 | u32 flags, |
2297 | int rw_flag, | 2300 | int rw_flag, |
2298 | bool priv) | 2301 | bool priv, |
2302 | enum gk20a_aperture aperture) | ||
2299 | { | 2303 | { |
2300 | struct gk20a *g = gk20a_from_vm(vm); | 2304 | struct gk20a *g = gk20a_from_vm(vm); |
2301 | u64 vaddr; | 2305 | u64 vaddr; |
@@ -2312,7 +2316,8 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
2312 | false, /* clear_ctags */ | 2316 | false, /* clear_ctags */ |
2313 | false, /* sparse */ | 2317 | false, /* sparse */ |
2314 | priv, /* priv */ | 2318 | priv, /* priv */ |
2315 | NULL); /* mapping_batch handle */ | 2319 | NULL, /* mapping_batch handle */ |
2320 | aperture); | ||
2316 | mutex_unlock(&vm->update_gmmu_lock); | 2321 | mutex_unlock(&vm->update_gmmu_lock); |
2317 | if (!vaddr) { | 2322 | if (!vaddr) { |
2318 | gk20a_err(dev_from_vm(vm), "failed to allocate va space"); | 2323 | gk20a_err(dev_from_vm(vm), "failed to allocate va space"); |
@@ -2327,9 +2332,11 @@ u64 gk20a_gmmu_map(struct vm_gk20a *vm,
2327 | u64 size, | 2332 | u64 size, |
2328 | u32 flags, | 2333 | u32 flags, |
2329 | int rw_flag, | 2334 | int rw_flag, |
2330 | bool priv) | 2335 | bool priv, |
2336 | enum gk20a_aperture aperture) | ||
2331 | { | 2337 | { |
2332 | return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv); | 2338 | return __gk20a_gmmu_map(vm, sgt, 0, size, flags, rw_flag, priv, |
2339 | aperture); | ||
2333 | } | 2340 | } |
2334 | 2341 | ||
2335 | /* | 2342 | /* |
@@ -2341,9 +2348,11 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
2341 | u64 size, | 2348 | u64 size, |
2342 | u32 flags, | 2349 | u32 flags, |
2343 | int rw_flag, | 2350 | int rw_flag, |
2344 | bool priv) | 2351 | bool priv, |
2352 | enum gk20a_aperture aperture) | ||
2345 | { | 2353 | { |
2346 | return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv); | 2354 | return __gk20a_gmmu_map(vm, sgt, addr, size, flags, rw_flag, priv, |
2355 | aperture); | ||
2347 | } | 2356 | } |
2348 | 2357 | ||
2349 | int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem) | 2358 | int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem) |
@@ -2599,7 +2608,8 @@ int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm,
2599 | return err; | 2608 | return err; |
2600 | 2609 | ||
2601 | mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0, | 2610 | mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0, |
2602 | gk20a_mem_flag_none, false); | 2611 | gk20a_mem_flag_none, false, |
2612 | mem->aperture); | ||
2603 | if (!mem->gpu_va) { | 2613 | if (!mem->gpu_va) { |
2604 | err = -ENOMEM; | 2614 | err = -ENOMEM; |
2605 | goto fail_free; | 2615 | goto fail_free; |
@@ -2626,7 +2636,8 @@ int gk20a_gmmu_alloc_map_attr_vid(struct vm_gk20a *vm,
2626 | return err; | 2636 | return err; |
2627 | 2637 | ||
2628 | mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0, | 2638 | mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0, |
2629 | gk20a_mem_flag_none, false); | 2639 | gk20a_mem_flag_none, false, |
2640 | mem->aperture); | ||
2630 | if (!mem->gpu_va) { | 2641 | if (!mem->gpu_va) { |
2631 | err = -ENOMEM; | 2642 | err = -ENOMEM; |
2632 | goto fail_free; | 2643 | goto fail_free; |
@@ -3727,7 +3738,8 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
3727 | false, | 3738 | false, |
3728 | true, | 3739 | true, |
3729 | false, | 3740 | false, |
3730 | NULL); | 3741 | NULL, |
3742 | APERTURE_INVALID); | ||
3731 | if (!map_offset) { | 3743 | if (!map_offset) { |
3732 | mutex_unlock(&vm->update_gmmu_lock); | 3744 | mutex_unlock(&vm->update_gmmu_lock); |
3733 | gk20a_bfree(vma, vaddr_start); | 3745 | gk20a_bfree(vma, vaddr_start); |