summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/gmmu.c
diff options
context:
space:
mode:
authorDebarshi Dutta <ddutta@nvidia.com>2017-08-18 06:52:29 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-08-22 06:53:51 -0400
commit81868a187fa3b217368206f17b19309846e8e7fb (patch)
tree2b59e33b61cc6e206f7781f3b4ab44c5c7b6d721 /drivers/gpu/nvgpu/common/mm/gmmu.c
parent5f010177de985c901c33c914efe70a8498a5974f (diff)
gpu: nvgpu: Nvgpu abstraction for linux barriers.
Construct wrapper nvgpu_* methods to replace mb, rmb, wmb, smp_mb, smp_rmb, smp_wmb, read_barrier_depends and smp_read_barrier_depends. NVGPU-122 Change-Id: I8d24dd70fef5cb0fadaacc15f3ab11531667a0df Signed-off-by: Debarshi <ddutta@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1541199 Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com> Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Sourab Gupta <sourabg@nvidia.com> Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c13
1 file changed, 7 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 73dff2c3..7f486d68 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -21,6 +21,7 @@
21#include <nvgpu/nvgpu_mem.h> 21#include <nvgpu/nvgpu_mem.h>
22#include <nvgpu/enabled.h> 22#include <nvgpu/enabled.h>
23#include <nvgpu/page_allocator.h> 23#include <nvgpu/page_allocator.h>
24#include <nvgpu/barrier.h>
24 25
25#include "gk20a/gk20a.h" 26#include "gk20a/gk20a.h"
26#include "gk20a/mm_gk20a.h" 27#include "gk20a/mm_gk20a.h"
@@ -164,8 +165,8 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
164 return err; 165 return err;
165 166
166 /* 167 /*
167 * One mb() is done after all mapping operations. Don't need individual 168 * One nvgpu_smp_mb() is done after all mapping operations. Don't need
168 * barriers for each PD write. 169 * individual barriers for each PD write.
169 */ 170 */
170 vm->pdb.mem->skip_wmb = true; 171 vm->pdb.mem->skip_wmb = true;
171 172
@@ -259,8 +260,8 @@ static int pd_allocate(struct vm_gk20a *vm,
259 } 260 }
260 261
261 /* 262 /*
262 * One mb() is done after all mapping operations. Don't need individual 263 * One nvgpu_smp_mb() is done after all mapping operations. Don't need
263 * barriers for each PD write. 264 * individual barriers for each PD write.
264 */ 265 */
265 pd->mem->skip_wmb = true; 266 pd->mem->skip_wmb = true;
266 267
@@ -714,7 +715,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
714 attrs); 715 attrs);
715 716
716 unmap_gmmu_pages(g, &vm->pdb); 717 unmap_gmmu_pages(g, &vm->pdb);
717 mb(); 718 nvgpu_smp_mb();
718 719
719 __gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP"); 720 __gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
720 721
@@ -983,7 +984,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
983 * There probably also needs to be a TLB invalidate as well but we leave 984 * There probably also needs to be a TLB invalidate as well but we leave
984 * that to the caller of this function. 985 * that to the caller of this function.
985 */ 986 */
986 wmb(); 987 nvgpu_smp_wmb();
987 988
988 return 0; 989 return 0;
989} 990}