From b7cc3a2aa6c92a09eed43513287c9062f22ad127 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 16 Nov 2017 11:29:11 -0800
Subject: gpu: nvgpu: Fix some barrier usage

Commit 81868a187fa3b217368206f17b19309846e8e7fb updated barrier usage to
use the nvgpu wrappers and in doing so downgraded many plain barriers
{mb(), wmb(), rmb()} to the SMP versions of these barriers.

The SMP versions of the barriers in question are only issued when
running on an SMP machine. In most of the cases mentioned above this is
fine, since the barriers are present to facilitate proper ordering
across CPUs. A single CPU is always coherent with itself, so in the
non-SMP case we don't need those barriers.

However, there are a few places (GMMU page table programming, IO
accessors, userd) where the barrier usage is for communicating with and
establishing ordering for the GPU. We need these barriers on both SMP
and non-SMP machines. Therefore we must use the plain barrier versions.

Change-Id: I376129840b7dc64af8f3f23f88057e4e81360f89
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1599744
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/gmmu.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 6cca8c2f..8ad7dac7 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -180,7 +180,7 @@ int nvgpu_gmmu_init_page_table(struct vm_gk20a *vm)
 		return err;
 
 	/*
-	 * One nvgpu_smp_mb() is done after all mapping operations. Don't need
+	 * One nvgpu_mb() is done after all mapping operations. Don't need
 	 * individual barriers for each PD write.
 	 */
 	vm->pdb.mem->skip_wmb = true;
@@ -275,7 +275,7 @@ static int pd_allocate(struct vm_gk20a *vm,
 	}
 
 	/*
-	 * One nvgpu_smp_mb() is done after all mapping operations. Don't need
+	 * One nvgpu_mb() is done after all mapping operations. Don't need
 	 * individual barriers for each PD write.
 	 */
 	pd->mem->skip_wmb = true;
@@ -639,7 +639,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
 		       attrs);
 
 	unmap_gmmu_pages(g, &vm->pdb);
-	nvgpu_smp_mb();
+	nvgpu_mb();
 
 	__gmmu_dbg(g, attrs, "%-5s Done!", sgt ? "MAP" : "UNMAP");
 
@@ -914,7 +914,7 @@ int __nvgpu_set_pte(struct gk20a *g, struct vm_gk20a *vm, u64 vaddr, u32 *pte)
 	 * There probably also needs to be a TLB invalidate as well but we leave
 	 * that to the caller of this function.
 	 */
-	nvgpu_smp_wmb();
+	nvgpu_wmb();
 
 	return 0;
 }
-- 
cgit v1.2.2
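
For context on the distinction the commit message relies on, here is a minimal
sketch of how barrier wrappers of this kind are typically layered over the
Linux kernel primitives. The exact mapping shown (nvgpu_mb() onto mb(),
nvgpu_smp_mb() onto smp_mb(), and so on) is an assumption for illustration;
the real nvgpu definitions live in the driver's OS abstraction headers and may
differ in detail.

/*
 * Illustrative sketch only: a plausible layering of nvgpu barrier wrappers
 * over the Linux kernel barrier primitives (assumed mapping, not the
 * driver's actual header).
 */
#include <asm/barrier.h>	/* mb(), wmb(), rmb(), smp_mb(), smp_wmb(), smp_rmb() */

/*
 * Plain barriers: always emit the full hardware barrier, even on
 * uniprocessor (!CONFIG_SMP) kernels. Needed when ordering must be
 * visible to a device such as the GPU, e.g. after writing GMMU page
 * table entries or userd, or around IO accessors.
 */
#define nvgpu_mb()	mb()
#define nvgpu_wmb()	wmb()
#define nvgpu_rmb()	rmb()

/*
 * SMP barriers: reduce to a compiler barrier on non-SMP kernels, since
 * a single CPU is always coherent with itself. Sufficient only for
 * CPU-to-CPU ordering, never for CPU-to-GPU ordering.
 */
#define nvgpu_smp_mb()	smp_mb()
#define nvgpu_smp_wmb()	smp_wmb()
#define nvgpu_smp_rmb()	smp_rmb()

Given such a layering, the change above amounts to picking the left-hand
column wherever the GPU, rather than another CPU, is the observer of the
ordering.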