summary refs log tree commit diff stats
path: root/drivers/gpu/nvgpu/common
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2018-02-28 12:19:19 -0500
committerSrikar Srimath Tirumala <srikars@nvidia.com>2018-02-28 16:49:22 -0500
commit5a35a95654d561fce09a3b9abf6b82bb7a29d74b (patch)
tree119a07134188d8e06c29a570dd8c6b143f39c9e1 /drivers/gpu/nvgpu/common
parent3fdd8e38b280123fd13bcc4f3fd8928c15e94db6 (diff)
Revert "gpu: nvgpu: Get coherency on gv100 + NVLINK working"
Also revert other changes related to IO coherence. This may be the
culprit in a recent dev-kernel lockdown.

Bug 2070609

Change-Id: Ida178aef161fadbc6db9512521ea51c702c1564b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1665914
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Srikar Srimath Tirumala <srikars@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--drivers/gpu/nvgpu/common/linux/dma.c34
-rw-r--r--drivers/gpu/nvgpu/common/linux/module.c8
-rw-r--r--drivers/gpu/nvgpu/common/linux/nvgpu_mem.c51
-rw-r--r--drivers/gpu/nvgpu/common/linux/pci.c17
-rw-r--r--drivers/gpu/nvgpu/common/linux/vm.c3
-rw-r--r--drivers/gpu/nvgpu/common/mm/gmmu.c16
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_mem.c46
7 files changed, 41 insertions, 134 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 81aebb7d..c13dae8b 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -222,16 +222,6 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
222 void *alloc_ret; 222 void *alloc_ret;
223 223
224 /* 224 /*
225 * WAR for IO coherent chips: the DMA API does not seem to generate
226 * mappings that work correctly. Unclear why - Bug ID: 2040115.
227 *
228 * Basically we just tell the DMA API not to map with NO_KERNEL_MAPPING
229 * and then make a vmap() ourselves.
230 */
231 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
232 flags |= NVGPU_DMA_NO_KERNEL_MAPPING;
233
234 /*
235 * Before the debug print so we see this in the total. But during 225 * Before the debug print so we see this in the total. But during
236 * cleanup in the fail path this has to be subtracted. 226 * cleanup in the fail path this has to be subtracted.
237 */ 227 */
@@ -265,17 +255,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
265 iova, size, flags); 255 iova, size, flags);
266 } 256 }
267 if (err) 257 if (err)
268 goto fail_free_dma; 258 goto fail_free;
269
270 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
271 mem->cpu_va = vmap(mem->priv.pages,
272 size >> PAGE_SHIFT,
273 0, PAGE_KERNEL);
274 if (!mem->cpu_va) {
275 err = -ENOMEM;
276 goto fail_free_sgt;
277 }
278 }
279 259
280 mem->aligned_size = size; 260 mem->aligned_size = size;
281 mem->aperture = APERTURE_SYSMEM; 261 mem->aperture = APERTURE_SYSMEM;
@@ -285,14 +265,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
285 265
286 return 0; 266 return 0;
287 267
288fail_free_sgt: 268fail_free:
289 nvgpu_free_sgtable(g, &mem->priv.sgt); 269 g->dma_memory_used -= mem->aligned_size;
290fail_free_dma:
291 dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs)); 270 dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
292 mem->cpu_va = NULL; 271 mem->cpu_va = NULL;
293 mem->priv.sgt = NULL; 272 mem->priv.sgt = NULL;
294 mem->size = 0; 273 mem->size = 0;
295 g->dma_memory_used -= mem->aligned_size;
296 return err; 274 return err;
297} 275}
298 276
@@ -488,12 +466,6 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
488 if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) && 466 if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
489 !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) && 467 !(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
490 (mem->cpu_va || mem->priv.pages)) { 468 (mem->cpu_va || mem->priv.pages)) {
491 /*
492 * Free side of WAR for bug 2040115.
493 */
494 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
495 vunmap(mem->cpu_va);
496
497 if (mem->priv.flags) { 469 if (mem->priv.flags) {
498 NVGPU_DEFINE_DMA_ATTRS(dma_attrs); 470 NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
499 471
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index 741c86e7..b103fcea 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -20,7 +20,6 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_device.h> 21#include <linux/of_device.h>
22#include <linux/of_platform.h> 22#include <linux/of_platform.h>
23#include <linux/of_address.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
26#include <linux/reset.h> 25#include <linux/reset.h>
@@ -1108,7 +1107,6 @@ static int gk20a_probe(struct platform_device *dev)
1108 struct gk20a *gk20a; 1107 struct gk20a *gk20a;
1109 int err; 1108 int err;
1110 struct gk20a_platform *platform = NULL; 1109 struct gk20a_platform *platform = NULL;
1111 struct device_node *np;
1112 1110
1113 if (dev->dev.of_node) { 1111 if (dev->dev.of_node) {
1114 const struct of_device_id *match; 1112 const struct of_device_id *match;
@@ -1149,12 +1147,6 @@ static int gk20a_probe(struct platform_device *dev)
1149 if (err) 1147 if (err)
1150 goto return_err; 1148 goto return_err;
1151 1149
1152 np = nvgpu_get_node(gk20a);
1153 if (of_dma_is_coherent(np)) {
1154 __nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
1155 __nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
1156 }
1157
1158 if (nvgpu_platform_is_simulation(gk20a)) 1150 if (nvgpu_platform_is_simulation(gk20a))
1159 __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true); 1151 __nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);
1160 1152
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 69897694..206b83e1 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -34,25 +34,40 @@
34#include "gk20a/gk20a.h" 34#include "gk20a/gk20a.h"
35#include "gk20a/mm_gk20a.h" 35#include "gk20a/mm_gk20a.h"
36 36
37u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
38 u32 sysmem_mask, u32 vidmem_mask)
39{
40 switch (aperture) {
41 case APERTURE_SYSMEM:
42 /* some igpus consider system memory vidmem */
43 return nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)
44 ? sysmem_mask : vidmem_mask;
45 case APERTURE_VIDMEM:
46 /* for dgpus only */
47 return vidmem_mask;
48 case APERTURE_INVALID:
49 WARN_ON("Bad aperture");
50 }
51 return 0;
52}
53
54u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
55 u32 sysmem_mask, u32 vidmem_mask)
56{
57 return __nvgpu_aperture_mask(g, mem->aperture,
58 sysmem_mask, vidmem_mask);
59}
60
37int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) 61int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
38{ 62{
39 void *cpu_va; 63 void *cpu_va;
40 pgprot_t prot = nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ? 64 pgprot_t prot = nvgpu_is_enabled(g, NVGPU_DMA_COHERENT) ? PAGE_KERNEL :
41 PAGE_KERNEL :
42 pgprot_writecombine(PAGE_KERNEL); 65 pgprot_writecombine(PAGE_KERNEL);
43 66
44 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) 67 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
45 return 0; 68 return 0;
46 69
47 /* 70 /*
48 * WAR for bug 2040115: we already will always have a coherent vmap()
49 * for all sysmem buffers. The prot settings are left alone since
50 * eventually this should be deleted.
51 */
52 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
53 return 0;
54
55 /*
56 * A CPU mapping is implicitly made for all SYSMEM DMA allocations that 71 * A CPU mapping is implicitly made for all SYSMEM DMA allocations that
57 * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make 72 * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
58 * another CPU mapping. 73 * another CPU mapping.
@@ -82,13 +97,6 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
82 return; 97 return;
83 98
84 /* 99 /*
85 * WAR for bug 2040115: skip this since the map will be taken care of
86 * during the free in the DMA API.
87 */
88 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
89 return;
90
91 /*
92 * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping 100 * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
93 * already made by the DMA API. 101 * already made by the DMA API.
94 */ 102 */
@@ -307,8 +315,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
307 */ 315 */
308u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl) 316u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
309{ 317{
310 if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) || 318 if (!nvgpu_iommuable(g))
311 !nvgpu_iommuable(g))
312 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl)); 319 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl));
313 320
314 if (sg_dma_address(sgl) == 0) 321 if (sg_dma_address(sgl) == 0)
@@ -408,12 +415,8 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
408 415
409 /* 416 /*
410 * Re-use the CPU mapping only if the mapping was made by the DMA API. 417 * Re-use the CPU mapping only if the mapping was made by the DMA API.
411 *
412 * Bug 2040115: the DMA API wrapper makes the mapping that we should
413 * re-use.
414 */ 418 */
415 if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) || 419 if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
416 nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
417 dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page); 420 dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);
418 421
419 dest->priv.pages = src->priv.pages + start_page; 422 dest->priv.pages = src->priv.pages + start_page;
diff --git a/drivers/gpu/nvgpu/common/linux/pci.c b/drivers/gpu/nvgpu/common/linux/pci.c
index 973da9ca..6ebe8dda 100644
--- a/drivers/gpu/nvgpu/common/linux/pci.c
+++ b/drivers/gpu/nvgpu/common/linux/pci.c
@@ -17,13 +17,13 @@
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/pm_runtime.h> 19#include <linux/pm_runtime.h>
20#include <linux/of_platform.h>
21#include <linux/of_address.h>
22 20
23#include <nvgpu/nvgpu_common.h> 21#include <nvgpu/nvgpu_common.h>
24#include <nvgpu/kmem.h> 22#include <nvgpu/kmem.h>
25#include <nvgpu/enabled.h> 23#include <nvgpu/enabled.h>
26#include <nvgpu/nvlink.h> 24#include <nvgpu/nvlink.h>
25#include <linux/of_platform.h>
26#include <linux/of_address.h>
27 27
28#include "gk20a/gk20a.h" 28#include "gk20a/gk20a.h"
29#include "clk/clk.h" 29#include "clk/clk.h"
@@ -566,12 +566,6 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
566 platform->g = g; 566 platform->g = g;
567 l->dev = &pdev->dev; 567 l->dev = &pdev->dev;
568 568
569 np = nvgpu_get_node(g);
570 if (of_dma_is_coherent(np)) {
571 __nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
572 __nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
573 }
574
575 err = pci_enable_device(pdev); 569 err = pci_enable_device(pdev);
576 if (err) 570 if (err)
577 return err; 571 return err;
@@ -650,6 +644,13 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
650 644
651 g->mm.has_physical_mode = false; 645 g->mm.has_physical_mode = false;
652 646
647 np = nvgpu_get_node(g);
648
649 if (of_dma_is_coherent(np)) {
650 __nvgpu_set_enabled(g, NVGPU_DMA_COHERENT, true);
651 __nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
652 }
653
653 return 0; 654 return 0;
654} 655}
655 656
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 52b2f30c..e3ca4eda 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -166,8 +166,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
166 vm->gmmu_page_sizes[mapped_buffer->pgsz_idx] >> 10, 166 vm->gmmu_page_sizes[mapped_buffer->pgsz_idx] >> 10,
167 vm_aspace_id(vm), 167 vm_aspace_id(vm),
168 mapped_buffer->flags, 168 mapped_buffer->flags,
169 nvgpu_aperture_str(g, 169 nvgpu_aperture_str(gk20a_dmabuf_aperture(g, os_buf->dmabuf)));
170 gk20a_dmabuf_aperture(g, os_buf->dmabuf)));
171 170
172 return mapped_buffer; 171 return mapped_buffer;
173} 172}
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index 41343718..ffac324c 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -79,13 +79,6 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
79 if (!sgt) 79 if (!sgt)
80 return -ENOMEM; 80 return -ENOMEM;
81 81
82 /*
83 * If the GPU is IO coherent and the DMA API is giving us IO coherent
84 * CPU mappings then we gotta make sure we use the IO coherent aperture.
85 */
86 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
87 flags |= NVGPU_VM_MAP_IO_COHERENT;
88
89 nvgpu_mutex_acquire(&vm->update_gmmu_lock); 82 nvgpu_mutex_acquire(&vm->update_gmmu_lock);
90 vaddr = g->ops.mm.gmmu_map(vm, addr, 83 vaddr = g->ops.mm.gmmu_map(vm, addr,
91 sgt, /* sg list */ 84 sgt, /* sg list */
@@ -634,7 +627,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
634 page_size >> 10, 627 page_size >> 10,
635 nvgpu_gmmu_perm_str(attrs->rw_flag), 628 nvgpu_gmmu_perm_str(attrs->rw_flag),
636 attrs->kind_v, 629 attrs->kind_v,
637 nvgpu_aperture_str(g, attrs->aperture), 630 nvgpu_aperture_str(attrs->aperture),
638 attrs->cacheable ? 'C' : '-', 631 attrs->cacheable ? 'C' : '-',
639 attrs->sparse ? 'S' : '-', 632 attrs->sparse ? 'S' : '-',
640 attrs->priv ? 'P' : '-', 633 attrs->priv ? 'P' : '-',
@@ -712,13 +705,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
712 attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC); 705 attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);
713 706
714 /* 707 /*
715 * Handle the IO coherency aperture: make sure the .aperture field is
716 * correct based on the IO coherency flag.
717 */
718 if (attrs.coherent && attrs.aperture == APERTURE_SYSMEM)
719 attrs.aperture = __APERTURE_SYSMEM_COH;
720
721 /*
722 * Only allocate a new GPU VA range if we haven't already been passed a 708 * Only allocate a new GPU VA range if we haven't already been passed a
723 * GPU VA range. This facilitates fixed mappings. 709 * GPU VA range. This facilitates fixed mappings.
724 */ 710 */
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 78a57b4e..73b6b2a7 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -28,52 +28,6 @@
28 28
29#include "gk20a/gk20a.h" 29#include "gk20a/gk20a.h"
30 30
31/*
32 * Make sure to use the right coherency aperture if you use this function! This
33 * will not add any checks. If you want to simply use the default coherency then
34 * use nvgpu_aperture_mask().
35 */
36u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
37 u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask)
38{
39 /*
40 * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
41 * "sysmem" aperture should really be translated to VIDMEM.
42 */
43 if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE))
44 aperture = APERTURE_VIDMEM;
45
46 switch (aperture) {
47 case __APERTURE_SYSMEM_COH:
48 return sysmem_coh_mask;
49 case APERTURE_SYSMEM:
50 return sysmem_mask;
51 case APERTURE_VIDMEM:
52 return vidmem_mask;
53 case APERTURE_INVALID:
54 WARN_ON("Bad aperture");
55 }
56 return 0;
57}
58
59u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
60 u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask)
61{
62 enum nvgpu_aperture ap = mem->aperture;
63
64 /*
65 * Handle the coherent aperture: ideally most of the driver is not
66 * aware of the difference between coherent and non-coherent sysmem so
67 * we add this translation step here.
68 */
69 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) &&
70 ap == APERTURE_SYSMEM)
71 ap = __APERTURE_SYSMEM_COH;
72
73 return __nvgpu_aperture_mask(g, ap,
74 sysmem_mask, sysmem_coh_mask, vidmem_mask);
75}
76
77void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl) 31void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
78{ 32{
79 return sgt->ops->sgl_next(sgl); 33 return sgt->ops->sgl_next(sgl);