author		Konsta Holtta <kholtta@nvidia.com>	2017-03-21 07:42:45 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-21 18:01:48 -0400
commit		fcd7fce9bc9ee744a518854995ba9679216e09c8 (patch)
tree		4fa4264967ce32994d6789233f3d5624848ba34a
parent		8f3875393e7a6bd0fc03afdb1fa99b7e33b71576 (diff)
gpu: nvgpu: use dma-attr wrappers for K4.9 compatibility
On kernel 4.9, the DMA API has changed, so use the NVIDIA compatibility
wrappers from dma-attrs.h to allow the code to build for both 4.4 and 4.9.

Bug 1853519

Change-Id: I0196936e81c7f72b41b38a67f42af0dc0b5518df
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1321102
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
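For context: starting with kernel 4.9, dma_alloc_attrs() and dma_free_attrs() take the DMA attributes as a plain unsigned long bitmask instead of a struct dma_attrs *, and DEFINE_DMA_ATTRS()/dma_set_attr() are no longer provided by mainline. The dma-attrs.h compatibility wrappers mentioned above are not part of this patch, so their exact definitions are not visible here; the following is only a minimal sketch of what such a layer could look like, consistent with the __DMA_ATTR() and dma_set_attr() usage in the diff below (all macro bodies are assumptions, not NVIDIA's actual header):

/*
 * Hypothetical compatibility layer, for illustration only; the real
 * NVIDIA dma-attrs.h wrappers are not included in this change.
 */
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
/* 4.9+: DMA attributes are a plain unsigned long bitmask. */
#define DEFINE_DMA_ATTRS(attrs)		unsigned long attrs = 0
#define __DMA_ATTR(attrs)		(attrs)
/* Setting an attribute reduces to OR-ing a flag into the bitmask. */
#define dma_set_attr(attr, attrs)	((attrs) |= (attr))
#else
/* 4.4: struct dma_attrs is still the native type; DEFINE_DMA_ATTRS()
 * and dma_set_attr() already exist, so only pass-by-address is wrapped. */
#define __DMA_ATTR(attrs)		(&(attrs))
#endif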
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c			| 24
-rw-r--r--	drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c	| 11
2 files changed, 23 insertions, 12 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index b9678fbb..bb7c7985 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -2929,15 +2929,23 @@ int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem)
 	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
 }
 
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+static void gk20a_dma_flags_to_attrs(unsigned long *attrs,
+		unsigned long flags)
+#define ATTR_ARG(x) *x
+#else
 static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 		unsigned long flags)
+#define ATTR_ARG(x) x
+#endif
 {
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, ATTR_ARG(attrs));
 	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, ATTR_ARG(attrs));
 	if (flags & NVGPU_DMA_READ_ONLY)
-		dma_set_attr(DMA_ATTR_READ_ONLY, attrs);
+		dma_set_attr(DMA_ATTR_READ_ONLY, ATTR_ARG(attrs));
+#undef ATTR_ARG
 }
 
 int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
@@ -2956,12 +2964,14 @@ int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 		mem->pages = dma_alloc_attrs(d,
-				size, &iova, GFP_KERNEL, &dma_attrs);
+				size, &iova, GFP_KERNEL,
+				__DMA_ATTR(dma_attrs));
 		if (!mem->pages)
 			return -ENOMEM;
 	} else {
 		mem->cpu_va = dma_alloc_attrs(d,
-				size, &iova, GFP_KERNEL, &dma_attrs);
+				size, &iova, GFP_KERNEL,
+				__DMA_ATTR(dma_attrs));
 		if (!mem->cpu_va)
 			return -ENOMEM;
 	}
@@ -3009,11 +3019,11 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
 		if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			dma_free_attrs(d, mem->size, mem->pages,
 				sg_dma_address(mem->sgt->sgl),
-				&dma_attrs);
+				__DMA_ATTR(dma_attrs));
 		} else {
 			dma_free_attrs(d, mem->size, mem->cpu_va,
 				sg_dma_address(mem->sgt->sgl),
-				&dma_attrs);
+				__DMA_ATTR(dma_attrs));
 		}
 	} else {
 		dma_free_coherent(d, mem->size, mem->cpu_va,
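The ATTR_ARG() helper above lets a single gk20a_dma_flags_to_attrs() body compile against both prototypes: on 4.4 the parameter is a struct dma_attrs * and is passed through unchanged, while on 4.9 it is an unsigned long * and gets dereferenced, so the compatibility dma_set_attr() operates on the bitmask itself. One of the calls therefore expands roughly as follows (4.9 wrapper behaviour assumed as sketched earlier):

/* 4.4: ATTR_ARG(x) is x, attrs is a struct dma_attrs * */
dma_set_attr(DMA_ATTR_READ_ONLY, attrs);

/* 4.9: ATTR_ARG(x) is *x, attrs is an unsigned long *, so the
 * wrapper is handed the dereferenced bitmask lvalue */
dma_set_attr(DMA_ATTR_READ_ONLY, *attrs);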
diff --git a/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
index d612fcd2..ceb1d67e 100644
--- a/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
+++ b/drivers/gpu/nvgpu/tegra/linux/platform_gk20a_tegra.c
@@ -21,6 +21,7 @@
 #include <linux/delay.h>
 #include <uapi/linux/nvgpu.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-attrs.h>
 #include <linux/nvmap.h>
 #include <linux/reset.h>
 #include <linux/tegra_soctherm.h>
@@ -98,7 +99,7 @@ static void gk20a_tegra_secure_page_destroy(struct device *dev,
 	DEFINE_DMA_ATTRS(attrs);
 	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
 			(void *)(uintptr_t)secure_buffer->iova,
-			secure_buffer->iova, &attrs);
+			secure_buffer->iova, __DMA_ATTR(attrs));
 }
 
 int gk20a_tegra_secure_page_alloc(struct device *dev)
@@ -113,7 +114,7 @@ int gk20a_tegra_secure_page_alloc(struct device *dev)
 		return -EINVAL;
 
 	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
-			DMA_MEMORY_NOMAP, &attrs);
+			DMA_MEMORY_NOMAP, __DMA_ATTR(attrs));
 	if (dma_mapping_error(&tegra_vpr_dev, iova))
 		return -ENOMEM;
 
@@ -133,7 +134,7 @@ static void gk20a_tegra_secure_destroy(struct gk20a *g,
 		phys_addr_t pa = sg_phys(desc->mem.sgt->sgl);
 		dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
 				(void *)(uintptr_t)pa,
-				pa, &attrs);
+				pa, __DMA_ATTR(attrs));
 		gk20a_free_sgtable(&desc->mem.sgt);
 		desc->mem.sgt = NULL;
 	}
@@ -154,7 +155,7 @@ int gk20a_tegra_secure_alloc(struct device *dev,
 		return -EINVAL;
 
 	(void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
-			DMA_MEMORY_NOMAP, &attrs);
+			DMA_MEMORY_NOMAP, __DMA_ATTR(attrs));
 	if (dma_mapping_error(&tegra_vpr_dev, iova))
 		return -ENOMEM;
 
@@ -185,7 +186,7 @@ fail_sgt:
 	kfree(sgt);
 fail:
 	dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
-			(void *)(uintptr_t)iova, iova, &attrs);
+			(void *)(uintptr_t)iova, iova, __DMA_ATTR(attrs));
 	return err;
 }
 
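With both files converted, every allocation and free goes through the same DEFINE_DMA_ATTRS()/__DMA_ATTR() pattern and resolves correctly on either kernel. A simplified caller sketch (hypothetical function names, error handling trimmed, not a verbatim copy of the driver) looks like this:

#include <linux/dma-attrs.h>	/* NVIDIA compatibility wrappers */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical illustration of the allocation/free pattern used above. */
static void *example_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *iova)
{
	/* struct dma_attrs on 4.4, unsigned long bitmask on 4.9 */
	DEFINE_DMA_ATTRS(attrs);

	/* __DMA_ATTR() is assumed to pass &attrs on 4.4 and attrs on 4.9 */
	return dma_alloc_attrs(dev, size, iova, GFP_KERNEL,
			       __DMA_ATTR(attrs));
}

static void example_dma_free(struct device *dev, size_t size,
			     void *cpu_va, dma_addr_t iova)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_free_attrs(dev, size, cpu_va, iova, __DMA_ATTR(attrs));
}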