path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
author	Konsta Holtta <kholtta@nvidia.com>	2017-03-14 07:47:04 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-21 18:01:47 -0400
commit	8f3875393e7a6bd0fc03afdb1fa99b7e33b71576 (patch)
tree	7ee7b2da741fae7d06eeb367db2b14d8f78f0f55 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent	79658ac5cb22cc68a2d24d964379a606086c8b39 (diff)
gpu: nvgpu: abstract away dma alloc attrs
Don't use enum dma_attr in the gk20a_gmmu_alloc_attr* functions, but
define nvgpu-internal flags for the no-kernel-mapping, force-contiguous,
and read-only modes. Store the flags in the allocated struct mem_desc
and only use gk20a_gmmu_free; remove gk20a_gmmu_free_attr. This helps
in OS abstraction.

Rename the notion of attr to flags.

Add an implicit NVGPU_DMA_NO_KERNEL_MAPPING to all vidmem buffers
allocated via gk20a_gmmu_alloc_vid, for consistency.

Fix a bug in gk20a_gmmu_alloc_map_attr that accidentally dropped the
attr parameter.

Bug 1853519

Change-Id: I1ff67dff9fc425457ae445ce4976a780eb4dcc9f
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1321101
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
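For readers skimming the patch, here is a minimal before/after sketch of the
calling convention this change introduces. The flag values are illustrative
assumptions (the real definitions live in nvgpu headers outside this diff);
the calls themselves match the ones rewritten in the diff below.

	/* Assumed flag definitions -- values are illustrative, not from this diff. */
	#define NVGPU_DMA_NO_KERNEL_MAPPING	(1UL << 0)
	#define NVGPU_DMA_FORCE_CONTIGUOUS	(1UL << 1)
	#define NVGPU_DMA_READ_ONLY		(1UL << 2)

	/* Before: a Linux-specific enum dma_attr leaks into common code, and
	 * the same attr must be passed again when freeing. */
	err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, len, &entry->mem);
	gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING, &entry->mem);

	/* After: nvgpu-internal flags are translated to DMA attrs internally
	 * (see gk20a_dma_flags_to_attrs in the diff) and remembered in the
	 * mem_desc, so a single gk20a_gmmu_free suffices. */
	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING, len, &entry->mem);
	gk20a_gmmu_free(g, &entry->mem);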
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	151
1 file changed, 78 insertions(+), 73 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 32d1f32f..b9678fbb 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -25,6 +25,8 @@
 #include <soc/tegra/chip-id.h>
 #include <linux/vmalloc.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
 #include <linux/lcm.h>
 #include <linux/fdtable.h>
 #include <uapi/linux/nvgpu.h>
@@ -1253,7 +1255,7 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	if (IS_ENABLED(CONFIG_ARM64))
 		err = gk20a_gmmu_alloc(g, len, &entry->mem);
 	else
-		err = gk20a_gmmu_alloc_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
+		err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 					    len, &entry->mem);
 
 
@@ -1284,15 +1286,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 		return;
 	}
 
-	/*
-	 * On arm32 we're limited by vmalloc space, so we do not map pages by
-	 * default.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64))
-		gk20a_gmmu_free(g, &entry->mem);
-	else
-		gk20a_gmmu_free_attr(g, DMA_ATTR_NO_KERNEL_MAPPING,
-				     &entry->mem);
+	gk20a_gmmu_free(g, &entry->mem);
 }
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -2910,14 +2904,14 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 
 int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_attr(g, 0, size, mem);
+	return gk20a_gmmu_alloc_flags(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_attr(struct gk20a *g, enum dma_attr attr, size_t size,
+int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 		struct mem_desc *mem)
 {
 	if (g->mm.vidmem_is_vidmem) {
-		int err = gk20a_gmmu_alloc_attr_vid(g, attr, size, mem);
+		int err = gk20a_gmmu_alloc_flags_vid(g, flags, size, mem);
 
 		if (!err)
 			return 0;
@@ -2927,15 +2921,26 @@ int gk20a_gmmu_alloc_attr(struct gk20a *g, enum dma_attr attr, size_t size,
 	 */
 	}
 
-	return gk20a_gmmu_alloc_attr_sys(g, attr, size, mem);
+	return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
 }
 
 int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_attr_sys(g, 0, size, mem);
+	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr,
+static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
+				     unsigned long flags)
+{
+	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
+		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
+	if (flags & NVGPU_DMA_FORCE_CONTIGUOUS)
+		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs);
+	if (flags & NVGPU_DMA_READ_ONLY)
+		dma_set_attr(DMA_ATTR_READ_ONLY, attrs);
+}
+
+int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 		size_t size, struct mem_desc *mem)
 {
 	struct device *d = dev_from_gk20a(g);
@@ -2944,17 +2949,19 @@ int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr,
 
 	gk20a_dbg_fn("");
 
-	if (attr) {
-		DEFINE_DMA_ATTRS(attrs);
-		dma_set_attr(attr, &attrs);
-		if (attr == DMA_ATTR_NO_KERNEL_MAPPING) {
+	if (flags) {
+		DEFINE_DMA_ATTRS(dma_attrs);
+
+		gk20a_dma_flags_to_attrs(&dma_attrs, flags);
+
+		if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
 			mem->pages = dma_alloc_attrs(d,
-					size, &iova, GFP_KERNEL, &attrs);
+					size, &iova, GFP_KERNEL, &dma_attrs);
 			if (!mem->pages)
 				return -ENOMEM;
 		} else {
 			mem->cpu_va = dma_alloc_attrs(d,
-					size, &iova, GFP_KERNEL, &attrs);
+					size, &iova, GFP_KERNEL, &dma_attrs);
 			if (!mem->cpu_va)
 				return -ENOMEM;
 		}
@@ -2964,7 +2971,7 @@ int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr,
 		return -ENOMEM;
 	}
 
-	if (attr == DMA_ATTR_NO_KERNEL_MAPPING)
+	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
 		err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
 						   iova, size);
 	else {
@@ -2976,6 +2983,7 @@ int gk20a_gmmu_alloc_attr_sys(struct gk20a *g, enum dma_attr attr,
 
 	mem->size = size;
 	mem->aperture = APERTURE_SYSMEM;
+	mem->flags = flags;
 
 	gk20a_dbg_fn("done");
 
@@ -2988,31 +2996,28 @@ fail_free:
 	return err;
 }
 
-static void gk20a_gmmu_free_attr_sys(struct gk20a *g, enum dma_attr attr,
-			  struct mem_desc *mem)
+static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
 	if (mem->cpu_va || mem->pages) {
-		if (attr) {
-			DEFINE_DMA_ATTRS(attrs);
-			dma_set_attr(attr, &attrs);
-			if (attr == DMA_ATTR_NO_KERNEL_MAPPING) {
-				if (mem->pages)
-					dma_free_attrs(d, mem->size, mem->pages,
-						sg_dma_address(mem->sgt->sgl),
-						&attrs);
+		if (mem->flags) {
+			DEFINE_DMA_ATTRS(dma_attrs);
+
+			gk20a_dma_flags_to_attrs(&dma_attrs, mem->flags);
+
+			if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+				dma_free_attrs(d, mem->size, mem->pages,
+					sg_dma_address(mem->sgt->sgl),
+					&dma_attrs);
 			} else {
-				if (mem->cpu_va)
-					dma_free_attrs(d, mem->size,
-						mem->cpu_va,
-						sg_dma_address(mem->sgt->sgl),
-						&attrs);
+				dma_free_attrs(d, mem->size, mem->cpu_va,
+					sg_dma_address(mem->sgt->sgl),
+					&dma_attrs);
 			}
 		} else {
-			if (mem->cpu_va)
-				dma_free_coherent(d, mem->size, mem->cpu_va,
-					sg_dma_address(mem->sgt->sgl));
+			dma_free_coherent(d, mem->size, mem->cpu_va,
+				sg_dma_address(mem->sgt->sgl));
 		}
 		mem->cpu_va = NULL;
 		mem->pages = NULL;
@@ -3089,13 +3094,14 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 
 int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_attr_vid(g, 0, size, mem);
+	return gk20a_gmmu_alloc_flags_vid(g,
+			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
-int gk20a_gmmu_alloc_attr_vid(struct gk20a *g, enum dma_attr attr,
+int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
 		size_t size, struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_attr_vid_at(g, attr, size, mem, 0);
+	return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -3113,7 +3119,7 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 }
 #endif
 
-int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
+int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 		size_t size, struct mem_desc *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -3129,9 +3135,11 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
 	if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
 		return -ENOSYS;
 
-	/* we don't support dma attributes here, except that kernel mappings
-	 * are not done anyway */
-	WARN_ON(attr != 0 && attr != DMA_ATTR_NO_KERNEL_MAPPING);
+	/*
+	 * Our own allocator doesn't have any flags yet, and we can't
+	 * kernel-map these, so require explicit flags.
+	 */
+	WARN_ON(flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
@@ -3186,12 +3194,14 @@ fail_physfree:
 #endif
 }
 
-static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr,
-			  struct mem_desc *mem)
+static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	bool was_empty;
 
+	/* Sanity check - only this supported when allocating. */
+	WARN_ON(mem->flags != NVGPU_DMA_NO_KERNEL_MAPPING);
+
 	if (mem->user_mem) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 		was_empty = list_empty(&g->mm.vidmem.clear_list_head);
@@ -3216,24 +3226,18 @@ static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr,
 #endif
 }
 
-void gk20a_gmmu_free_attr(struct gk20a *g, enum dma_attr attr,
-		struct mem_desc *mem)
+void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
-		return gk20a_gmmu_free_attr_sys(g, attr, mem);
+		return gk20a_gmmu_free_sys(g, mem);
 	case APERTURE_VIDMEM:
-		return gk20a_gmmu_free_attr_vid(g, attr, mem);
+		return gk20a_gmmu_free_vid(g, mem);
 	default:
 		break; /* like free() on "null" memory */
 	}
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
-{
-	return gk20a_gmmu_free_attr(g, 0, mem);
-}
-
 /*
  * If mem is in VIDMEM, return base address in vidmem
  * else return IOVA address for SYSMEM
@@ -3322,14 +3326,14 @@ u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
 			 struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_map_attr(vm, 0, size, mem);
+	return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm,
-			 enum dma_attr attr, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
+			 size_t size, struct mem_desc *mem)
 {
 	if (vm->mm->vidmem_is_vidmem) {
-		int err = gk20a_gmmu_alloc_map_attr_vid(vm, 0, size, mem);
+		int err = gk20a_gmmu_alloc_map_flags_vid(vm, flags, size, mem);
 
 		if (!err)
 			return 0;
@@ -3339,19 +3343,19 @@ int gk20a_gmmu_alloc_map_attr(struct vm_gk20a *vm,
 	 */
 	}
 
-	return gk20a_gmmu_alloc_map_attr_sys(vm, 0, size, mem);
+	return gk20a_gmmu_alloc_map_flags_sys(vm, flags, size, mem);
 }
 
 int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
 			 struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_map_attr_sys(vm, 0, size, mem);
+	return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_attr_sys(struct vm_gk20a *vm,
-			 enum dma_attr attr, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
+			 size_t size, struct mem_desc *mem)
 {
-	int err = gk20a_gmmu_alloc_attr_sys(vm->mm->g, attr, size, mem);
+	int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;
@@ -3371,15 +3375,16 @@ fail_free:
 	return err;
 }
 
-int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
+			 struct mem_desc *mem)
 {
-	return gk20a_gmmu_alloc_map_attr_vid(vm, 0, size, mem);
+	return gk20a_gmmu_alloc_map_flags_vid(vm, 0, size, mem);
 }
 
-int gk20a_gmmu_alloc_map_attr_vid(struct vm_gk20a *vm,
-			 enum dma_attr attr, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
+			 size_t size, struct mem_desc *mem)
 {
-	int err = gk20a_gmmu_alloc_attr_vid(vm->mm->g, attr, size, mem);
+	int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
 	if (err)
 		return err;