diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2014-11-04 08:14:28 -0500 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-03-18 15:12:01 -0400 |
commit | b3f575074b66e8af1a9943874f9782b793fa7edc (patch) | |
tree | c4aaa1defc512cf5a896edc25445f169de184ece /drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |
parent | 797e4dd319bd2b9e13ce0e44a3bbbb75e4820330 (diff) |
gpu: nvgpu: fix sparse warnings
Fix the following sparse warnings:
warning: Using plain integer as NULL pointer
warning: symbol <variable/function> was not declared. Should it be static?
warning: Initializer entry defined twice
Also, remove dead functions
Bug 1573254
Change-Id: I29d71ecc01c841233cf6b26c9088ca8874773469
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/593363
Reviewed-by: Amit Sharma (SW-TEGRA) <amisharma@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Sachin Nikam <snikam@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 24 |
1 files changed, 12 insertions, 12 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index c121d6bf..7043a81d 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -263,7 +263,7 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g) | |||
263 | return 0; | 263 | return 0; |
264 | } | 264 | } |
265 | 265 | ||
266 | void gk20a_remove_mm_support(struct mm_gk20a *mm) | 266 | static void gk20a_remove_mm_support(struct mm_gk20a *mm) |
267 | { | 267 | { |
268 | struct gk20a *g = mm->g; | 268 | struct gk20a *g = mm->g; |
269 | struct device *d = dev_from_gk20a(g); | 269 | struct device *d = dev_from_gk20a(g); |
@@ -405,7 +405,7 @@ err_out: | |||
405 | return -ENOMEM; | 405 | return -ENOMEM; |
406 | } | 406 | } |
407 | 407 | ||
408 | void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle, | 408 | static void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle, |
409 | struct sg_table *sgt, u32 order, | 409 | struct sg_table *sgt, u32 order, |
410 | size_t size) | 410 | size_t size) |
411 | { | 411 | { |
@@ -415,7 +415,7 @@ void free_gmmu_phys_pages(struct vm_gk20a *vm, void *handle, | |||
415 | kfree(sgt); | 415 | kfree(sgt); |
416 | } | 416 | } |
417 | 417 | ||
418 | int map_gmmu_phys_pages(void *handle, struct sg_table *sgt, | 418 | static int map_gmmu_phys_pages(void *handle, struct sg_table *sgt, |
419 | void **va, size_t size) | 419 | void **va, size_t size) |
420 | { | 420 | { |
421 | FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); | 421 | FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); |
@@ -423,7 +423,7 @@ int map_gmmu_phys_pages(void *handle, struct sg_table *sgt, | |||
423 | return 0; | 423 | return 0; |
424 | } | 424 | } |
425 | 425 | ||
426 | void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va) | 426 | static void unmap_gmmu_phys_pages(void *handle, struct sg_table *sgt, void *va) |
427 | { | 427 | { |
428 | FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); | 428 | FLUSH_CPU_DCACHE(handle, sg_phys(sgt->sgl), sgt->sgl->length); |
429 | } | 429 | } |
@@ -913,7 +913,7 @@ static struct mapped_buffer_node *find_mapped_buffer_reverse_locked( | |||
913 | return mapped_buffer; | 913 | return mapped_buffer; |
914 | node = rb_next(&mapped_buffer->node); | 914 | node = rb_next(&mapped_buffer->node); |
915 | } | 915 | } |
916 | return 0; | 916 | return NULL; |
917 | } | 917 | } |
918 | 918 | ||
919 | static struct mapped_buffer_node *find_mapped_buffer_locked( | 919 | static struct mapped_buffer_node *find_mapped_buffer_locked( |
@@ -931,7 +931,7 @@ static struct mapped_buffer_node *find_mapped_buffer_locked( | |||
931 | else | 931 | else |
932 | return mapped_buffer; | 932 | return mapped_buffer; |
933 | } | 933 | } |
934 | return 0; | 934 | return NULL; |
935 | } | 935 | } |
936 | 936 | ||
937 | static struct mapped_buffer_node *find_mapped_buffer_range_locked( | 937 | static struct mapped_buffer_node *find_mapped_buffer_range_locked( |
@@ -948,7 +948,7 @@ static struct mapped_buffer_node *find_mapped_buffer_range_locked( | |||
948 | else | 948 | else |
949 | node = node->rb_right; | 949 | node = node->rb_right; |
950 | } | 950 | } |
951 | return 0; | 951 | return NULL; |
952 | } | 952 | } |
953 | 953 | ||
954 | #define BFR_ATTRS (sizeof(nvmap_bfr_param)/sizeof(nvmap_bfr_param[0])) | 954 | #define BFR_ATTRS (sizeof(nvmap_bfr_param)/sizeof(nvmap_bfr_param[0])) |
@@ -1177,7 +1177,7 @@ void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm, | |||
1177 | /* unmap here needs to know the page size we assigned at mapping */ | 1177 | /* unmap here needs to know the page size we assigned at mapping */ |
1178 | err = update_gmmu_ptes_locked(vm, | 1178 | err = update_gmmu_ptes_locked(vm, |
1179 | pgsz_idx, | 1179 | pgsz_idx, |
1180 | 0, /* n/a for unmap */ | 1180 | NULL, /* n/a for unmap */ |
1181 | 0, | 1181 | 0, |
1182 | vaddr, | 1182 | vaddr, |
1183 | vaddr + size - 1, | 1183 | vaddr + size - 1, |
@@ -1209,7 +1209,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm, | |||
1209 | bool user_mapped, | 1209 | bool user_mapped, |
1210 | int rw_flag) | 1210 | int rw_flag) |
1211 | { | 1211 | { |
1212 | struct mapped_buffer_node *mapped_buffer = 0; | 1212 | struct mapped_buffer_node *mapped_buffer = NULL; |
1213 | 1213 | ||
1214 | mapped_buffer = | 1214 | mapped_buffer = |
1215 | find_mapped_buffer_reverse_locked(&vm->mapped_buffers, | 1215 | find_mapped_buffer_reverse_locked(&vm->mapped_buffers, |
@@ -1278,7 +1278,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm, | |||
1278 | struct gk20a *g = gk20a_from_vm(vm); | 1278 | struct gk20a *g = gk20a_from_vm(vm); |
1279 | struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags; | 1279 | struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags; |
1280 | struct device *d = dev_from_vm(vm); | 1280 | struct device *d = dev_from_vm(vm); |
1281 | struct mapped_buffer_node *mapped_buffer = 0; | 1281 | struct mapped_buffer_node *mapped_buffer = NULL; |
1282 | bool inserted = false, va_allocated = false; | 1282 | bool inserted = false, va_allocated = false; |
1283 | u32 gmmu_page_size = 0; | 1283 | u32 gmmu_page_size = 0; |
1284 | u64 map_offset = 0; | 1284 | u64 map_offset = 0; |
@@ -1991,7 +1991,7 @@ static int gk20a_vm_put_sparse(struct vm_gk20a *vm, u64 vaddr, | |||
1991 | return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx); | 1991 | return gk20a_vm_put_empty(vm, vaddr, num_pages, pgsz_idx); |
1992 | } | 1992 | } |
1993 | 1993 | ||
1994 | void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr, | 1994 | static void gk20a_vm_clear_sparse(struct vm_gk20a *vm, u64 vaddr, |
1995 | u64 size, u32 pgsz_idx) { | 1995 | u64 size, u32 pgsz_idx) { |
1996 | struct gk20a *g = vm->mm->g; | 1996 | struct gk20a *g = vm->mm->g; |
1997 | 1997 | ||
@@ -2536,7 +2536,7 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share, | |||
2536 | ch->vm = vm; | 2536 | ch->vm = vm; |
2537 | err = channel_gk20a_commit_va(ch); | 2537 | err = channel_gk20a_commit_va(ch); |
2538 | if (err) | 2538 | if (err) |
2539 | ch->vm = 0; | 2539 | ch->vm = NULL; |
2540 | 2540 | ||
2541 | return err; | 2541 | return err; |
2542 | } | 2542 | } |