diff options

author:    Debarshi Dutta <ddutta@nvidia.com>            2017-08-03 06:04:44 -0400
committer: mobile promotions <svcmobile_promotions@nvidia.com>  2017-08-17 17:26:47 -0400
commit:    98186ec2c2127c2af65a34f9e697e04f518a79ab (patch)
tree:      08ad87f3bf8c739e96b36f01728a8f7a30749a0e /drivers/gpu/nvgpu/common
parent:    49dc335cfe588179cbb42d8bab53bc76ba88b28f (diff)
gpu: nvgpu: Add wrapper over atomic_t and atomic64_t
- added wrapper structs nvgpu_atomic_t and nvgpu_atomic64_t over
atomic_t and atomic64_t
- added nvgpu_atomic_* and nvgpu_atomic64_* APIs to access the above
wrappers.
JIRA NVGPU-121
Change-Id: I61667bb0a84c2fc475365abb79bffb42b8b4786a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1533044
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/common')

 drivers/gpu/nvgpu/common/linux/debug_fifo.c           |  8
 drivers/gpu/nvgpu/common/linux/dma.c                  |  4
 drivers/gpu/nvgpu/common/linux/module.c               | 10
 drivers/gpu/nvgpu/common/mm/lockless_allocator.c      | 13
 drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h |  4
 drivers/gpu/nvgpu/common/semaphore.c                  |  4

 6 files changed, 23 insertions, 20 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
index a240a138..1763eb7e 100644
--- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c
+++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c
@@ -167,7 +167,7 @@ static int gk20a_fifo_profile_enable(void *data, u64 val) | |||
167 | } | 167 | } |
168 | kref_init(&f->profile.ref); | 168 | kref_init(&f->profile.ref); |
169 | } | 169 | } |
170 | atomic_set(&f->profile.get, 0); | 170 | atomic_set(&f->profile.get.atomic_var, 0); |
171 | f->profile.enabled = true; | 171 | f->profile.enabled = true; |
172 | } | 172 | } |
173 | } | 173 | } |
@@ -246,7 +246,7 @@ static int gk20a_fifo_profile_stats(struct seq_file *s, void *unused) | |||
246 | return 0; | 246 | return 0; |
247 | } | 247 | } |
248 | 248 | ||
249 | get = atomic_read(&g->fifo.profile.get); | 249 | get = atomic_read(&g->fifo.profile.get.atomic_var); |
250 | 250 | ||
251 | __gk20a_fifo_create_stats(g, percentiles_ioctl, | 251 | __gk20a_fifo_create_stats(g, percentiles_ioctl, |
252 | PROFILE_IOCTL_EXIT, PROFILE_IOCTL_ENTRY); | 252 | PROFILE_IOCTL_EXIT, PROFILE_IOCTL_ENTRY); |
@@ -311,7 +311,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g) | |||
311 | 311 | ||
312 | nvgpu_mutex_init(&g->fifo.profile.lock); | 312 | nvgpu_mutex_init(&g->fifo.profile.lock); |
313 | g->fifo.profile.enabled = false; | 313 | g->fifo.profile.enabled = false; |
314 | atomic_set(&g->fifo.profile.get, 0); | 314 | atomic_set(&g->fifo.profile.get.atomic_var, 0); |
315 | atomic_set(&g->fifo.profile.ref.refcount, 0); | 315 | atomic_set(&g->fifo.profile.ref.refcount, 0); |
316 | 316 | ||
317 | debugfs_create_file("enable", 0600, profile_root, g, | 317 | debugfs_create_file("enable", 0600, profile_root, g, |
@@ -342,7 +342,7 @@ struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g) | |||
342 | /* If kref is zero, profiling is not enabled */ | 342 | /* If kref is zero, profiling is not enabled */ |
343 | if (!kref_get_unless_zero(&f->profile.ref)) | 343 | if (!kref_get_unless_zero(&f->profile.ref)) |
344 | return NULL; | 344 | return NULL; |
345 | index = atomic_inc_return(&f->profile.get); | 345 | index = atomic_inc_return(&f->profile.get.atomic_var); |
346 | profile = &f->profile.data[index % FIFO_PROFILING_ENTRIES]; | 346 | profile = &f->profile.data[index % FIFO_PROFILING_ENTRIES]; |
347 | 347 | ||
348 | return profile; | 348 | return profile; |
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index ea5b2837..2116053d 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -197,7 +197,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, | |||
197 | WARN_ON(flags != NVGPU_DMA_NO_KERNEL_MAPPING); | 197 | WARN_ON(flags != NVGPU_DMA_NO_KERNEL_MAPPING); |
198 | 198 | ||
199 | nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex); | 199 | nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex); |
200 | before_pending = atomic64_read(&g->mm.vidmem.bytes_pending); | 200 | before_pending = atomic64_read(&g->mm.vidmem.bytes_pending.atomic_var); |
201 | addr = __nvgpu_dma_alloc(vidmem_alloc, at, size); | 201 | addr = __nvgpu_dma_alloc(vidmem_alloc, at, size); |
202 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); | 202 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); |
203 | if (!addr) { | 203 | if (!addr) { |
@@ -394,7 +394,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem) | |||
394 | was_empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head); | 394 | was_empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head); |
395 | nvgpu_list_add_tail(&mem->clear_list_entry, | 395 | nvgpu_list_add_tail(&mem->clear_list_entry, |
396 | &g->mm.vidmem.clear_list_head); | 396 | &g->mm.vidmem.clear_list_head); |
397 | atomic64_add(mem->size, &g->mm.vidmem.bytes_pending); | 397 | atomic64_add(mem->size, &g->mm.vidmem.bytes_pending.atomic_var); |
398 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); | 398 | nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); |
399 | 399 | ||
400 | if (was_empty) { | 400 | if (was_empty) { |
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index bfbe7a58..f5c6ca1f 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -68,13 +68,13 @@ int gk20a_busy(struct gk20a *g) | |||
68 | if (!g) | 68 | if (!g) |
69 | return -ENODEV; | 69 | return -ENODEV; |
70 | 70 | ||
71 | atomic_inc(&g->usage_count); | 71 | atomic_inc(&g->usage_count.atomic_var); |
72 | 72 | ||
73 | down_read(&g->busy_lock); | 73 | down_read(&g->busy_lock); |
74 | 74 | ||
75 | if (!gk20a_can_busy(g)) { | 75 | if (!gk20a_can_busy(g)) { |
76 | ret = -ENODEV; | 76 | ret = -ENODEV; |
77 | atomic_dec(&g->usage_count); | 77 | atomic_dec(&g->usage_count.atomic_var); |
78 | goto fail; | 78 | goto fail; |
79 | } | 79 | } |
80 | 80 | ||
@@ -87,7 +87,7 @@ int gk20a_busy(struct gk20a *g) | |||
87 | /* Mark suspended so runtime pm will retry later */ | 87 | /* Mark suspended so runtime pm will retry later */ |
88 | pm_runtime_set_suspended(dev); | 88 | pm_runtime_set_suspended(dev); |
89 | pm_runtime_put_noidle(dev); | 89 | pm_runtime_put_noidle(dev); |
90 | atomic_dec(&g->usage_count); | 90 | atomic_dec(&g->usage_count.atomic_var); |
91 | goto fail; | 91 | goto fail; |
92 | } | 92 | } |
93 | } else { | 93 | } else { |
@@ -97,7 +97,7 @@ int gk20a_busy(struct gk20a *g) | |||
97 | vgpu_pm_finalize_poweron(dev) | 97 | vgpu_pm_finalize_poweron(dev) |
98 | : gk20a_pm_finalize_poweron(dev); | 98 | : gk20a_pm_finalize_poweron(dev); |
99 | if (ret) { | 99 | if (ret) { |
100 | atomic_dec(&g->usage_count); | 100 | atomic_dec(&g->usage_count.atomic_var); |
101 | nvgpu_mutex_release(&g->poweron_lock); | 101 | nvgpu_mutex_release(&g->poweron_lock); |
102 | goto fail; | 102 | goto fail; |
103 | } | 103 | } |
@@ -120,7 +120,7 @@ void gk20a_idle(struct gk20a *g) | |||
120 | { | 120 | { |
121 | struct device *dev; | 121 | struct device *dev; |
122 | 122 | ||
123 | atomic_dec(&g->usage_count); | 123 | atomic_dec(&g->usage_count.atomic_var); |
124 | 124 | ||
125 | dev = dev_from_gk20a(g); | 125 | dev = dev_from_gk20a(g); |
126 | 126 | ||
diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
index 2a569efd..eeb86095 100644
--- a/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator.c
@@ -65,7 +65,9 @@ static u64 nvgpu_lockless_alloc(struct nvgpu_allocator *a, u64 len) | |||
65 | ret = cmpxchg(&pa->head, head, new_head); | 65 | ret = cmpxchg(&pa->head, head, new_head); |
66 | if (ret == head) { | 66 | if (ret == head) { |
67 | addr = pa->base + head * pa->blk_size; | 67 | addr = pa->base + head * pa->blk_size; |
68 | atomic_inc(&pa->nr_allocs); | 68 | nvgpu_atomic_inc(&pa->nr_allocs); |
69 | alloc_dbg(a, "Alloc node # %d @ addr 0x%llx\n", head, | ||
70 | addr); | ||
69 | break; | 71 | break; |
70 | } | 72 | } |
71 | head = ACCESS_ONCE(pa->head); | 73 | head = ACCESS_ONCE(pa->head); |
@@ -94,7 +96,8 @@ static void nvgpu_lockless_free(struct nvgpu_allocator *a, u64 addr) | |||
94 | ACCESS_ONCE(pa->next[cur_idx]) = head; | 96 | ACCESS_ONCE(pa->next[cur_idx]) = head; |
95 | ret = cmpxchg(&pa->head, head, cur_idx); | 97 | ret = cmpxchg(&pa->head, head, cur_idx); |
96 | if (ret == head) { | 98 | if (ret == head) { |
97 | atomic_dec(&pa->nr_allocs); | 99 | nvgpu_atomic_dec(&pa->nr_allocs); |
100 | alloc_dbg(a, "Free node # %llu\n", cur_idx); | ||
98 | break; | 101 | break; |
99 | } | 102 | } |
100 | } | 103 | } |
@@ -125,9 +128,9 @@ static void nvgpu_lockless_print_stats(struct nvgpu_allocator *a, | |||
125 | /* Actual stats. */ | 128 | /* Actual stats. */ |
126 | __alloc_pstat(s, a, "Stats:\n"); | 129 | __alloc_pstat(s, a, "Stats:\n"); |
127 | __alloc_pstat(s, a, " Number allocs = %d\n", | 130 | __alloc_pstat(s, a, " Number allocs = %d\n", |
128 | atomic_read(&pa->nr_allocs)); | 131 | nvgpu_atomic_read(&pa->nr_allocs)); |
129 | __alloc_pstat(s, a, " Number free = %d\n", | 132 | __alloc_pstat(s, a, " Number free = %d\n", |
130 | pa->nr_nodes - atomic_read(&pa->nr_allocs)); | 133 | pa->nr_nodes - nvgpu_atomic_read(&pa->nr_allocs)); |
131 | } | 134 | } |
132 | #endif | 135 | #endif |
133 | 136 | ||
@@ -193,7 +196,7 @@ int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *__a, | |||
193 | a->blk_size = blk_size; | 196 | a->blk_size = blk_size; |
194 | a->nr_nodes = nr_nodes; | 197 | a->nr_nodes = nr_nodes; |
195 | a->flags = flags; | 198 | a->flags = flags; |
196 | atomic_set(&a->nr_allocs, 0); | 199 | nvgpu_atomic_set(&a->nr_allocs, 0); |
197 | 200 | ||
198 | wmb(); | 201 | wmb(); |
199 | a->inited = true; | 202 | a->inited = true; |
diff --git a/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h b/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h
index 32421ac1..c527bff9 100644
--- a/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h
+++ b/drivers/gpu/nvgpu/common/mm/lockless_allocator_priv.h
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. | 2 | * Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms and conditions of the GNU General Public License, | 5 | * under the terms and conditions of the GNU General Public License, |
@@ -109,7 +109,7 @@ struct nvgpu_lockless_allocator { | |||
109 | bool inited; | 109 | bool inited; |
110 | 110 | ||
111 | /* Statistics */ | 111 | /* Statistics */ |
112 | atomic_t nr_allocs; | 112 | nvgpu_atomic_t nr_allocs; |
113 | }; | 113 | }; |
114 | 114 | ||
115 | static inline struct nvgpu_lockless_allocator *lockless_allocator( | 115 | static inline struct nvgpu_lockless_allocator *lockless_allocator( |
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 3e916b9d..ac45aaaa 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -364,7 +364,7 @@ static int __nvgpu_init_hw_sema(struct channel_gk20a *ch) | |||
364 | hw_sema->p = p; | 364 | hw_sema->p = p; |
365 | hw_sema->idx = hw_sema_idx; | 365 | hw_sema->idx = hw_sema_idx; |
366 | hw_sema->offset = SEMAPHORE_SIZE * hw_sema_idx; | 366 | hw_sema->offset = SEMAPHORE_SIZE * hw_sema_idx; |
367 | atomic_set(&hw_sema->next_value, 0); | 367 | nvgpu_atomic_set(&hw_sema->next_value, 0); |
368 | nvgpu_init_list_node(&hw_sema->hw_sema_list); | 368 | nvgpu_init_list_node(&hw_sema->hw_sema_list); |
369 | nvgpu_mem_wr(ch->g, &p->rw_mem, hw_sema->offset, 0); | 369 | nvgpu_mem_wr(ch->g, &p->rw_mem, hw_sema->offset, 0); |
370 | 370 | ||
@@ -425,7 +425,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch) | |||
425 | 425 | ||
426 | kref_init(&s->ref); | 426 | kref_init(&s->ref); |
427 | s->hw_sema = ch->hw_sema; | 427 | s->hw_sema = ch->hw_sema; |
428 | atomic_set(&s->value, 0); | 428 | nvgpu_atomic_set(&s->value, 0); |
429 | 429 | ||
430 | /* | 430 | /* |
431 | * Take a ref on the pool so that we can keep this pool alive for | 431 | * Take a ref on the pool so that we can keep this pool alive for |