author     Sachit Kadle <skadle@nvidia.com>    2016-09-01 23:50:06 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2016-09-20 13:43:40 -0400
commit     35b2507fe3ec6c28c27dd7fb289c003c7a0baf33 (patch)
tree       a691805580ded71038d7fcca07891c8ef210d420 /drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c
parent     101689dd8b536afa3ee7e265dc4ea846fa053767 (diff)
gpu: nvgpu: implement lockless allocator
Implement a lockless allocator for fixed-size data structures.

Bug 1795076

Change-Id: I70a5f52cbdb4452cc0fd9a8edf26735be29ede57
Signed-off-by: Sachit Kadle <skadle@nvidia.com>
Reviewed-on: http://git-master/r/1213211
(cherry picked from commit e4bff7da0f39c8f4b5691169c02e482bc9d4166e)
Reviewed-on: http://git-master/r/1223246
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
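For context, a minimal usage sketch follows (not part of this change). It assumes the generic gk20a_alloc(), gk20a_free() and gk20a_alloc_destroy() wrappers declared in gk20a_allocator.h dispatch into the ops installed by this allocator; the base address, block size and node count are made-up illustration values.

#include "gk20a_allocator.h"

static int example_fixed_pool(void)
{
	struct gk20a_allocator pool;
	u64 addr;
	int err;

	/* 128 blocks of 256 bytes carved out of [0x100000, 0x108000). */
	err = gk20a_lockless_allocator_init(&pool, "example_fixed_pool",
					    0x100000, 128 * 256, 256, 0);
	if (err)
		return err;

	/* Allocations must be exactly blk_size; 0 means the pool is empty. */
	addr = gk20a_alloc(&pool, 256);
	if (addr)
		gk20a_free(&pool, addr);

	gk20a_alloc_destroy(&pool);
	return 0;
}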
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c  |  204
1 file changed, 204 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c
new file mode 100644
index 00000000..32455c98
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator_lockless.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/atomic.h>
+
+#include "gk20a_allocator.h"
+#include "lockless_allocator_priv.h"
+
+static u64 gk20a_lockless_alloc_length(struct gk20a_allocator *a)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+
+	return pa->length;
+}
+
+static u64 gk20a_lockless_alloc_base(struct gk20a_allocator *a)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+
+	return pa->base;
+}
+
+static int gk20a_lockless_alloc_inited(struct gk20a_allocator *a)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+	int inited = pa->inited;
+
+	rmb();
+	return inited;
+}
+
+static u64 gk20a_lockless_alloc_end(struct gk20a_allocator *a)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+
+	return pa->base + pa->length;
+}
+
+static u64 gk20a_lockless_alloc(struct gk20a_allocator *a, u64 len)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+	int head, new_head, ret;
+	u64 addr = 0;
+
+	if (len != pa->blk_size)
+		return 0;
+
+	head = ACCESS_ONCE(pa->head);
+	while (head >= 0) {
+		new_head = ACCESS_ONCE(pa->next[head]);
+		ret = cmpxchg(&pa->head, head, new_head);
+		if (ret == head) {
+			addr = pa->base + head * pa->blk_size;
+			atomic_inc(&pa->nr_allocs);
+			alloc_dbg(a, "Alloc node # %d @ addr 0x%llx\n", head,
+				  addr);
+			break;
+		}
+		head = ACCESS_ONCE(pa->head);
+	}
+	return addr;
+}
+
+static void gk20a_lockless_free(struct gk20a_allocator *a, u64 addr)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+	int head, ret;
+	u64 cur_idx, rem;
+
+	cur_idx = addr - pa->base;
+	rem = do_div(cur_idx, pa->blk_size);
+
+	while (1) {
+		head = ACCESS_ONCE(pa->head);
+		ACCESS_ONCE(pa->next[cur_idx]) = head;
+		ret = cmpxchg(&pa->head, head, cur_idx);
+		if (ret == head) {
+			atomic_dec(&pa->nr_allocs);
+			alloc_dbg(a, "Free node # %llu\n", cur_idx);
+			break;
+		}
+	}
+}
+
+static void gk20a_lockless_alloc_destroy(struct gk20a_allocator *a)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+
+	vfree(pa->next);
+	kfree(pa);
+}
+
+static void gk20a_lockless_print_stats(struct gk20a_allocator *a,
+				       struct seq_file *s, int lock)
+{
+	struct gk20a_lockless_allocator *pa = a->priv;
+
+	__alloc_pstat(s, a, "Lockless allocator params:\n");
+	__alloc_pstat(s, a, "  start = 0x%llx\n", pa->base);
+	__alloc_pstat(s, a, "  end   = 0x%llx\n", pa->base + pa->length);
+
+	/* Actual stats. */
+	__alloc_pstat(s, a, "Stats:\n");
+	__alloc_pstat(s, a, "  Number allocs = %d\n",
+		      atomic_read(&pa->nr_allocs));
+	__alloc_pstat(s, a, "  Number free   = %d\n",
+		      pa->nr_nodes - atomic_read(&pa->nr_allocs));
+}
+
+static const struct gk20a_allocator_ops pool_ops = {
+	.alloc = gk20a_lockless_alloc,
+	.free = gk20a_lockless_free,
+
+	.base = gk20a_lockless_alloc_base,
+	.length = gk20a_lockless_alloc_length,
+	.end = gk20a_lockless_alloc_end,
+	.inited = gk20a_lockless_alloc_inited,
+
+	.fini = gk20a_lockless_alloc_destroy,
+
+	.print_stats = gk20a_lockless_print_stats,
+};
+
+int gk20a_lockless_allocator_init(struct gk20a_allocator *__a,
+				  const char *name, u64 base, u64 length,
+				  u64 blk_size, u64 flags)
+{
+	int i;
+	int err;
+	int nr_nodes;
+	u64 count, rem;
+	struct gk20a_lockless_allocator *a;
+
+	if (!blk_size)
+		return -EINVAL;
+
+	/*
+	 * Ensure we have space for at least one node and there's no overflow.
+	 * To control the memory footprint, we require count < INT_MAX.
+	 */
+	count = length;
+	rem = do_div(count, blk_size);
+	if (!base || !count || count > INT_MAX)
+		return -EINVAL;
+
+	a = kzalloc(sizeof(struct gk20a_lockless_allocator), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	err = __gk20a_alloc_common_init(__a, name, a, false, &pool_ops);
+	if (err)
+		goto fail;
+
+	a->next = vzalloc(sizeof(*a->next) * count);
+	if (!a->next) {
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* chain the elements together to form the initial free list */
+	nr_nodes = (int)count;
+	for (i = 0; i < nr_nodes; i++)
+		a->next[i] = i + 1;
+	a->next[nr_nodes - 1] = -1;
+
+	a->base = base;
+	a->length = length;
+	a->blk_size = blk_size;
+	a->nr_nodes = nr_nodes;
+	a->flags = flags;
+	atomic_set(&a->nr_allocs, 0);
+
+	wmb();
+	a->inited = true;
+
+	gk20a_init_alloc_debug(__a);
+	alloc_dbg(__a, "New allocator: type lockless\n");
+	alloc_dbg(__a, "               base     0x%llx\n", a->base);
+	alloc_dbg(__a, "               nodes    %d\n", a->nr_nodes);
+	alloc_dbg(__a, "               blk_size 0x%llx\n", a->blk_size);
+	alloc_dbg(__a, "               flags    0x%llx\n", a->flags);
+
+	return 0;
+
+fail:
+	kfree(a);
+	return err;
+}