author      Steve Wise <swise@opengridcomputing.com>    2010-04-21 18:30:06 -0400
committer   Roland Dreier <rolandd@cisco.com>           2010-04-21 18:30:06 -0400
commit      cfdda9d764362ab77b11a410bb928400e6520d57 (patch)
tree        3634e5aca12414d40f4e50a3d73543cc479b525f /drivers/infiniband/hw/cxgb4/resource.c
parent      0eddb519b9127c73d53db4bf3ec1d45b13f844d1 (diff)
RDMA/cxgb4: Add driver for Chelsio T4 RNIC
Add an RDMA/iWARP driver for Chelsio T4 Ethernet adapters.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/resource.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c  417
1 file changed, 417 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
new file mode 100644
index 000000000000..fb195d1d9015
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -0,0 +1,417 @@
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16

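/*
 * Seed a kfifo with the IDs [skip_low, nr - skip_high).  When "random"
 * is set, the IDs are shuffled on the way in: rarray acts as a 16-slot
 * reservoir, each incoming ID evicts a slot picked by a 4-bit index
 * drawn from a 32-bit random word, and the evicted ID is what actually
 * enters the fifo.  The reserved IDs below skip_low and above
 * nr - skip_high are simply never inserted; the placeholder zeroes
 * pushed up front are drained again before returning.
 */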
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
                                     spinlock_t *fifo_lock,
                                     u32 nr, u32 skip_low,
                                     u32 skip_high,
                                     int random)
{
        u32 i, j, entry = 0, idx;
        u32 random_bytes;
        u32 rarray[16];

        spin_lock_init(fifo_lock);

        if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
                return -ENOMEM;

        for (i = 0; i < skip_low + skip_high; i++)
                kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
        if (random) {
                j = 0;
                random_bytes = random32();
                for (i = 0; i < RANDOM_SIZE; i++)
                        rarray[i] = i + skip_low;
                for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
                        if (j >= RANDOM_SIZE) {
                                j = 0;
                                random_bytes = random32();
                        }
                        idx = (random_bytes >> (j * 2)) & 0xF;
                        kfifo_in(fifo,
                                 (unsigned char *) &rarray[idx],
                                 sizeof(u32));
                        rarray[idx] = i;
                        j++;
                }
                for (i = 0; i < RANDOM_SIZE; i++)
                        kfifo_in(fifo,
                                 (unsigned char *) &rarray[i],
                                 sizeof(u32));
        } else
                for (i = skip_low; i < nr - skip_high; i++)
                        kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

        for (i = 0; i < skip_low + skip_high; i++)
                if (kfifo_out_locked(fifo, (unsigned char *) &entry,
                                     sizeof(u32), fifo_lock))
                        break;
        return 0;
}

static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
                                   u32 nr, u32 skip_low, u32 skip_high)
{
        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
                                         skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
                                          spinlock_t *fifo_lock,
                                          u32 nr, u32 skip_low, u32 skip_high)
{
        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
                                         skip_high, 1);
}

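/*
 * Only queue IDs that start a doorbell/GTS page (i.e. qid & qpmask == 0)
 * go into the qid fifo.  The other IDs sharing that page are derived on
 * demand in c4iw_get_cqid()/c4iw_get_qpid() below.
 */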
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
        u32 i;

        spin_lock_init(&rdev->resource.qid_fifo_lock);

        if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
                        GFP_KERNEL))
                return -ENOMEM;

        for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
                if (!(i & rdev->qpmask))
                        kfifo_in(&rdev->resource.qid_fifo,
                                 (unsigned char *) &i, sizeof(u32));
        return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;

        err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
                                             &rdev->resource.tpt_fifo_lock,
                                             nr_tpt, 1, 0);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_fifo(rdev);
        if (err)
                goto qid_err;
        err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
                                      &rdev->resource.pdid_fifo_lock,
                                      nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        return 0;
pdid_err:
        kfifo_free(&rdev->resource.qid_fifo);
qid_err:
        kfifo_free(&rdev->resource.tpt_fifo);
tpt_err:
        return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
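/*
 * An ID of 0 doubles as the "empty" indication, which is why
 * c4iw_init_resource() passes skip_low = 1 for the TPT and PDID fifos.
 */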
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
        u32 entry;

        if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
                return entry;
        else
                return 0;
}

void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
        PDBG("%s entry 0x%x\n", __func__, entry);
        kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}

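/*
 * Queue IDs come out of the fifo a doorbell/GTS page at a time.  With
 * qpmask == 3, for example, pulling qid 8 from the fifo stocks
 * uctx->cqids with 9, 10 and 11 and uctx->qpids with 8-11, so later
 * allocations in this ucontext are satisfied from the per-ucontext
 * lists without touching the global fifo.
 */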
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
                                        &rdev->resource.qid_fifo_lock);
                if (!qid)
                        goto out;
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        PDBG("%s qid 0x%x\n", __func__, qid);
        return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        PDBG("%s qid 0x%x\n", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}

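/*
 * Mirror image of c4iw_get_cqid(): consume uctx->qpids first, and when
 * a fresh doorbell page has to be pulled from the fifo, stock both
 * per-ucontext lists with the page's remaining IDs.
 */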
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
                                        &rdev->resource.qid_fifo_lock);
                if (!qid)
                        goto out;
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                for (i = qid+1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        PDBG("%s qid 0x%x\n", __func__, qid);
        return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        PDBG("%s qid 0x%x\n", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        kfifo_free(&rscp->tpt_fifo);
        kfifo_free(&rscp->qid_fifo);
        kfifo_free(&rscp->pdid_fifo);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8                 /* 256B == min PBL size (32 entries) */

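/*
 * The values handed out here come from the adapter's PBL memory range
 * (rdev->lldi.vr->pbl) rather than host memory, which is why a u32 is
 * wide enough to carry them.
 */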
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
        return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}

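/*
 * Carve the PBL region into gen_pool chunks.  gen_pool_add() allocates
 * per-chunk metadata and so can fail for a large chunk; on failure the
 * chunk size is halved and retried, giving up once chunks drop below
 * 1024 << MIN_PBL_SHIFT (256KB) and making do with whatever was added.
 */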
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        unsigned pbl_start, pbl_chunk, pbl_top;

        rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
        if (!rdev->pbl_pool)
                return -ENOMEM;

        pbl_start = rdev->lldi.vr->pbl.start;
        pbl_chunk = rdev->lldi.vr->pbl.size;
        pbl_top = pbl_start + pbl_chunk;

        while (pbl_start < pbl_top) {
                pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
                if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
                        PDBG("%s failed to add PBL chunk (%x/%x)\n",
                             __func__, pbl_start, pbl_chunk);
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
                                printk(KERN_WARNING MOD
                                       "Failed to add all PBL chunks (%x/%x)\n",
                                       pbl_start, pbl_top - pbl_start);
                                return 0;
                        }
                        pbl_chunk >>= 1;
                } else {
                        PDBG("%s added PBL chunk (%x/%x)\n",
                             __func__, pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }

        return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->pbl_pool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10                /* 1KB == min RQT size (16 entries) */

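/*
 * RQT sizes are passed in queue entries; one RQT entry is 64 bytes
 * (1KB == 16 entries per the define above), hence the "size << 6"
 * conversions below.
 */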
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
        return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        unsigned rqt_start, rqt_chunk, rqt_top;

        rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
        if (!rdev->rqt_pool)
                return -ENOMEM;

        rqt_start = rdev->lldi.vr->rq.start;
        rqt_chunk = rdev->lldi.vr->rq.size;
        rqt_top = rqt_start + rqt_chunk;

        while (rqt_start < rqt_top) {
                rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
                if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
                        PDBG("%s failed to add RQT chunk (%x/%x)\n",
                             __func__, rqt_start, rqt_chunk);
                        if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
                                printk(KERN_WARNING MOD
                                       "Failed to add all RQT chunks (%x/%x)\n",
                                       rqt_start, rqt_top - rqt_start);
                                return 0;
                        }
                        rqt_chunk >>= 1;
                } else {
                        PDBG("%s added RQT chunk (%x/%x)\n",
                             __func__, rqt_start, rqt_chunk);
                        rqt_start += rqt_chunk;
                }
        }
        return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->rqt_pool);
}