Diffstat (limited to 'drivers/infiniband/hw/cxgb4/resource.c')
-rw-r--r--   drivers/infiniband/hw/cxgb4/resource.c   180
1 file changed, 74 insertions(+), 106 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 407ff3924150..cdef4d7fb6d8 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -30,96 +30,25 @@
  * SOFTWARE.
  */
 /* Crude resource management */
-#include <linux/kernel.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-#include <linux/kfifo.h>
 #include <linux/spinlock.h>
-#include <linux/errno.h>
 #include <linux/genalloc.h>
 #include <linux/ratelimit.h>
 #include "iw_cxgb4.h"
 
-#define RANDOM_SIZE 16
-
-static int __c4iw_init_resource_fifo(struct kfifo *fifo,
-                                     spinlock_t *fifo_lock,
-                                     u32 nr, u32 skip_low,
-                                     u32 skip_high,
-                                     int random)
-{
-        u32 i, j, entry = 0, idx;
-        u32 random_bytes;
-        u32 rarray[16];
-        spin_lock_init(fifo_lock);
-
-        if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
-                return -ENOMEM;
-
-        for (i = 0; i < skip_low + skip_high; i++)
-                kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
-        if (random) {
-                j = 0;
-                random_bytes = random32();
-                for (i = 0; i < RANDOM_SIZE; i++)
-                        rarray[i] = i + skip_low;
-                for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
-                        if (j >= RANDOM_SIZE) {
-                                j = 0;
-                                random_bytes = random32();
-                        }
-                        idx = (random_bytes >> (j * 2)) & 0xF;
-                        kfifo_in(fifo,
-                                 (unsigned char *) &rarray[idx],
-                                 sizeof(u32));
-                        rarray[idx] = i;
-                        j++;
-                }
-                for (i = 0; i < RANDOM_SIZE; i++)
-                        kfifo_in(fifo,
-                                 (unsigned char *) &rarray[i],
-                                 sizeof(u32));
-        } else
-                for (i = skip_low; i < nr - skip_high; i++)
-                        kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
-
-        for (i = 0; i < skip_low + skip_high; i++)
-                if (kfifo_out_locked(fifo, (unsigned char *) &entry,
-                                     sizeof(u32), fifo_lock))
-                        break;
-        return 0;
-}
-
-static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
-                                   u32 nr, u32 skip_low, u32 skip_high)
-{
-        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
-                                         skip_high, 0);
-}
-
-static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
-                                          spinlock_t *fifo_lock,
-                                          u32 nr, u32 skip_low, u32 skip_high)
-{
-        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
-                                         skip_high, 1);
-}
-
-static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
+static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
 {
         u32 i;
 
-        spin_lock_init(&rdev->resource.qid_fifo_lock);
-
-        if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
-                        sizeof(u32), GFP_KERNEL))
+        if (c4iw_id_table_alloc(&rdev->resource.qid_table,
+                                rdev->lldi.vr->qp.start,
+                                rdev->lldi.vr->qp.size,
+                                rdev->lldi.vr->qp.size, 0))
                 return -ENOMEM;
 
         for (i = rdev->lldi.vr->qp.start;
              i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
                 if (!(i & rdev->qpmask))
-                        kfifo_in(&rdev->resource.qid_fifo,
-                                 (unsigned char *) &i, sizeof(u32));
+                        c4iw_id_free(&rdev->resource.qid_table, i);
         return 0;
 }
 
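The hunk above replaces the old kfifo of pre-loaded free IDs with the driver's new c4iw_id_table allocator: the table is sized over the QP ID range reported by the LLD (rdev->lldi.vr->qp), every ID that satisfies the qpmask alignment is released into it with c4iw_id_free(), and callers later pull IDs back out with c4iw_id_alloc(), which returns (u32)(-1) when the table is exhausted. The c4iw_id_* helpers themselves are implemented outside this file and are not part of this diff, so the code below is only a minimal userspace sketch of that kind of bitmap-backed ID allocator; the names id_table_init/id_alloc/id_free and the round-robin scan are illustrative assumptions, not the driver's implementation, and the real c4iw_id_table_alloc() also takes the extra arguments and flags (e.g. C4IW_ID_TABLE_F_RANDOM) visible in the calls above.

/*
 * Illustrative sketch only -- a bitmap tracks which IDs in
 * [start, start + num) are in use, alloc scans for a clear bit starting
 * after the last hit, and free clears the bit again.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct id_table {
        uint32_t start;     /* first ID handed out */
        uint32_t num;       /* number of IDs managed */
        uint32_t last;      /* next slot to try (round robin) */
        unsigned char *map; /* one bit per ID: 1 = in use */
};

static int id_table_init(struct id_table *t, uint32_t start, uint32_t num)
{
        t->start = start;
        t->num = num;
        t->last = 0;
        t->map = calloc((num + 7) / 8, 1);
        return t->map ? 0 : -1;
}

/* Returns an ID, or UINT32_MAX when the table is exhausted. */
static uint32_t id_alloc(struct id_table *t)
{
        uint32_t i, slot;

        for (i = 0; i < t->num; i++) {
                slot = (t->last + i) % t->num;
                if (!(t->map[slot / 8] & (1 << (slot % 8)))) {
                        t->map[slot / 8] |= 1 << (slot % 8);
                        t->last = slot + 1;
                        return t->start + slot;
                }
        }
        return UINT32_MAX;
}

static void id_free(struct id_table *t, uint32_t id)
{
        uint32_t slot = id - t->start;

        t->map[slot / 8] &= ~(1 << (slot % 8));
}

int main(void)
{
        struct id_table qids;

        if (id_table_init(&qids, 32, 128))
                return 1;
        printf("got qid %u\n", id_alloc(&qids));   /* 32 */
        printf("got qid %u\n", id_alloc(&qids));   /* 33 */
        id_free(&qids, 32);
        printf("got qid %u\n", id_alloc(&qids));   /* 34: scan resumes past the last hit */
        free(qids.map);
        return 0;
}
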
@@ -127,44 +56,42 @@ static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
 int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
 {
         int err = 0;
-        err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
-                                             &rdev->resource.tpt_fifo_lock,
-                                             nr_tpt, 1, 0);
+        err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
+                                  C4IW_ID_TABLE_F_RANDOM);
         if (err)
                 goto tpt_err;
-        err = c4iw_init_qid_fifo(rdev);
+        err = c4iw_init_qid_table(rdev);
         if (err)
                 goto qid_err;
-        err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
-                                      &rdev->resource.pdid_fifo_lock,
-                                      nr_pdid, 1, 0);
+        err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
+                                  nr_pdid, 1, 0);
         if (err)
                 goto pdid_err;
         return 0;
 pdid_err:
-        kfifo_free(&rdev->resource.qid_fifo);
+        c4iw_id_table_free(&rdev->resource.qid_table);
 qid_err:
-        kfifo_free(&rdev->resource.tpt_fifo);
+        c4iw_id_table_free(&rdev->resource.tpt_table);
 tpt_err:
         return -ENOMEM;
 }
 
 /*
  * returns 0 if no resource available
  */
-u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
+u32 c4iw_get_resource(struct c4iw_id_table *id_table)
 {
         u32 entry;
-        if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
-                return entry;
-        else
+        entry = c4iw_id_alloc(id_table);
+        if (entry == (u32)(-1))
                 return 0;
+        return entry;
 }
 
-void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
 {
         PDBG("%s entry 0x%x\n", __func__, entry);
-        kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
+        c4iw_id_free(id_table, entry);
 }
 
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
@@ -181,10 +108,12 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
                 qid = entry->qid;
                 kfree(entry);
         } else {
-                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-                                        &rdev->resource.qid_fifo_lock);
+                qid = c4iw_get_resource(&rdev->resource.qid_table);
                 if (!qid)
                         goto out;
+                mutex_lock(&rdev->stats.lock);
+                rdev->stats.qid.cur += rdev->qpmask + 1;
+                mutex_unlock(&rdev->stats.lock);
                 for (i = qid+1; i & rdev->qpmask; i++) {
                         entry = kmalloc(sizeof *entry, GFP_KERNEL);
                         if (!entry)
@@ -213,6 +142,10 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 out:
         mutex_unlock(&uctx->lock);
         PDBG("%s qid 0x%x\n", __func__, qid);
+        mutex_lock(&rdev->stats.lock);
+        if (rdev->stats.qid.cur > rdev->stats.qid.max)
+                rdev->stats.qid.max = rdev->stats.qid.cur;
+        mutex_unlock(&rdev->stats.lock);
         return qid;
 }
 
@@ -245,10 +178,12 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
                 qid = entry->qid;
                 kfree(entry);
         } else {
-                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
-                                        &rdev->resource.qid_fifo_lock);
+                qid = c4iw_get_resource(&rdev->resource.qid_table);
                 if (!qid)
                         goto out;
+                mutex_lock(&rdev->stats.lock);
+                rdev->stats.qid.cur += rdev->qpmask + 1;
+                mutex_unlock(&rdev->stats.lock);
                 for (i = qid+1; i & rdev->qpmask; i++) {
                         entry = kmalloc(sizeof *entry, GFP_KERNEL);
                         if (!entry)
@@ -277,6 +212,10 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
 out:
         mutex_unlock(&uctx->lock);
         PDBG("%s qid 0x%x\n", __func__, qid);
+        mutex_lock(&rdev->stats.lock);
+        if (rdev->stats.qid.cur > rdev->stats.qid.max)
+                rdev->stats.qid.max = rdev->stats.qid.cur;
+        mutex_unlock(&rdev->stats.lock);
         return qid;
 }
 
@@ -297,9 +236,9 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
 
 void c4iw_destroy_resource(struct c4iw_resource *rscp)
 {
-        kfifo_free(&rscp->tpt_fifo);
-        kfifo_free(&rscp->qid_fifo);
-        kfifo_free(&rscp->pdid_fifo);
+        c4iw_id_table_free(&rscp->tpt_table);
+        c4iw_id_table_free(&rscp->qid_table);
+        c4iw_id_table_free(&rscp->pdid_table);
 }
 
 /*
@@ -312,15 +251,23 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
         unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
         PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
-        if (!addr)
-                printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
-                                   pci_name(rdev->lldi.pdev));
+        mutex_lock(&rdev->stats.lock);
+        if (addr) {
+                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
+                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
+                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
+        } else
+                rdev->stats.pbl.fail++;
+        mutex_unlock(&rdev->stats.lock);
         return (u32)addr;
 }
 
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
         PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+        mutex_lock(&rdev->stats.lock);
+        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
+        mutex_unlock(&rdev->stats.lock);
         gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
 }
 
@@ -377,12 +324,23 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
         if (!addr)
                 printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
                                    pci_name(rdev->lldi.pdev));
+        mutex_lock(&rdev->stats.lock);
+        if (addr) {
+                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
+                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
+                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
+        } else
+                rdev->stats.rqt.fail++;
+        mutex_unlock(&rdev->stats.lock);
         return (u32)addr;
 }
 
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
         PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
+        mutex_lock(&rdev->stats.lock);
+        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
+        mutex_unlock(&rdev->stats.lock);
         gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
 }
 
@@ -433,12 +391,22 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
 {
         unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
         PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
+        if (addr) {
+                mutex_lock(&rdev->stats.lock);
+                rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
+                if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
+                        rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
+                mutex_unlock(&rdev->stats.lock);
+        }
         return (u32)addr;
 }
 
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
         PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
+        mutex_lock(&rdev->stats.lock);
+        rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
+        mutex_unlock(&rdev->stats.lock);
         gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
 }
 
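Beyond the allocator swap, the remaining hunks thread usage accounting through every pool: each successful allocation bumps a mutex-protected stats.<pool>.cur counter by the size rounded up to the pool's minimum chunk (1 << MIN_PBL_SHIFT, MIN_RQT_SHIFT, MIN_OCQP_SHIFT), records a new high-water mark in .max, a failed attempt increments .fail, and the matching free path subtracts the same rounded size. Rounding both directions to the pool's minimum chunk keeps alloc and free symmetric, presumably so .cur tracks what the gen_pool actually consumes at its minimum allocation order. The sketch below models that pattern in standalone userspace C; the pool_stats struct, the pthread mutex, and the account_* helpers are illustrative stand-ins for rdev->stats and its lock, not driver code.

/*
 * Illustrative sketch only: mutex-protected current/high-water/failure
 * accounting around an allocator, mirroring the rdev->stats.* fields above.
 */
#include <pthread.h>
#include <stdio.h>

struct pool_stats {
        unsigned long cur;  /* bytes currently allocated */
        unsigned long max;  /* high-water mark */
        unsigned long fail; /* failed allocation attempts */
};

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pool_stats pbl_stats;

/* Called after an allocation attempt of 'size' bytes; ok != 0 on success. */
static void account_alloc(struct pool_stats *s, unsigned long size, int ok)
{
        pthread_mutex_lock(&stats_lock);
        if (ok) {
                s->cur += size;
                if (s->cur > s->max)
                        s->max = s->cur;    /* record the high-water mark */
        } else
                s->fail++;
        pthread_mutex_unlock(&stats_lock);
}

static void account_free(struct pool_stats *s, unsigned long size)
{
        pthread_mutex_lock(&stats_lock);
        s->cur -= size;
        pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
        account_alloc(&pbl_stats, 4096, 1);
        account_alloc(&pbl_stats, 8192, 1);
        account_free(&pbl_stats, 4096);
        account_alloc(&pbl_stats, 1 << 20, 0);     /* a failed attempt */
        printf("cur=%lu max=%lu fail=%lu\n",
               pbl_stats.cur, pbl_stats.max, pbl_stats.fail);
        return 0;
}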