Diffstat (limited to 'drivers/infiniband/hw/cxgb4/device.c')
-rw-r--r--	drivers/infiniband/hw/cxgb4/device.c	316
1 file changed, 186 insertions(+), 130 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 9bbf491d5d9e..40a13cc633a3 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,34 +44,38 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
 static struct dentry *c4iw_debugfs_root;
 
-struct debugfs_qp_data {
+struct c4iw_debugfs_data {
 	struct c4iw_dev *devp;
 	char *buf;
 	int bufsize;
 	int pos;
 };
 
-static int count_qps(int id, void *p, void *data)
+static int count_idrs(int id, void *p, void *data)
 {
-	struct c4iw_qp *qp = p;
 	int *countp = data;
 
-	if (id != qp->wq.sq.qid)
-		return 0;
-
 	*countp = *countp + 1;
 	return 0;
 }
 
-static int dump_qps(int id, void *p, void *data)
+static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
+			    loff_t *ppos)
+{
+	struct c4iw_debugfs_data *d = file->private_data;
+
+	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
+}
+
+static int dump_qp(int id, void *p, void *data)
 {
 	struct c4iw_qp *qp = p;
-	struct debugfs_qp_data *qpd = data;
+	struct c4iw_debugfs_data *qpd = data;
 	int space;
 	int cc;
 
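The hunk above replaces the driver's hand-rolled qp_read() copy loop (removed further down) with a single debugfs_read() helper built on simple_read_from_buffer(), which does the bounds checking and *ppos bookkeeping in one call. A minimal sketch of the same pattern, with a hypothetical demo_data/demo_read() standing in for c4iw_debugfs_data/debugfs_read():

/* Sketch only, not part of the patch: a debugfs read built on
 * simple_read_from_buffer(). The snapshot buffer is assumed to be
 * rendered once at open time, as qp_open()/stag_open() do.
 */
#include <linux/fs.h>
#include <linux/uaccess.h>

struct demo_data {
	char *buf;	/* snapshot rendered at open time */
	int pos;	/* number of valid bytes in buf */
};

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	struct demo_data *d = file->private_data;

	/* clamps to d->pos, copies to user space, advances *ppos */
	return simple_read_from_buffer(ubuf, count, ppos, d->buf, d->pos);
}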
@@ -83,17 +87,22 @@ static int dump_qps(int id, void *p, void *data)
 		return 1;
 
 	if (qp->ep)
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u "
 			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state,
+			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP,
 			      qp->ep->hwtid, (int)qp->ep->com.state,
 			      &qp->ep->com.local_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.local_addr.sin_port),
 			      &qp->ep->com.remote_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.remote_addr.sin_port));
 	else
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state);
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u\n",
+			      qp->wq.sq.qid, qp->wq.rq.qid,
+			      (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP);
 	if (cc < space)
 		qpd->pos += cc;
 	return 0;
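snprintf() returns the length the formatted string would have had, so the `cc < space` test above detects truncation: a truncated entry is simply not claimed, and once the cursor reaches the end a later callback sees space == 0 and stops the idr walk. A standalone sketch of that accounting (append_entry() is a hypothetical name):

/* Sketch only: the snprintf() space accounting used by dump_qp(). */
static int append_entry(char *buf, int bufsize, int *pos, int id)
{
	int space = bufsize - *pos - 1;	/* keep one byte for the NUL */
	int cc;

	if (space == 0)
		return 1;	/* buffer full: tell the walker to stop */
	cc = snprintf(buf + *pos, space, "entry %d\n", id);
	if (cc < space)
		*pos += cc;	/* fully written: claim the bytes */
	return 0;
}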
@@ -101,7 +110,7 @@ static int dump_qps(int id, void *p, void *data)
 
 static int qp_release(struct inode *inode, struct file *file)
 {
-	struct debugfs_qp_data *qpd = file->private_data;
+	struct c4iw_debugfs_data *qpd = file->private_data;
 	if (!qpd) {
 		printk(KERN_INFO "%s null qpd?\n", __func__);
 		return 0;
@@ -113,7 +122,7 @@ static int qp_release(struct inode *inode, struct file *file)
 
 static int qp_open(struct inode *inode, struct file *file)
 {
-	struct debugfs_qp_data *qpd;
+	struct c4iw_debugfs_data *qpd;
 	int ret = 0;
 	int count = 1;
 
@@ -126,7 +135,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	qpd->pos = 0;
 
 	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, count_qps, &count);
+	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
 	spin_unlock_irq(&qpd->devp->lock);
 
 	qpd->bufsize = count * 128;
@@ -137,7 +146,7 @@ static int qp_open(struct inode *inode, struct file *file)
 	}
 
 	spin_lock_irq(&qpd->devp->lock);
-	idr_for_each(&qpd->devp->qpidr, dump_qps, qpd);
+	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
 	spin_unlock_irq(&qpd->devp->lock);
 
 	qpd->buf[qpd->pos++] = 0;
@@ -149,43 +158,86 @@ out:
 	return ret;
 }
 
-static ssize_t qp_read(struct file *file, char __user *buf, size_t count,
-		       loff_t *ppos)
+static const struct file_operations qp_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = qp_open,
+	.release = qp_release,
+	.read = debugfs_read,
+	.llseek = default_llseek,
+};
+
+static int dump_stag(int id, void *p, void *data)
 {
-	struct debugfs_qp_data *qpd = file->private_data;
-	loff_t pos = *ppos;
-	loff_t avail = qpd->pos;
+	struct c4iw_debugfs_data *stagd = data;
+	int space;
+	int cc;
+
+	space = stagd->bufsize - stagd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+	if (cc < space)
+		stagd->pos += cc;
+	return 0;
+}
 
-	if (pos < 0)
-		return -EINVAL;
-	if (pos >= avail)
+static int stag_release(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *stagd = file->private_data;
+	if (!stagd) {
+		printk(KERN_INFO "%s null stagd?\n", __func__);
 		return 0;
-	if (count > avail - pos)
-		count = avail - pos;
+	}
+	kfree(stagd->buf);
+	kfree(stagd);
+	return 0;
+}
 
-	while (count) {
-		size_t len = 0;
+static int stag_open(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *stagd;
+	int ret = 0;
+	int count = 1;
 
-		len = min((int)count, (int)qpd->pos - (int)pos);
-		if (copy_to_user(buf, qpd->buf + pos, len))
-			return -EFAULT;
-		if (len == 0)
-			return -EINVAL;
+	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
+	if (!stagd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	stagd->devp = inode->i_private;
+	stagd->pos = 0;
+
+	spin_lock_irq(&stagd->devp->lock);
+	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
+	spin_unlock_irq(&stagd->devp->lock);
 
-		buf += len;
-		pos += len;
-		count -= len;
+	stagd->bufsize = count * sizeof("0x12345678\n");
+	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+	if (!stagd->buf) {
+		ret = -ENOMEM;
+		goto err1;
 	}
-	count = pos - *ppos;
-	*ppos = pos;
-	return count;
+
+	spin_lock_irq(&stagd->devp->lock);
+	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
+	spin_unlock_irq(&stagd->devp->lock);
+
+	stagd->buf[stagd->pos++] = 0;
+	file->private_data = stagd;
+	goto out;
+err1:
+	kfree(stagd);
+out:
+	return ret;
 }
 
-static const struct file_operations qp_debugfs_fops = {
+static const struct file_operations stag_debugfs_fops = {
 	.owner = THIS_MODULE,
-	.open = qp_open,
-	.release = qp_release,
-	.read = qp_read,
+	.open = stag_open,
+	.release = stag_release,
+	.read = debugfs_read,
+	.llseek = default_llseek,
 };
 
 static int setup_debugfs(struct c4iw_dev *devp)
@@ -199,6 +251,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
 			(void *)devp, &qp_debugfs_fops);
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
+
+	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
+				 (void *)devp, &stag_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = 4096;
 	return 0;
 }
 
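One detail worth noting in the stag code above: sizeof of a string literal includes the terminating NUL, so sizeof("0x12345678\n") is 12 and the per-entry budget covers the longest possible "0x%x\n" line plus the final NUL byte that stag_open() appends. A small sketch of that bound (stag_line_fits() is a hypothetical helper):

/* Sketch only: why sizeof("0x12345678\n") bounds one stag line.
 * A stag printed as "0x%x\n" is at most 10 hex chars plus a newline,
 * and the literal's size (11 + 1 = 12) also accounts for the NUL.
 */
static int stag_line_fits(u32 stag)
{
	char line[sizeof("0x12345678\n")];	/* 12 bytes */

	return snprintf(line, sizeof(line), "0x%x\n", stag) < sizeof(line);
}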
@@ -290,7 +347,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
 		goto err3;
 	}
+	err = c4iw_ocqp_pool_create(rdev);
+	if (err) {
+		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
+		goto err4;
+	}
 	return 0;
+err4:
+	c4iw_rqtpool_destroy(rdev);
 err3:
 	c4iw_pblpool_destroy(rdev);
 err2:
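The new err4 label extends c4iw_rdev_open()'s unwind ladder: each label undoes exactly one earlier setup step, so a failure at step N tears down steps N-1 through 1 in reverse order. A minimal sketch of the shape, with hypothetical setup_*/teardown_* helpers:

/* Sketch only: the goto unwind ladder c4iw_rdev_open() follows. */
static int open_all(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;	/* nothing to undo yet */
	err = setup_b();
	if (err)
		goto err_a;	/* only A succeeded */
	err = setup_c();
	if (err)
		goto err_b;	/* A and B succeeded */
	return 0;
err_b:
	teardown_b();
err_a:
	teardown_a();
	return err;
}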
@@ -306,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }
 
-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	cancel_delayed_work_sync(&dev->db_drop_task);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -328,26 +397,33 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
 
-	mutex_lock(&dev_mutex);
+	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
+		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
+		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
+	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
+					  devp->rdev.lldi.vr->ocq.size);
+
+	PDBG(KERN_INFO MOD "ocq memory: "
+	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
+	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
+	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
 
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
 		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
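c4iw_alloc() now reports failures as ERR_PTR(-errno) instead of a bare NULL, which lets the CXGB4_STATE_UP path further down distinguish why allocation failed. A sketch of the idiom, with a hypothetical make_widget() for illustration:

/* Sketch only: the ERR_PTR/IS_ERR/PTR_ERR idiom adopted above. */
#include <linux/err.h>
#include <linux/slab.h>

struct widget {
	int id;
};

static struct widget *make_widget(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return w;
}

static int use_widget(void)
{
	struct widget *w = make_widget();

	if (IS_ERR(w))
		return PTR_ERR(w);	/* recover the negative errno */
	kfree(w);
	return 0;
}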
@@ -360,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 
 static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 {
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
 	static int vers_printed;
 	int i;
 
@@ -368,65 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
 		       DRV_VERSION);
 
-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
 		goto out;
+	}
+	ctx->lldi = *infop;
 
 	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);
 
-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
-out:
-	return dev;
-}
-
-static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl,
-				       unsigned int skb_len,
-				       unsigned int pull_len)
-{
-	struct sk_buff *skb;
-	struct skb_shared_info *ssi;
-
-	if (gl->tot_len <= 512) {
-		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
-		if (unlikely(!skb))
-			goto out;
-		__skb_put(skb, gl->tot_len);
-		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
-	} else {
-		skb = alloc_skb(skb_len, GFP_ATOMIC);
-		if (unlikely(!skb))
-			goto out;
-		__skb_put(skb, pull_len);
-		skb_copy_to_linear_data(skb, gl->va, pull_len);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
-		ssi->frags[0].size = gl->frags[0].size - pull_len;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags - 1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-
-		skb->len = gl->tot_len;
-		skb->data_len = skb->len - pull_len;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
-	}
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
+
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
 out:
-	return skb;
+	return ctx;
 }
 
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
 	const struct cpl_act_establish *rpl;
 	unsigned int opcode;
@@ -447,7 +491,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 		c4iw_ev_handler(dev, qid);
 		return 0;
 	} else {
-		skb = t4_pktgl_to_skb(gl, 128, 128);
+		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
 		if (unlikely(!skb))
 			goto nomem;
 	}
@@ -468,39 +512,49 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
 
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
 		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
+			struct ib_event event;
+
+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
+			memset(&event, 0, sizeof event);
+			event.event = IB_EVENT_DEVICE_FATAL;
+			event.device = &ctx->dev->ibdev;
+			ib_dispatch_event(&event);
+			c4iw_remove(ctx);
+		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	}
 	return 0;
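In the recovery case above the driver now flags the rdev fatal and broadcasts IB_EVENT_DEVICE_FATAL before tearing the device down, giving consumers a chance to react. A sketch of how a consumer might observe that event, assuming the standard ib_event_handler registration interface (the demo_* names are hypothetical):

/* Sketch only: an IB consumer catching IB_EVENT_DEVICE_FATAL. */
#include <rdma/ib_verbs.h>

static void demo_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	if (event->event == IB_EVENT_DEVICE_FATAL)
		pr_err("%s: fatal device error\n", event->device->name);
}

static struct ib_event_handler demo_ev_handler;

/* at consumer init time, given a struct ib_device *ibdev */
static void demo_watch(struct ib_device *ibdev)
{
	INIT_IB_EVENT_HANDLER(&demo_ev_handler, ibdev, demo_event_handler);
	ib_register_event_handler(&demo_ev_handler);
}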
@@ -533,11 +587,13 @@ static int __init c4iw_init_module(void)
 
 static void __exit c4iw_exit_module(void)
 {
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;
 
 	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
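The exit path above uses list_for_each_entry_safe() because it kfree()s the node it is standing on; the plain iterator would dereference ctx->entry.next out of freed memory on the next step. A sketch of the difference:

/* Sketch only: why the _safe iterator is required when freeing. */
struct uld_ctx *ctx, *tmp;

list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
	/* tmp already holds the next node, so freeing ctx is safe */
	kfree(ctx);
}

/* By contrast, list_for_each_entry(ctx, &uld_ctx_list, entry) would
 * read ctx->entry.next from freed memory after the kfree().
 */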