Diffstat (limited to 'drivers/infiniband/hw/amso1100')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c           |  5
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h           |  2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c     | 13
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c        | 18
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c  | 39
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c        | 36
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c      | 68
7 files changed, 93 insertions, 88 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index dc1ebeac35c7..27fe242ed435 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1155,7 +1155,8 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 		goto bail10;
 	}
 
-	c2_register_device(c2dev);
+	if (c2_register_device(c2dev))
+		goto bail10;
 
 	return 0;
 
@@ -1243,7 +1244,7 @@ static struct pci_driver c2_pci_driver = {
 
 static int __init c2_init_module(void)
 {
-	return pci_module_init(&c2_pci_driver);
+	return pci_register_driver(&c2_pci_driver);
 }
 
 static void __exit c2_exit_module(void)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 1b17dcdd0505..04a9db5de881 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -302,7 +302,7 @@ struct c2_dev {
 	unsigned long pa;		/* PA device memory */
 	void **qptr_array;
 
-	kmem_cache_t *host_msg_cache;
+	struct kmem_cache *host_msg_cache;
 
 	struct list_head cca_link;		/* adapter list */
 	struct list_head eh_wakeup_list;	/* event wakeup list */
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 028a60bbfca9..0315f99e4191 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
 		root = next;
 	}
 }
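Note (not part of the patch): the two hunks above replace a raw page allocation plus streaming dma_map_single()/dma_unmap_single() with a single coherent allocation. A minimal sketch of that alloc/free pairing, using a hypothetical chunk structure rather than the driver's sp_chunk:

/* Illustrative sketch only -- hypothetical names, not the driver's code. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

struct example_chunk {
	dma_addr_t dma_addr;
	struct example_chunk *next;
};

static struct example_chunk *example_alloc_chunk(struct pci_dev *pdev,
						 gfp_t gfp_mask)
{
	struct example_chunk *chunk;
	dma_addr_t dma_addr;

	/* One call returns both the CPU virtual address and the bus
	 * address, so no separate dma_map_single() step is needed. */
	chunk = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_addr, gfp_mask);
	if (!chunk)
		return NULL;

	chunk->dma_addr = dma_addr;
	chunk->next = NULL;
	return chunk;
}

static void example_free_chunk(struct pci_dev *pdev, struct example_chunk *chunk)
{
	/* Size, CPU address and bus address must match the allocation. */
	dma_free_coherent(&pdev->dev, PAGE_SIZE, chunk, chunk->dma_addr);
}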
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 9d7bcc5ade93..05c9154d46f4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
 {
-
-	dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping),
-			 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
-	free_pages((unsigned long) mq->msg_pool.host,
-		   get_order(mq->q_size * mq->msg_size));
+	dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
+			  mq->msg_pool.host, pci_unmap_addr(mq, mapping));
 }
 
 static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 			   int msg_size)
 {
-	unsigned long pool_start;
+	u8 *pool_start;
 
-	pool_start = __get_free_pages(GFP_KERNEL,
-				      get_order(q_size * msg_size));
+	pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
+					&mq->host_dma, GFP_KERNEL);
 	if (!pool_start)
 		return -ENOMEM;
 
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
 		       0,		/* index (currently unknown) */
 		       q_size,
 		       msg_size,
-		       (u8 *) pool_start,
+		       pool_start,
 		       NULL,		/* peer (currently unknown) */
 		       C2_MQ_HOST_TARGET);
 
-	mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
-				      (void *)pool_start,
-				      q_size * msg_size, DMA_FROM_DEVICE);
 	pci_unmap_addr_set(mq, mapping, mq->host_dma);
 
 	return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index da98d9f71429..fef972752912 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -757,20 +757,17 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
 
 int c2_register_device(struct c2_dev *dev)
 {
-	int ret;
+	int ret = -ENOMEM;
 	int i;
 
 	/* Register pseudo network device */
 	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
-	if (dev->pseudo_netdev) {
-		ret = register_netdev(dev->pseudo_netdev);
-		if (ret) {
-			printk(KERN_ERR PFX
-				"Unable to register netdev, ret = %d\n", ret);
-			free_netdev(dev->pseudo_netdev);
-			return ret;
-		}
-	}
+	if (!dev->pseudo_netdev)
+		goto out3;
+
+	ret = register_netdev(dev->pseudo_netdev);
+	if (ret)
+		goto out2;
 
 	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
 	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -848,21 +845,25 @@ int c2_register_device(struct c2_dev *dev)
 
 	ret = ib_register_device(&dev->ibdev);
 	if (ret)
-		return ret;
+		goto out1;
 
 	for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) {
 		ret = class_device_create_file(&dev->ibdev.class_dev,
 					       c2_class_attributes[i]);
-		if (ret) {
-			unregister_netdev(dev->pseudo_netdev);
-			free_netdev(dev->pseudo_netdev);
-			ib_unregister_device(&dev->ibdev);
-			return ret;
-		}
+		if (ret)
+			goto out0;
 	}
+	goto out3;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
-	return 0;
+out0:
+	ib_unregister_device(&dev->ibdev);
+out1:
+	unregister_netdev(dev->pseudo_netdev);
+out2:
+	free_netdev(dev->pseudo_netdev);
+out3:
+	pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret);
+	return ret;
 }
 
 void c2_unregister_device(struct c2_dev *dev)
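Note (not part of the patch): the rewritten c2_register_device() above uses the common kernel error-unwind idiom, where a chain of goto labels tears down, in reverse order, only what was already set up. A compact sketch of the idea, with every helper name a hypothetical stand-in:

/* Illustrative sketch only -- hypothetical helpers, not the driver's code. */
#include <linux/errno.h>

static int setup_netdev(void) { return 0; }
static int setup_ibdev(void)  { return 0; }
static int setup_sysfs(void)  { return 0; }
static void teardown_ibdev(void)  { }
static void teardown_netdev(void) { }

static int example_register(void)
{
	int ret = -ENOMEM;

	if (setup_netdev())
		goto out2;		/* nothing to unwind yet */
	ret = setup_ibdev();
	if (ret)
		goto out1;
	ret = setup_sysfs();
	if (ret)
		goto out0;
	goto out2;			/* success: skip the unwind chain */

out0:
	teardown_ibdev();		/* each label undoes one earlier step */
out1:
	teardown_netdev();
out2:
	return ret;
}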
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697aa335..179d005ed4a5 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
-
+	c2_lock_cqs(send_cq, recv_cq);
 	c2_free_qpn(c2dev, qp->qpn);
-
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...
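Note (not part of the patch): the helpers added above avoid an ABBA deadlock by always taking the two CQ locks in a fixed, address-based order, and use spin_lock_nested() so lockdep accepts acquiring a second lock of the same class. The same pattern on a hypothetical pair of structures:

/* Illustrative sketch only -- hypothetical structure, mirrors the idiom above. */
#include <linux/spinlock.h>

struct example_cq {
	spinlock_t lock;
};

static void example_lock_pair(struct example_cq *a, struct example_cq *b)
{
	if (a == b) {
		spin_lock_irq(&a->lock);
	} else if (a > b) {
		/* Higher-addressed lock always taken first, so two CPUs
		 * locking the same pair can never wait on each other. */
		spin_lock_irq(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

static void example_unlock_pair(struct example_cq *a, struct example_cq *b)
{
	if (a == b) {
		spin_unlock_irq(&a->lock);
	} else if (a > b) {
		spin_unlock(&b->lock);		/* release the inner lock first */
		spin_unlock_irq(&a->lock);	/* irqs re-enabled with the outer lock */
	} else {
		spin_unlock(&a->lock);
		spin_unlock_irq(&b->lock);
	}
}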
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index e37c5688c214..1687c511cb2f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -150,15 +150,15 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
 	    (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
 	if (!reply)
 		err = -ENOMEM;
-
+	else
 		err = c2_errno(reply);
 	if (err)
 		goto bail2;
 
 	props->fw_ver =
 		((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
-		((be32_to_cpu(reply->fw_ver_minor) && 0xFFFF) << 16) |
-		(be32_to_cpu(reply->fw_ver_patch) && 0xFFFF);
+		((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
+		(be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
 	memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
 	props->max_mr_size = 0xFFFFFFFF;
 	props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
@@ -441,7 +441,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
  * involves initalizing the various limits and resouce pools that
  * comprise the RNIC instance.
  */
-int c2_rnic_init(struct c2_dev *c2dev)
+int __devinit c2_rnic_init(struct c2_dev *c2dev)
 {
 	int err;
 	u32 qsize, msgsize;
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Verbs Reply Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-	q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->rep_vq.host_dma, GFP_KERNEL);
 	if (!q1_pages) {
 		err = -ENOMEM;
 		goto bail1;
 	}
-	c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-						(void *)q1_pages, qsize * msgsize,
-						DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
 	pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
 		 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,17 +538,15 @@ int c2_rnic_init(struct c2_dev *c2dev)
 	/* Initialize the Asynchronus Event Queue */
 	qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
 	msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-	q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
+	q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
+				      &c2dev->aeq.host_dma, GFP_KERNEL);
 	if (!q2_pages) {
 		err = -ENOMEM;
 		goto bail2;
 	}
-	c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
-					     (void *)q2_pages, qsize * msgsize,
-					     DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
-		 (unsigned long long) c2dev->rep_vq.host_dma);
+	pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
+		 (unsigned long long) c2dev->aeq.host_dma);
 	c2_mq_rep_init(&c2dev->aeq,
 		       2,
 		       qsize,
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
  bail4:
 	vq_term(c2dev);
  bail3:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q2_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
 bail2:
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(q1_pages);
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
 bail1:
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
 bail0:
@@ -619,7 +611,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
 /*
  * Called by c2_remove to cleanup the RNIC resources.
  */
-void c2_rnic_term(struct c2_dev *c2dev)
+void __devexit c2_rnic_term(struct c2_dev *c2dev)
 {
 
 	/* Close the open adapter instance */
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
 	/* Free the verbs request allocator */
 	vq_term(c2dev);
 
-	/* Unmap and free the asynchronus event queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->aeq, mapping),
-			 c2dev->aeq.q_size * c2dev->aeq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->aeq.msg_pool.host);
-
-	/* Unmap and free the verbs reply queue */
-	dma_unmap_single(c2dev->ibdev.dma_device,
-			 pci_unmap_addr(&c2dev->rep_vq, mapping),
-			 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-			 DMA_FROM_DEVICE);
-	kfree(c2dev->rep_vq.msg_pool.host);
+	/* Free the asynchronus event queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->aeq.q_size * c2dev->aeq.msg_size,
+			  c2dev->aeq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->aeq, mapping));
+
+	/* Free the verbs reply queue */
+	dma_free_coherent(&c2dev->pcidev->dev,
+			  c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
+			  c2dev->rep_vq.msg_pool.host,
+			  pci_unmap_addr(&c2dev->rep_vq, mapping));
 
 	/* Free the MQ shared pointer pool */
 	c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
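Note (not part of the patch): the c2_rnic_query() hunk above also fixes a subtle operator bug -- '&&' (logical AND) reduced the minor and patch fields to 0 or 1, whereas '&' (bitwise AND) keeps their low 16 bits when packing the 64-bit firmware version. A small userspace sketch of the corrected packing; the function name and sample version are hypothetical:

/* Illustrative sketch only -- not part of the patch above. */
#include <assert.h>
#include <stdint.h>

static uint64_t pack_fw_ver(uint32_t major, uint32_t minor, uint32_t patch)
{
	/* major in bits 63:32, minor in bits 31:16, patch in bits 15:0 */
	return ((uint64_t)major << 32) |
	       ((uint64_t)(minor & 0xFFFF) << 16) |
	       (uint64_t)(patch & 0xFFFF);
}

int main(void)
{
	/* hypothetical firmware version 2.1.3 */
	assert(pack_fw_ver(2, 1, 3) == 0x0000000200010003ULL);
	return 0;
}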