Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 5
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.h | 2
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_alloc.c | 13
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 18
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c | 39
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c | 36
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_rnic.c | 68
-rw-r--r--  drivers/infiniband/hw/ehca/Kconfig | 1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c | 5
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c | 17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c | 17
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h | 8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c | 56
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c | 8
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c | 32
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_tools.h | 1
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.c | 13
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.h | 15
-rw-r--r--  drivers/infiniband/hw/ipath/Kconfig | 2
-rw-r--r--  drivers/infiniband/hw/ipath/Makefile | 5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_diag.c | 65
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c | 31
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c | 117
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c | 8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c | 10
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h | 7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c | 14
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c | 10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c | 21
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c | 29
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c | 12
40 files changed, 414 insertions(+), 316 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index dc1ebeac35c7..27fe242ed435 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1155,7 +1155,8 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 		goto bail10;
 	}
 
-	c2_register_device(c2dev);
+	if (c2_register_device(c2dev))
+		goto bail10;
 
 	return 0;
 
@@ -1243,7 +1244,7 @@ static struct pci_driver c2_pci_driver = {
 
 static int __init c2_init_module(void)
 {
-	return pci_module_init(&c2_pci_driver);
+	return pci_register_driver(&c2_pci_driver);
 }
 
 static void __exit c2_exit_module(void)
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 1b17dcdd0505..04a9db5de881 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -302,7 +302,7 @@ struct c2_dev {
 	unsigned long pa;		/* PA device memory */
 	void **qptr_array;
 
-	kmem_cache_t *host_msg_cache;
+	struct kmem_cache *host_msg_cache;
 
 	struct list_head cca_link;		/* adapter list */
 	struct list_head eh_wakeup_list;	/* event wakeup list */
diff --git a/drivers/infiniband/hw/amso1100/c2_alloc.c b/drivers/infiniband/hw/amso1100/c2_alloc.c
index 028a60bbfca9..0315f99e4191 100644
--- a/drivers/infiniband/hw/amso1100/c2_alloc.c
+++ b/drivers/infiniband/hw/amso1100/c2_alloc.c
@@ -42,13 +42,14 @@ static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
 {
 	int i;
 	struct sp_chunk *new_head;
+	dma_addr_t dma_addr;
 
-	new_head = (struct sp_chunk *) __get_free_page(gfp_mask);
+	new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
+				      &dma_addr, gfp_mask);
 	if (new_head == NULL)
 		return -ENOMEM;
 
-	new_head->dma_addr = dma_map_single(c2dev->ibdev.dma_device, new_head,
-					    PAGE_SIZE, DMA_FROM_DEVICE);
+	new_head->dma_addr = dma_addr;
 	pci_unmap_addr_set(new_head, mapping, new_head->dma_addr);
 
 	new_head->next = NULL;
@@ -80,10 +81,8 @@ void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
 
 	while (root) {
 		next = root->next;
-		dma_unmap_single(c2dev->ibdev.dma_device,
-				 pci_unmap_addr(root, mapping), PAGE_SIZE,
-				 DMA_FROM_DEVICE);
-		__free_page((struct page *) root);
+		dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
+				  pci_unmap_addr(root, mapping));
 		root = next;
 	}
 }
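The two c2_alloc.c hunks above collapse a __get_free_page() plus dma_map_single() pair into a single dma_alloc_coherent() call, with the matching teardown done by dma_free_coherent(). A minimal sketch of that pairing follows; the device pointer, size, and wrapper names are illustrative, not driver code:

    #include <linux/dma-mapping.h>

    /* Allocate a coherent buffer: one call returns both the CPU virtual
     * address and the bus address the hardware should be given. */
    static void *example_alloc(struct device *dev, size_t size,
                               dma_addr_t *dma_handle)
    {
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }

    /* Free with the same device, size, CPU address, and bus address. */
    static void example_free(struct device *dev, size_t size,
                             void *cpu_addr, dma_addr_t dma_handle)
    {
            dma_free_coherent(dev, size, cpu_addr, dma_handle);
    }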
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 9d7bcc5ade93..05c9154d46f4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -246,20 +246,17 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
246 246
247static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) 247static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
248{ 248{
249 249 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
250 dma_unmap_single(c2dev->ibdev.dma_device, pci_unmap_addr(mq, mapping), 250 mq->msg_pool.host, pci_unmap_addr(mq, mapping));
251 mq->q_size * mq->msg_size, DMA_FROM_DEVICE);
252 free_pages((unsigned long) mq->msg_pool.host,
253 get_order(mq->q_size * mq->msg_size));
254} 251}
255 252
256static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size, 253static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
257 int msg_size) 254 int msg_size)
258{ 255{
259 unsigned long pool_start; 256 u8 *pool_start;
260 257
261 pool_start = __get_free_pages(GFP_KERNEL, 258 pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
262 get_order(q_size * msg_size)); 259 &mq->host_dma, GFP_KERNEL);
263 if (!pool_start) 260 if (!pool_start)
264 return -ENOMEM; 261 return -ENOMEM;
265 262
@@ -267,13 +264,10 @@ static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
267 0, /* index (currently unknown) */ 264 0, /* index (currently unknown) */
268 q_size, 265 q_size,
269 msg_size, 266 msg_size,
270 (u8 *) pool_start, 267 pool_start,
271 NULL, /* peer (currently unknown) */ 268 NULL, /* peer (currently unknown) */
272 C2_MQ_HOST_TARGET); 269 C2_MQ_HOST_TARGET);
273 270
274 mq->host_dma = dma_map_single(c2dev->ibdev.dma_device,
275 (void *)pool_start,
276 q_size * msg_size, DMA_FROM_DEVICE);
277 pci_unmap_addr_set(mq, mapping, mq->host_dma); 271 pci_unmap_addr_set(mq, mapping, mq->host_dma);
278 272
279 return 0; 273 return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index da98d9f71429..fef972752912 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -757,20 +757,17 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
 
 int c2_register_device(struct c2_dev *dev)
 {
-	int ret;
+	int ret = -ENOMEM;
 	int i;
 
 	/* Register pseudo network device */
 	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
-	if (dev->pseudo_netdev) {
-		ret = register_netdev(dev->pseudo_netdev);
-		if (ret) {
-			printk(KERN_ERR PFX
-				"Unable to register netdev, ret = %d\n", ret);
-			free_netdev(dev->pseudo_netdev);
-			return ret;
-		}
-	}
+	if (!dev->pseudo_netdev)
+		goto out3;
+
+	ret = register_netdev(dev->pseudo_netdev);
+	if (ret)
+		goto out2;
 
 	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
 	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -848,21 +845,25 @@ int c2_register_device(struct c2_dev *dev)
 
 	ret = ib_register_device(&dev->ibdev);
 	if (ret)
-		return ret;
+		goto out1;
 
 	for (i = 0; i < ARRAY_SIZE(c2_class_attributes); ++i) {
 		ret = class_device_create_file(&dev->ibdev.class_dev,
 					       c2_class_attributes[i]);
-		if (ret) {
-			unregister_netdev(dev->pseudo_netdev);
-			free_netdev(dev->pseudo_netdev);
-			ib_unregister_device(&dev->ibdev);
-			return ret;
-		}
+		if (ret)
+			goto out0;
 	}
+	goto out3;
 
-	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
-	return 0;
+out0:
+	ib_unregister_device(&dev->ibdev);
+out1:
+	unregister_netdev(dev->pseudo_netdev);
+out2:
+	free_netdev(dev->pseudo_netdev);
+out3:
+	pr_debug("%s:%u ret=%d\n", __FUNCTION__, __LINE__, ret);
+	return ret;
 }
 
 void c2_unregister_device(struct c2_dev *dev)
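The reworked c2_register_device() above replaces nested cleanup-and-return blocks with a single chain of out0..out3 labels, so each failure point only has to jump to the right place in one unwind sequence. A compact sketch of that style with stand-in setup/teardown helpers (purely illustrative names, not the driver's calls):

    /* Stand-ins for the netdev/ib_device registration steps in the hunk. */
    static int acquire_a(void)  { return 0; }
    static int acquire_b(void)  { return 0; }
    static int acquire_c(void)  { return 0; }
    static void release_a(void) { }
    static void release_b(void) { }

    static int example_register(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto out;
            ret = acquire_b();
            if (ret)
                    goto undo_a;
            ret = acquire_c();
            if (ret)
                    goto undo_b;
            return 0;               /* success: nothing is unwound */

    undo_b:
            release_b();
    undo_a:
            release_a();
    out:
            return ret;
    }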
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 5bcf697aa335..179d005ed4a5 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -564,6 +564,32 @@ int c2_alloc_qp(struct c2_dev *c2dev,
 	return err;
 }
 
+static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_lock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_lock_irq(&send_cq->lock);
+		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+	} else {
+		spin_lock_irq(&recv_cq->lock);
+		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+	}
+}
+
+static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
+{
+	if (send_cq == recv_cq)
+		spin_unlock_irq(&send_cq->lock);
+	else if (send_cq > recv_cq) {
+		spin_unlock(&recv_cq->lock);
+		spin_unlock_irq(&send_cq->lock);
+	} else {
+		spin_unlock(&send_cq->lock);
+		spin_unlock_irq(&recv_cq->lock);
+	}
+}
+
 void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	struct c2_cq *send_cq;
@@ -576,15 +602,9 @@ void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
 	 * Lock CQs here, so that CQ polling code can do QP lookup
 	 * without taking a lock.
 	 */
-	spin_lock_irq(&send_cq->lock);
-	if (send_cq != recv_cq)
-		spin_lock(&recv_cq->lock);
-
+	c2_lock_cqs(send_cq, recv_cq);
 	c2_free_qpn(c2dev, qp->qpn);
-
-	if (send_cq != recv_cq)
-		spin_unlock(&recv_cq->lock);
-	spin_unlock_irq(&send_cq->lock);
+	c2_unlock_cqs(send_cq, recv_cq);
 
 	/*
 	 * Destory qp in the rnic...
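c2_lock_cqs()/c2_unlock_cqs() above take the two CQ locks in a fixed, pointer-ordered sequence, so two threads destroying QPs that share the same pair of CQs can never acquire them in opposite orders and deadlock. The same idea in isolation; struct and function names are illustrative:

    #include <linux/spinlock.h>

    struct example_cq {
            spinlock_t lock;
    };

    /* Always take the higher-addressed lock first; lockdep is told the
     * second acquisition is an intentional nested one. */
    static void example_lock_pair(struct example_cq *a, struct example_cq *b)
    {
            if (a == b) {
                    spin_lock_irq(&a->lock);
            } else if (a > b) {
                    spin_lock_irq(&a->lock);
                    spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock_irq(&b->lock);
                    spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
            }
    }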
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index e37c5688c214..1687c511cb2f 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -150,15 +150,15 @@ static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
 		(struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
 	if (!reply)
 		err = -ENOMEM;
-
-	err = c2_errno(reply);
+	else
+		err = c2_errno(reply);
 	if (err)
 		goto bail2;
 
 	props->fw_ver =
 		((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
-		((be32_to_cpu(reply->fw_ver_minor) && 0xFFFF) << 16) |
-		(be32_to_cpu(reply->fw_ver_patch) && 0xFFFF);
+		((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
+		(be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
 	memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
 	props->max_mr_size = 0xFFFFFFFF;
 	props->page_size_cap = ~(C2_MIN_PAGESIZE-1);
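The fw_ver fix in the hunk above replaces logical '&&' with bitwise '&': with '&&', the minor and patch fields collapsed to 0 or 1 instead of being masked to 16 bits. The intended packing shown standalone (the helper name is illustrative):

    #include <linux/types.h>

    /* major in bits 63:32, minor in 31:16, patch in 15:0 */
    static u64 pack_fw_ver(u32 major, u32 minor, u32 patch)
    {
            return ((u64)major << 32) |
                   ((u64)(minor & 0xFFFF) << 16) |
                   (u64)(patch & 0xFFFF);
    }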
@@ -441,7 +441,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
  * involves initalizing the various limits and resouce pools that
  * comprise the RNIC instance.
  */
-int c2_rnic_init(struct c2_dev *c2dev)
+int __devinit c2_rnic_init(struct c2_dev *c2dev)
 {
 	int err;
 	u32 qsize, msgsize;
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
517 /* Initialize the Verbs Reply Queue */ 517 /* Initialize the Verbs Reply Queue */
518 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE)); 518 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
519 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE)); 519 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
520 q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL); 520 q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
521 &c2dev->rep_vq.host_dma, GFP_KERNEL);
521 if (!q1_pages) { 522 if (!q1_pages) {
522 err = -ENOMEM; 523 err = -ENOMEM;
523 goto bail1; 524 goto bail1;
524 } 525 }
525 c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
526 (void *)q1_pages, qsize * msgsize,
527 DMA_FROM_DEVICE);
528 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma); 526 pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
529 pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages, 527 pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
530 (unsigned long long) c2dev->rep_vq.host_dma); 528 (unsigned long long) c2dev->rep_vq.host_dma);
@@ -540,17 +538,15 @@ int c2_rnic_init(struct c2_dev *c2dev)
540 /* Initialize the Asynchronus Event Queue */ 538 /* Initialize the Asynchronus Event Queue */
541 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE)); 539 qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
542 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE)); 540 msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
543 q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL); 541 q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
542 &c2dev->aeq.host_dma, GFP_KERNEL);
544 if (!q2_pages) { 543 if (!q2_pages) {
545 err = -ENOMEM; 544 err = -ENOMEM;
546 goto bail2; 545 goto bail2;
547 } 546 }
548 c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
549 (void *)q2_pages, qsize * msgsize,
550 DMA_FROM_DEVICE);
551 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma); 547 pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
552 pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages, 548 pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q2_pages,
553 (unsigned long long) c2dev->rep_vq.host_dma); 549 (unsigned long long) c2dev->aeq.host_dma);
554 c2_mq_rep_init(&c2dev->aeq, 550 c2_mq_rep_init(&c2dev->aeq,
555 2, 551 2,
556 qsize, 552 qsize,
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
597 bail4: 593 bail4:
598 vq_term(c2dev); 594 vq_term(c2dev);
599 bail3: 595 bail3:
600 dma_unmap_single(c2dev->ibdev.dma_device, 596 dma_free_coherent(&c2dev->pcidev->dev,
601 pci_unmap_addr(&c2dev->aeq, mapping), 597 c2dev->aeq.q_size * c2dev->aeq.msg_size,
602 c2dev->aeq.q_size * c2dev->aeq.msg_size, 598 q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
603 DMA_FROM_DEVICE);
604 kfree(q2_pages);
605 bail2: 599 bail2:
606 dma_unmap_single(c2dev->ibdev.dma_device, 600 dma_free_coherent(&c2dev->pcidev->dev,
607 pci_unmap_addr(&c2dev->rep_vq, mapping), 601 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
608 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 602 q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
609 DMA_FROM_DEVICE);
610 kfree(q1_pages);
611 bail1: 603 bail1:
612 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 604 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
613 bail0: 605 bail0:
@@ -619,7 +611,7 @@ int c2_rnic_init(struct c2_dev *c2dev)
619/* 611/*
620 * Called by c2_remove to cleanup the RNIC resources. 612 * Called by c2_remove to cleanup the RNIC resources.
621 */ 613 */
622void c2_rnic_term(struct c2_dev *c2dev) 614void __devexit c2_rnic_term(struct c2_dev *c2dev)
623{ 615{
624 616
625 /* Close the open adapter instance */ 617 /* Close the open adapter instance */
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
640 /* Free the verbs request allocator */ 632 /* Free the verbs request allocator */
641 vq_term(c2dev); 633 vq_term(c2dev);
642 634
643 /* Unmap and free the asynchronus event queue */ 635 /* Free the asynchronus event queue */
644 dma_unmap_single(c2dev->ibdev.dma_device, 636 dma_free_coherent(&c2dev->pcidev->dev,
645 pci_unmap_addr(&c2dev->aeq, mapping), 637 c2dev->aeq.q_size * c2dev->aeq.msg_size,
646 c2dev->aeq.q_size * c2dev->aeq.msg_size, 638 c2dev->aeq.msg_pool.host,
647 DMA_FROM_DEVICE); 639 pci_unmap_addr(&c2dev->aeq, mapping));
648 kfree(c2dev->aeq.msg_pool.host); 640
649 641 /* Free the verbs reply queue */
650 /* Unmap and free the verbs reply queue */ 642 dma_free_coherent(&c2dev->pcidev->dev,
651 dma_unmap_single(c2dev->ibdev.dma_device, 643 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
652 pci_unmap_addr(&c2dev->rep_vq, mapping), 644 c2dev->rep_vq.msg_pool.host,
653 c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size, 645 pci_unmap_addr(&c2dev->rep_vq, mapping));
654 DMA_FROM_DEVICE);
655 kfree(c2dev->rep_vq.msg_pool.host);
656 646
657 /* Free the MQ shared pointer pool */ 647 /* Free the MQ shared pointer pool */
658 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool); 648 c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/infiniband/hw/ehca/Kconfig
index 922389b64394..727b10d89686 100644
--- a/drivers/infiniband/hw/ehca/Kconfig
+++ b/drivers/infiniband/hw/ehca/Kconfig
@@ -10,6 +10,7 @@ config INFINIBAND_EHCA
 config INFINIBAND_EHCA_SCALING
 	bool "Scaling support (EXPERIMENTAL)"
 	depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
+	default y
 	---help---
 	  eHCA scaling support schedules the CQ callbacks to different CPUs.
 
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index 3bac197f9014..214e2fdddeef 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -118,8 +118,7 @@ struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 		}
 		memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
 	}
-	/* for the time being we use a hard coded PMTU of 2048 Bytes */
-	av->av.pmtu = 4;
+	av->av.pmtu = EHCA_MAX_MTU;
 
 	/* dgid comes in grh.word_3 */
 	memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
@@ -193,7 +192,7 @@ int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
 		memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
 	}
 
-	new_ehca_av.pmtu = 4; /* see also comment in create_ah() */
+	new_ehca_av.pmtu = EHCA_MAX_MTU;
 
 	memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
 	       sizeof(ah_attr->grh.dgid));
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 5eae6ac48425..e1b618c5f685 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -40,6 +40,7 @@
40 */ 40 */
41 41
42#include "ehca_tools.h" 42#include "ehca_tools.h"
43#include "ehca_iverbs.h"
43#include "hcp_if.h" 44#include "hcp_if.h"
44 45
45int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props) 46int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
@@ -49,7 +50,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
49 ib_device); 50 ib_device);
50 struct hipz_query_hca *rblock; 51 struct hipz_query_hca *rblock;
51 52
52 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 53 rblock = ehca_alloc_fw_ctrlblock();
53 if (!rblock) { 54 if (!rblock) {
54 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 55 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
55 return -ENOMEM; 56 return -ENOMEM;
@@ -96,7 +97,7 @@ int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
96 = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX); 97 = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
97 98
98query_device1: 99query_device1:
99 kfree(rblock); 100 ehca_free_fw_ctrlblock(rblock);
100 101
101 return ret; 102 return ret;
102} 103}
@@ -109,7 +110,7 @@ int ehca_query_port(struct ib_device *ibdev,
109 ib_device); 110 ib_device);
110 struct hipz_query_port *rblock; 111 struct hipz_query_port *rblock;
111 112
112 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 113 rblock = ehca_alloc_fw_ctrlblock();
113 if (!rblock) { 114 if (!rblock) {
114 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 115 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
115 return -ENOMEM; 116 return -ENOMEM;
@@ -162,7 +163,7 @@ int ehca_query_port(struct ib_device *ibdev,
162 props->active_speed = 0x1; 163 props->active_speed = 0x1;
163 164
164query_port1: 165query_port1:
165 kfree(rblock); 166 ehca_free_fw_ctrlblock(rblock);
166 167
167 return ret; 168 return ret;
168} 169}
@@ -178,7 +179,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
178 return -EINVAL; 179 return -EINVAL;
179 } 180 }
180 181
181 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 182 rblock = ehca_alloc_fw_ctrlblock();
182 if (!rblock) { 183 if (!rblock) {
183 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 184 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
184 return -ENOMEM; 185 return -ENOMEM;
@@ -193,7 +194,7 @@ int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
193 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16)); 194 memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
194 195
195query_pkey1: 196query_pkey1:
196 kfree(rblock); 197 ehca_free_fw_ctrlblock(rblock);
197 198
198 return ret; 199 return ret;
199} 200}
@@ -211,7 +212,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
211 return -EINVAL; 212 return -EINVAL;
212 } 213 }
213 214
214 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 215 rblock = ehca_alloc_fw_ctrlblock();
215 if (!rblock) { 216 if (!rblock) {
216 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 217 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
217 return -ENOMEM; 218 return -ENOMEM;
@@ -227,7 +228,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
227 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64)); 228 memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
228 229
229query_gid1: 230query_gid1:
230 kfree(rblock); 231 ehca_free_fw_ctrlblock(rblock);
231 232
232 return ret; 233 return ret;
233} 234}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 048cc443d1e7..c3ea746e9045 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -45,6 +45,7 @@
45#include "ehca_tools.h" 45#include "ehca_tools.h"
46#include "hcp_if.h" 46#include "hcp_if.h"
47#include "hipz_fns.h" 47#include "hipz_fns.h"
48#include "ipz_pt_fn.h"
48 49
49#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1) 50#define EQE_COMPLETION_EVENT EHCA_BMASK_IBM(1,1)
50#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31) 51#define EQE_CQ_QP_NUMBER EHCA_BMASK_IBM(8,31)
@@ -137,38 +138,36 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
137 u64 *rblock; 138 u64 *rblock;
138 unsigned long block_count; 139 unsigned long block_count;
139 140
140 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 141 rblock = ehca_alloc_fw_ctrlblock();
141 if (!rblock) { 142 if (!rblock) {
142 ehca_err(&shca->ib_device, "Cannot allocate rblock memory."); 143 ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
143 ret = -ENOMEM; 144 ret = -ENOMEM;
144 goto error_data1; 145 goto error_data1;
145 } 146 }
146 147
148 /* rblock must be 4K aligned and should be 4K large */
147 ret = hipz_h_error_data(shca->ipz_hca_handle, 149 ret = hipz_h_error_data(shca->ipz_hca_handle,
148 resource, 150 resource,
149 rblock, 151 rblock,
150 &block_count); 152 &block_count);
151 153
152 if (ret == H_R_STATE) { 154 if (ret == H_R_STATE)
153 ehca_err(&shca->ib_device, 155 ehca_err(&shca->ib_device,
154 "No error data is available: %lx.", resource); 156 "No error data is available: %lx.", resource);
155 }
156 else if (ret == H_SUCCESS) { 157 else if (ret == H_SUCCESS) {
157 int length; 158 int length;
158 159
159 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]); 160 length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
160 161
161 if (length > PAGE_SIZE) 162 if (length > EHCA_PAGESIZE)
162 length = PAGE_SIZE; 163 length = EHCA_PAGESIZE;
163 164
164 print_error_data(shca, data, rblock, length); 165 print_error_data(shca, data, rblock, length);
165 } 166 } else
166 else {
167 ehca_err(&shca->ib_device, 167 ehca_err(&shca->ib_device,
168 "Error data could not be fetched: %lx", resource); 168 "Error data could not be fetched: %lx", resource);
169 }
170 169
171 kfree(rblock); 170 ehca_free_fw_ctrlblock(rblock);
172 171
173error_data1: 172error_data1:
174 return ret; 173 return ret;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 319c39d47f3a..3720e3032cce 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -179,4 +179,12 @@ int ehca_mmap_register(u64 physical,void **mapped,
 
 int ehca_munmap(unsigned long addr, size_t len);
 
+#ifdef CONFIG_PPC_64K_PAGES
+void *ehca_alloc_fw_ctrlblock(void);
+void ehca_free_fw_ctrlblock(void *ptr);
+#else
+#define ehca_alloc_fw_ctrlblock() ((void *) get_zeroed_page(GFP_KERNEL))
+#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
+#endif
+
 #endif
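ehca_alloc_fw_ctrlblock()/ehca_free_fw_ctrlblock() hide how the 4 KB firmware control block is obtained: a plain zeroed page on 4 KB-page kernels, and a dedicated slab cache under CONFIG_PPC_64K_PAGES (see the ehca_main.c hunks below). Callers all follow the same shape; a trimmed sketch of that usage, assuming the ehca headers that declare the wrappers and the rblock type, with the firmware query itself elided:

    static int example_query(struct ehca_shca *shca)
    {
            struct hipz_query_hca *rblock;
            int ret = 0;

            rblock = ehca_alloc_fw_ctrlblock();
            if (!rblock)
                    return -ENOMEM;

            /* ... issue the firmware query and copy fields out of rblock ... */

            ehca_free_fw_ctrlblock(rblock);
            return ret;
    }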
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 024d511c4b58..3d1c1c535038 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -40,6 +40,9 @@
40 * POSSIBILITY OF SUCH DAMAGE. 40 * POSSIBILITY OF SUCH DAMAGE.
41 */ 41 */
42 42
43#ifdef CONFIG_PPC_64K_PAGES
44#include <linux/slab.h>
45#endif
43#include "ehca_classes.h" 46#include "ehca_classes.h"
44#include "ehca_iverbs.h" 47#include "ehca_iverbs.h"
45#include "ehca_mrmw.h" 48#include "ehca_mrmw.h"
@@ -49,7 +52,7 @@
49MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
50MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); 53MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
51MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver"); 54MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
52MODULE_VERSION("SVNEHCA_0017"); 55MODULE_VERSION("SVNEHCA_0019");
53 56
54int ehca_open_aqp1 = 0; 57int ehca_open_aqp1 = 0;
55int ehca_debug_level = 0; 58int ehca_debug_level = 0;
@@ -94,11 +97,31 @@ spinlock_t ehca_cq_idr_lock;
94DEFINE_IDR(ehca_qp_idr); 97DEFINE_IDR(ehca_qp_idr);
95DEFINE_IDR(ehca_cq_idr); 98DEFINE_IDR(ehca_cq_idr);
96 99
100
97static struct list_head shca_list; /* list of all registered ehcas */ 101static struct list_head shca_list; /* list of all registered ehcas */
98static spinlock_t shca_list_lock; 102static spinlock_t shca_list_lock;
99 103
100static struct timer_list poll_eqs_timer; 104static struct timer_list poll_eqs_timer;
101 105
106#ifdef CONFIG_PPC_64K_PAGES
107static struct kmem_cache *ctblk_cache = NULL;
108
109void *ehca_alloc_fw_ctrlblock(void)
110{
111 void *ret = kmem_cache_zalloc(ctblk_cache, SLAB_KERNEL);
112 if (!ret)
113 ehca_gen_err("Out of memory for ctblk");
114 return ret;
115}
116
117void ehca_free_fw_ctrlblock(void *ptr)
118{
119 if (ptr)
120 kmem_cache_free(ctblk_cache, ptr);
121
122}
123#endif
124
102static int ehca_create_slab_caches(void) 125static int ehca_create_slab_caches(void)
103{ 126{
104 int ret; 127 int ret;
@@ -133,6 +156,17 @@ static int ehca_create_slab_caches(void)
133 goto create_slab_caches5; 156 goto create_slab_caches5;
134 } 157 }
135 158
159#ifdef CONFIG_PPC_64K_PAGES
160 ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
161 EHCA_PAGESIZE, H_CB_ALIGNMENT,
162 SLAB_HWCACHE_ALIGN,
163 NULL, NULL);
164 if (!ctblk_cache) {
165 ehca_gen_err("Cannot create ctblk SLAB cache.");
166 ehca_cleanup_mrmw_cache();
167 goto create_slab_caches5;
168 }
169#endif
136 return 0; 170 return 0;
137 171
138create_slab_caches5: 172create_slab_caches5:
@@ -157,6 +191,10 @@ static void ehca_destroy_slab_caches(void)
157 ehca_cleanup_qp_cache(); 191 ehca_cleanup_qp_cache();
158 ehca_cleanup_cq_cache(); 192 ehca_cleanup_cq_cache();
159 ehca_cleanup_pd_cache(); 193 ehca_cleanup_pd_cache();
194#ifdef CONFIG_PPC_64K_PAGES
195 if (ctblk_cache)
196 kmem_cache_destroy(ctblk_cache);
197#endif
160} 198}
161 199
162#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39) 200#define EHCA_HCAAVER EHCA_BMASK_IBM(32,39)
@@ -168,7 +206,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
168 u64 h_ret; 206 u64 h_ret;
169 struct hipz_query_hca *rblock; 207 struct hipz_query_hca *rblock;
170 208
171 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 209 rblock = ehca_alloc_fw_ctrlblock();
172 if (!rblock) { 210 if (!rblock) {
173 ehca_gen_err("Cannot allocate rblock memory."); 211 ehca_gen_err("Cannot allocate rblock memory.");
174 return -ENOMEM; 212 return -ENOMEM;
@@ -211,7 +249,7 @@ int ehca_sense_attributes(struct ehca_shca *shca)
211 shca->sport[1].rate = IB_RATE_30_GBPS; 249 shca->sport[1].rate = IB_RATE_30_GBPS;
212 250
213num_ports1: 251num_ports1:
214 kfree(rblock); 252 ehca_free_fw_ctrlblock(rblock);
215 return ret; 253 return ret;
216} 254}
217 255
@@ -220,7 +258,7 @@ static int init_node_guid(struct ehca_shca *shca)
220 int ret = 0; 258 int ret = 0;
221 struct hipz_query_hca *rblock; 259 struct hipz_query_hca *rblock;
222 260
223 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 261 rblock = ehca_alloc_fw_ctrlblock();
224 if (!rblock) { 262 if (!rblock) {
225 ehca_err(&shca->ib_device, "Can't allocate rblock memory."); 263 ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
226 return -ENOMEM; 264 return -ENOMEM;
@@ -235,7 +273,7 @@ static int init_node_guid(struct ehca_shca *shca)
235 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64)); 273 memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
236 274
237init_node_guid1: 275init_node_guid1:
238 kfree(rblock); 276 ehca_free_fw_ctrlblock(rblock);
239 return ret; 277 return ret;
240} 278}
241 279
@@ -431,7 +469,7 @@ static ssize_t ehca_show_##name(struct device *dev, \
431 \ 469 \
432 shca = dev->driver_data; \ 470 shca = dev->driver_data; \
433 \ 471 \
434 rblock = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); \ 472 rblock = ehca_alloc_fw_ctrlblock(); \
435 if (!rblock) { \ 473 if (!rblock) { \
436 dev_err(dev, "Can't allocate rblock memory."); \ 474 dev_err(dev, "Can't allocate rblock memory."); \
437 return 0; \ 475 return 0; \
@@ -439,12 +477,12 @@ static ssize_t ehca_show_##name(struct device *dev, \
439 \ 477 \
440 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \ 478 if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
441 dev_err(dev, "Can't query device properties"); \ 479 dev_err(dev, "Can't query device properties"); \
442 kfree(rblock); \ 480 ehca_free_fw_ctrlblock(rblock); \
443 return 0; \ 481 return 0; \
444 } \ 482 } \
445 \ 483 \
446 data = rblock->name; \ 484 data = rblock->name; \
447 kfree(rblock); \ 485 ehca_free_fw_ctrlblock(rblock); \
448 \ 486 \
449 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \ 487 if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
450 return snprintf(buf, 256, "1\n"); \ 488 return snprintf(buf, 256, "1\n"); \
@@ -752,7 +790,7 @@ int __init ehca_module_init(void)
752 int ret; 790 int ret;
753 791
754 printk(KERN_INFO "eHCA Infiniband Device Driver " 792 printk(KERN_INFO "eHCA Infiniband Device Driver "
755 "(Rel.: SVNEHCA_0017)\n"); 793 "(Rel.: SVNEHCA_0019)\n");
756 idr_init(&ehca_qp_idr); 794 idr_init(&ehca_qp_idr);
757 idr_init(&ehca_cq_idr); 795 idr_init(&ehca_cq_idr);
758 spin_lock_init(&ehca_qp_idr_lock); 796 spin_lock_init(&ehca_qp_idr_lock);
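On 64 KB-page PowerPC kernels, get_zeroed_page() would hand back far more than the 4 KB, 4 KB-aligned block the firmware interface expects, so the ehca_main.c hunks above back ehca_alloc_fw_ctrlblock() with a slab cache created at module init. A reduced sketch of that cache lifecycle; names are illustrative, and the six-argument kmem_cache_create() matches the 2.6.19-era API used in the hunk (where SLAB_KERNEL was an alias for GFP_KERNEL):

    #include <linux/slab.h>

    static struct kmem_cache *example_cache;

    static int example_cache_init(void)
    {
            /* 4 KB objects with 4 KB alignment, as the firmware requires */
            example_cache = kmem_cache_create("example_ctblk", 4096, 4096,
                                              SLAB_HWCACHE_ALIGN, NULL, NULL);
            return example_cache ? 0 : -ENOMEM;
    }

    static void *example_ctrlblock_alloc(void)
    {
            return kmem_cache_zalloc(example_cache, GFP_KERNEL);
    }

    static void example_cache_exit(void)
    {
            if (example_cache)
                    kmem_cache_destroy(example_cache);
    }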
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 5ca65441e1da..abce676c0ae0 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -1013,7 +1013,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1013 u32 i; 1013 u32 i;
1014 u64 *kpage; 1014 u64 *kpage;
1015 1015
1016 kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 1016 kpage = ehca_alloc_fw_ctrlblock();
1017 if (!kpage) { 1017 if (!kpage) {
1018 ehca_err(&shca->ib_device, "kpage alloc failed"); 1018 ehca_err(&shca->ib_device, "kpage alloc failed");
1019 ret = -ENOMEM; 1019 ret = -ENOMEM;
@@ -1092,7 +1092,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1092 1092
1093 1093
1094ehca_reg_mr_rpages_exit1: 1094ehca_reg_mr_rpages_exit1:
1095 kfree(kpage); 1095 ehca_free_fw_ctrlblock(kpage);
1096ehca_reg_mr_rpages_exit0: 1096ehca_reg_mr_rpages_exit0:
1097 if (ret) 1097 if (ret)
1098 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " 1098 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
@@ -1124,7 +1124,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1124 ehca_mrmw_map_acl(acl, &hipz_acl); 1124 ehca_mrmw_map_acl(acl, &hipz_acl);
1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl); 1125 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1126 1126
1127 kpage = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 1127 kpage = ehca_alloc_fw_ctrlblock();
1128 if (!kpage) { 1128 if (!kpage) {
1129 ehca_err(&shca->ib_device, "kpage alloc failed"); 1129 ehca_err(&shca->ib_device, "kpage alloc failed");
1130 ret = -ENOMEM; 1130 ret = -ENOMEM;
@@ -1181,7 +1181,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1181 } 1181 }
1182 1182
1183ehca_rereg_mr_rereg1_exit1: 1183ehca_rereg_mr_rereg1_exit1:
1184 kfree(kpage); 1184 ehca_free_fw_ctrlblock(kpage);
1185ehca_rereg_mr_rereg1_exit0: 1185ehca_rereg_mr_rereg1_exit0:
1186 if ( ret && (ret != -EAGAIN) ) 1186 if ( ret && (ret != -EAGAIN) )
1187 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " 1187 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4394123cdbd7..8682aa50c707 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -732,8 +732,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
732 u64 h_ret; 732 u64 h_ret;
733 struct ipz_queue *squeue; 733 struct ipz_queue *squeue;
734 void *bad_send_wqe_p, *bad_send_wqe_v; 734 void *bad_send_wqe_p, *bad_send_wqe_v;
735 void *squeue_start_p, *squeue_end_p; 735 u64 q_ofs;
736 void *squeue_start_v, *squeue_end_v;
737 struct ehca_wqe *wqe; 736 struct ehca_wqe *wqe;
738 int qp_num = my_qp->ib_qp.qp_num; 737 int qp_num = my_qp->ib_qp.qp_num;
739 738
@@ -755,26 +754,23 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
755 if (ehca_debug_level) 754 if (ehca_debug_level)
756 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); 755 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
757 squeue = &my_qp->ipz_squeue; 756 squeue = &my_qp->ipz_squeue;
758 squeue_start_p = (void*)virt_to_abs(ipz_qeit_calc(squeue, 0L)); 757 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
759 squeue_end_p = squeue_start_p+squeue->queue_length; 758 ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
760 squeue_start_v = abs_to_virt((u64)squeue_start_p); 759 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
761 squeue_end_v = abs_to_virt((u64)squeue_end_p); 760 return -EFAULT;
762 ehca_dbg(&shca->ib_device, "qp_num=%x squeue_start_v=%p squeue_end_v=%p", 761 }
763 qp_num, squeue_start_v, squeue_end_v);
764 762
765 /* loop sets wqe's purge bit */ 763 /* loop sets wqe's purge bit */
766 wqe = (struct ehca_wqe*)bad_send_wqe_v; 764 wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
767 *bad_wqe_cnt = 0; 765 *bad_wqe_cnt = 0;
768 while (wqe->optype != 0xff && wqe->wqef != 0xff) { 766 while (wqe->optype != 0xff && wqe->wqef != 0xff) {
769 if (ehca_debug_level) 767 if (ehca_debug_level)
770 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); 768 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
771 wqe->nr_of_data_seg = 0; /* suppress data access */ 769 wqe->nr_of_data_seg = 0; /* suppress data access */
772 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ 770 wqe->wqef = WQEF_PURGE; /* WQE to be purged */
773 wqe = (struct ehca_wqe*)((u8*)wqe+squeue->qe_size); 771 q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
772 wqe = (struct ehca_wqe*)ipz_qeit_calc(squeue, q_ofs);
774 *bad_wqe_cnt = (*bad_wqe_cnt)+1; 773 *bad_wqe_cnt = (*bad_wqe_cnt)+1;
775 if ((void*)wqe >= squeue_end_v) {
776 wqe = squeue_start_v;
777 }
778 } 774 }
779 /* 775 /*
780 * bad wqe will be reprocessed and ignored when pol_cq() is called, 776 * bad wqe will be reprocessed and ignored when pol_cq() is called,
@@ -811,8 +807,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
811 unsigned long spl_flags = 0; 807 unsigned long spl_flags = 0;
812 808
813 /* do query_qp to obtain current attr values */ 809 /* do query_qp to obtain current attr values */
814 mqpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL); 810 mqpcb = ehca_alloc_fw_ctrlblock();
815 if (mqpcb == NULL) { 811 if (!mqpcb) {
816 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb " 812 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
817 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num); 813 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
818 return -ENOMEM; 814 return -ENOMEM;
@@ -1225,7 +1221,7 @@ modify_qp_exit2:
1225 } 1221 }
1226 1222
1227modify_qp_exit1: 1223modify_qp_exit1:
1228 kfree(mqpcb); 1224 ehca_free_fw_ctrlblock(mqpcb);
1229 1225
1230 return ret; 1226 return ret;
1231} 1227}
@@ -1277,7 +1273,7 @@ int ehca_query_qp(struct ib_qp *qp,
1277 return -EINVAL; 1273 return -EINVAL;
1278 } 1274 }
1279 1275
1280 qpcb = kzalloc(H_CB_ALIGNMENT, GFP_KERNEL ); 1276 qpcb = ehca_alloc_fw_ctrlblock();
1281 if (!qpcb) { 1277 if (!qpcb) {
1282 ehca_err(qp->device,"Out of memory for qpcb " 1278 ehca_err(qp->device,"Out of memory for qpcb "
1283 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num); 1279 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
@@ -1401,7 +1397,7 @@ int ehca_query_qp(struct ib_qp *qp,
1401 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1397 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
1402 1398
1403query_qp_exit1: 1399query_qp_exit1:
1404 kfree(qpcb); 1400 ehca_free_fw_ctrlblock(qpcb);
1405 1401
1406 return ret; 1402 return ret;
1407} 1403}
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/infiniband/hw/ehca/ehca_tools.h
index 809da3ef706b..973c4b591545 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/infiniband/hw/ehca/ehca_tools.h
@@ -63,6 +63,7 @@
 #include <asm/ibmebus.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
+#include <asm/hvcall.h>
 
 extern int ehca_debug_level;
 
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index 3fc92b031c50..fad91368dc5a 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -45,6 +45,8 @@
 
 #include "ehca_tools.h"
 
+#define EHCA_MAX_MTU 4
+
 /* QP Table Entry Memory Map */
 struct hipz_qptemm {
 	u64 qpx_hcr;
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index e028ff1588cc..bf7a40088f61 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -70,6 +70,19 @@ void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
 	return ret;
 }
 
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
+{
+	int i;
+	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
+		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
+		if (addr >= page && addr < page + queue->pagesize) {
+			*q_offset = addr - page + i * queue->pagesize;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
 int ipz_queue_ctor(struct ipz_queue *queue,
 		   const u32 nr_of_pages,
 		   const u32 pagesize, const u32 qe_size, const u32 nr_of_sg)
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 2f13509d5257..dc3bda2634b7 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -150,6 +150,21 @@ static inline void *ipz_qeit_reset(struct ipz_queue *queue)
 	return ipz_qeit_get(queue);
 }
 
+/*
+ * return the q_offset corresponding to an absolute address
+ */
+int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
+
+/*
+ * return the next queue offset. don't modify the queue.
+ */
+static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
+{
+	offset += queue->qe_size;
+	if (offset >= queue->queue_length) offset = 0;
+	return offset;
+}
+
 /* struct generic page table */
 struct ipz_pt {
 	u64 entries[EHCA_PT_ENTRIES];
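ipz_queue_abs_to_offset() and ipz_queue_advance_offset() let ehca_qp.c walk the send queue by byte offset instead of by virtual address, which works even though the queue is built from scattered pages; advancing past the end wraps back to offset 0. The wrap-around step on its own, with an illustrative structure rather than the driver's struct ipz_queue:

    #include <linux/types.h>

    struct example_queue {
            u64 queue_length;   /* total bytes in the ring */
            u64 qe_size;        /* bytes per queue entry */
    };

    /* Step to the next entry, wrapping to the start of the ring at the end. */
    static inline u64 example_advance(const struct example_queue *q, u64 offset)
    {
            offset += q->qe_size;
            if (offset >= q->queue_length)
                    offset = 0;
            return offset;
    }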
diff --git a/drivers/infiniband/hw/ipath/Kconfig b/drivers/infiniband/hw/ipath/Kconfig
index 574a678e7fdd..90c14543677d 100644
--- a/drivers/infiniband/hw/ipath/Kconfig
+++ b/drivers/infiniband/hw/ipath/Kconfig
@@ -1,6 +1,6 @@
 config INFINIBAND_IPATH
 	tristate "QLogic InfiniPath Driver"
-	depends on PCI_MSI && 64BIT && INFINIBAND
+	depends on (PCI_MSI || HT_IRQ) && 64BIT && INFINIBAND && NET
 	---help---
 	  This is a driver for QLogic InfiniPath host channel adapters,
 	  including InfiniBand verbs support. This driver allows these
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile
index 5e29cb0095e5..7dc10551cf18 100644
--- a/drivers/infiniband/hw/ipath/Makefile
+++ b/drivers/infiniband/hw/ipath/Makefile
@@ -10,8 +10,6 @@ ib_ipath-y := \
 	ipath_eeprom.o \
 	ipath_file_ops.o \
 	ipath_fs.o \
-	ipath_iba6110.o \
-	ipath_iba6120.o \
 	ipath_init_chip.o \
 	ipath_intr.o \
 	ipath_keys.o \
@@ -31,5 +29,8 @@ ib_ipath-y := \
 	ipath_verbs_mcast.o \
 	ipath_verbs.o
 
+ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
+ib_ipath-$(CONFIG_PCI_MSI) += ipath_iba6120.o
+
 ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
 ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c
index 29958b6e0214..28c087b824c2 100644
--- a/drivers/infiniband/hw/ipath/ipath_diag.c
+++ b/drivers/infiniband/hw/ipath/ipath_diag.c
@@ -67,19 +67,54 @@ static struct file_operations diag_file_ops = {
67 .release = ipath_diag_release 67 .release = ipath_diag_release
68}; 68};
69 69
70static ssize_t ipath_diagpkt_write(struct file *fp,
71 const char __user *data,
72 size_t count, loff_t *off);
73
74static struct file_operations diagpkt_file_ops = {
75 .owner = THIS_MODULE,
76 .write = ipath_diagpkt_write,
77};
78
79static atomic_t diagpkt_count = ATOMIC_INIT(0);
80static struct cdev *diagpkt_cdev;
81static struct class_device *diagpkt_class_dev;
82
70int ipath_diag_add(struct ipath_devdata *dd) 83int ipath_diag_add(struct ipath_devdata *dd)
71{ 84{
72 char name[16]; 85 char name[16];
86 int ret = 0;
87
88 if (atomic_inc_return(&diagpkt_count) == 1) {
89 ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
90 "ipath_diagpkt", &diagpkt_file_ops,
91 &diagpkt_cdev, &diagpkt_class_dev);
92
93 if (ret) {
94 ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
95 "device: %d", ret);
96 goto done;
97 }
98 }
73 99
74 snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit); 100 snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
75 101
76 return ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name, 102 ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
77 &diag_file_ops, &dd->diag_cdev, 103 &diag_file_ops, &dd->diag_cdev,
78 &dd->diag_class_dev); 104 &dd->diag_class_dev);
105 if (ret)
106 ipath_dev_err(dd, "Couldn't create %s device: %d",
107 name, ret);
108
109done:
110 return ret;
79} 111}
80 112
81void ipath_diag_remove(struct ipath_devdata *dd) 113void ipath_diag_remove(struct ipath_devdata *dd)
82{ 114{
115 if (atomic_dec_and_test(&diagpkt_count))
116 ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
117
83 ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev); 118 ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_class_dev);
84} 119}
85 120
@@ -275,30 +310,6 @@ bail:
275 return ret; 310 return ret;
276} 311}
277 312
278static ssize_t ipath_diagpkt_write(struct file *fp,
279 const char __user *data,
280 size_t count, loff_t *off);
281
282static struct file_operations diagpkt_file_ops = {
283 .owner = THIS_MODULE,
284 .write = ipath_diagpkt_write,
285};
286
287static struct cdev *diagpkt_cdev;
288static struct class_device *diagpkt_class_dev;
289
290int __init ipath_diagpkt_add(void)
291{
292 return ipath_cdev_init(IPATH_DIAGPKT_MINOR,
293 "ipath_diagpkt", &diagpkt_file_ops,
294 &diagpkt_cdev, &diagpkt_class_dev);
295}
296
297void __exit ipath_diagpkt_remove(void)
298{
299 ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_class_dev);
300}
301
302/** 313/**
303 * ipath_diagpkt_write - write an IB packet 314 * ipath_diagpkt_write - write an IB packet
304 * @fp: the diag data device file pointer 315 * @fp: the diag data device file pointer
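The ipath_diag.c change above makes the single shared ipath_diagpkt node refcounted: the first unit through ipath_diag_add() creates it and the last one through ipath_diag_remove() destroys it, using an atomic counter instead of a global init/exit hook. The counting pattern in isolation; the create/destroy helpers stand in for ipath_cdev_init()/ipath_cdev_cleanup():

    #include <asm/atomic.h>   /* <linux/atomic.h> on current kernels */

    static atomic_t example_users = ATOMIC_INIT(0);

    static int example_create_shared(void)   { return 0; }
    static void example_destroy_shared(void) { }

    /* The first caller creates the shared resource... */
    static int example_add(void)
    {
            if (atomic_inc_return(&example_users) == 1)
                    return example_create_shared();
            return 0;
    }

    /* ...and the last caller to leave tears it down. */
    static void example_remove(void)
    {
            if (atomic_dec_and_test(&example_users))
                    example_destroy_shared();
    }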
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 12cefa658f3b..1aeddb48e355 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -304,7 +304,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
304 } 304 }
305 addr = pci_resource_start(pdev, 0); 305 addr = pci_resource_start(pdev, 0);
306 len = pci_resource_len(pdev, 0); 306 len = pci_resource_len(pdev, 0);
307 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %x, vend %x/%x " 307 ipath_cdbg(VERBOSE, "regbase (0) %llx len %d pdev->irq %d, vend %x/%x "
308 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor, 308 "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
309 ent->device, ent->driver_data); 309 ent->device, ent->driver_data);
310 310
@@ -390,12 +390,16 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
390 390
391 /* setup the chip-specific functions, as early as possible. */ 391 /* setup the chip-specific functions, as early as possible. */
392 switch (ent->device) { 392 switch (ent->device) {
393#ifdef CONFIG_HT_IRQ
393 case PCI_DEVICE_ID_INFINIPATH_HT: 394 case PCI_DEVICE_ID_INFINIPATH_HT:
394 ipath_init_iba6110_funcs(dd); 395 ipath_init_iba6110_funcs(dd);
395 break; 396 break;
397#endif
398#ifdef CONFIG_PCI_MSI
396 case PCI_DEVICE_ID_INFINIPATH_PE800: 399 case PCI_DEVICE_ID_INFINIPATH_PE800:
397 ipath_init_iba6120_funcs(dd); 400 ipath_init_iba6120_funcs(dd);
398 break; 401 break;
402#endif
399 default: 403 default:
400 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, " 404 ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
401 "failing\n", ent->device); 405 "failing\n", ent->device);
@@ -467,15 +471,15 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
467 * check 0 irq after we return from chip-specific bus setup, since 471 * check 0 irq after we return from chip-specific bus setup, since
468 * that can affect this due to setup 472 * that can affect this due to setup
469 */ 473 */
470 if (!pdev->irq) 474 if (!dd->ipath_irq)
471 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't " 475 ipath_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
472 "work\n"); 476 "work\n");
473 else { 477 else {
474 ret = request_irq(pdev->irq, ipath_intr, IRQF_SHARED, 478 ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
475 IPATH_DRV_NAME, dd); 479 IPATH_DRV_NAME, dd);
476 if (ret) { 480 if (ret) {
477 ipath_dev_err(dd, "Couldn't setup irq handler, " 481 ipath_dev_err(dd, "Couldn't setup irq handler, "
478 "irq=%u: %d\n", pdev->irq, ret); 482 "irq=%d: %d\n", dd->ipath_irq, ret);
479 goto bail_iounmap; 483 goto bail_iounmap;
480 } 484 }
481 } 485 }
@@ -637,11 +641,10 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
637 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs 641 * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
638 * for all versions of the driver, if they were allocated 642 * for all versions of the driver, if they were allocated
639 */ 643 */
640 if (pdev->irq) { 644 if (dd->ipath_irq) {
641 ipath_cdbg(VERBOSE, 645 ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
642 "unit %u free_irq of irq %x\n", 646 dd->ipath_unit, dd->ipath_irq);
643 dd->ipath_unit, pdev->irq); 647 dd->ipath_f_free_irq(dd);
644 free_irq(pdev->irq, dd);
645 } else 648 } else
646 ipath_dbg("irq is 0, not doing free_irq " 649 ipath_dbg("irq is 0, not doing free_irq "
647 "for unit %u\n", dd->ipath_unit); 650 "for unit %u\n", dd->ipath_unit);
@@ -2005,18 +2008,8 @@ static int __init infinipath_init(void)
2005 goto bail_group; 2008 goto bail_group;
2006 } 2009 }
2007 2010
2008 ret = ipath_diagpkt_add();
2009 if (ret < 0) {
2010 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
2011 "diag data device: error %d\n", -ret);
2012 goto bail_ipathfs;
2013 }
2014
2015 goto bail; 2011 goto bail;
2016 2012
2017bail_ipathfs:
2018 ipath_exit_ipathfs();
2019
2020bail_group: 2013bail_group:
2021 ipath_driver_remove_group(&ipath_driver.driver); 2014 ipath_driver_remove_group(&ipath_driver.driver);
2022 2015
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index 9e4e8d4c6e20..e57c7a351cb5 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/htirq.h>
41 42
42#include "ipath_kernel.h" 43#include "ipath_kernel.h"
43#include "ipath_registers.h" 44#include "ipath_registers.h"
@@ -913,49 +914,40 @@ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
913 } 914 }
914} 915}
915 916
916static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev, 917static int ipath_ht_intconfig(struct ipath_devdata *dd)
917 int pos)
918{ 918{
919 u32 int_handler_addr_lower; 919 int ret;
920 u32 int_handler_addr_upper;
921 u64 ihandler;
922 u32 intvec;
923 920
924 /* use indirection register to get the intr handler */ 921 if (dd->ipath_intconfig) {
925 pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x10); 922 ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
926 pci_read_config_dword(pdev, pos + 4, &int_handler_addr_lower); 923 dd->ipath_intconfig); /* interrupt address */
927 pci_write_config_byte(pdev, pos + HT_INTR_REG_INDEX, 0x11); 924 ret = 0;
928 pci_read_config_dword(pdev, pos + 4, &int_handler_addr_upper); 925 } else {
926 ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
927 "interrupt address\n");
928 ret = -EINVAL;
929 }
929 930
930 ihandler = (u64) int_handler_addr_lower | 931 return ret;
931 ((u64) int_handler_addr_upper << 32); 932}
933
934static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
935 struct ht_irq_msg *msg)
936{
937 struct ipath_devdata *dd = pci_get_drvdata(dev);
938 u64 prev_intconfig = dd->ipath_intconfig;
939
940 dd->ipath_intconfig = msg->address_lo;
941 dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
932 942
933 /* 943 /*
934 * kernels with CONFIG_PCI_MSI set the vector in the irq field of 944 * If the previous value of dd->ipath_intconfig is zero, we're
935 * struct pci_device, so we use that to program the internal 945 * getting configured for the first time, and must not program the
936 * interrupt register (not config space) with that value. The BIOS 946 * intconfig register here (it will be programmed later, when the
937 * must still have done the basic MSI setup. 947 * hardware is ready). Otherwise, we should.
938 */
939 intvec = pdev->irq;
940 /*
941 * clear any vector bits there; normally not set but we'll overload
942 * this for some debug purposes (setting the HTC debug register
943 * value from software, rather than GPIOs), so it might be set on a
944 * driver reload.
945 */ 948 */
946 ihandler &= ~0xff0000; 949 if (prev_intconfig)
947 /* x86 vector goes in intrinfo[23:16] */ 950 ipath_ht_intconfig(dd);
948 ihandler |= intvec << 16;
949 ipath_cdbg(VERBOSE, "ihandler lower %x, upper %x, intvec %x, "
950 "interruptconfig %llx\n", int_handler_addr_lower,
951 int_handler_addr_upper, intvec,
952 (unsigned long long) ihandler);
953
954 /* can't program yet, so save for interrupt setup */
955 dd->ipath_intconfig = ihandler;
956 /* keep going, so we find link control stuff also */
957
958 return ihandler != 0;
959} 951}
960 952
961/** 953/**
@@ -971,12 +963,19 @@ static int set_int_handler(struct ipath_devdata *dd, struct pci_dev *pdev,
971static int ipath_setup_ht_config(struct ipath_devdata *dd, 963static int ipath_setup_ht_config(struct ipath_devdata *dd,
972 struct pci_dev *pdev) 964 struct pci_dev *pdev)
973{ 965{
974 int pos, ret = 0; 966 int pos, ret;
975 int ihandler = 0; 967
968 ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
969 if (ret < 0) {
970 ipath_dev_err(dd, "Couldn't create interrupt handler: "
971 "err %d\n", ret);
972 goto bail;
973 }
974 dd->ipath_irq = ret;
975 ret = 0;
976 976
977 /* 977 /*
978 * Read the capability info to find the interrupt info, and also 978 * Handle clearing CRC errors in linkctrl register if necessary. We
979 * handle clearing CRC errors in linkctrl register if necessary. We
980 * do this early, before we ever enable errors or hardware errors, 979 * do this early, before we ever enable errors or hardware errors,
981 * mostly to avoid causing the chip to enter freeze mode. 980 * mostly to avoid causing the chip to enter freeze mode.
982 */ 981 */
@@ -1000,17 +999,9 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
1000 } 999 }
1001 if (!(cap_type & 0xE0)) 1000 if (!(cap_type & 0xE0))
1002 slave_or_pri_blk(dd, pdev, pos, cap_type); 1001 slave_or_pri_blk(dd, pdev, pos, cap_type);
1003 else if (cap_type == HT_INTR_DISC_CONFIG)
1004 ihandler = set_int_handler(dd, pdev, pos);
1005 } while ((pos = pci_find_next_capability(pdev, pos, 1002 } while ((pos = pci_find_next_capability(pdev, pos,
1006 PCI_CAP_ID_HT))); 1003 PCI_CAP_ID_HT)));
1007 1004
1008 if (!ihandler) {
1009 ipath_dev_err(dd, "Couldn't find interrupt handler in "
1010 "config space\n");
1011 ret = -ENODEV;
1012 }
1013
1014bail: 1005bail:
1015 return ret; 1006 return ret;
1016} 1007}
@@ -1360,25 +1351,6 @@ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
1360 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 1351 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
1361} 1352}
1362 1353
1363static int ipath_ht_intconfig(struct ipath_devdata *dd)
1364{
1365 int ret;
1366
1367 if (!dd->ipath_intconfig) {
1368 ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
1369 "interrupt address\n");
1370 ret = 1;
1371 goto bail;
1372 }
1373
1374 ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
1375 dd->ipath_intconfig); /* interrupt address */
1376 ret = 0;
1377
1378bail:
1379 return ret;
1380}
1381
1382/** 1354/**
1383 * ipath_pe_put_tid - write a TID in chip 1355 * ipath_pe_put_tid - write a TID in chip
1384 * @dd: the infinipath device 1356 * @dd: the infinipath device
@@ -1575,6 +1547,14 @@ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
1575 return 0; 1547 return 0;
1576} 1548}
1577 1549
1550static void ipath_ht_free_irq(struct ipath_devdata *dd)
1551{
1552 free_irq(dd->ipath_irq, dd);
1553 ht_destroy_irq(dd->ipath_irq);
1554 dd->ipath_irq = 0;
1555 dd->ipath_intconfig = 0;
1556}
1557
1578/** 1558/**
1579 * ipath_init_iba6110_funcs - set up the chip-specific function pointers 1559 * ipath_init_iba6110_funcs - set up the chip-specific function pointers
1580 * @dd: the infinipath device 1560 * @dd: the infinipath device
@@ -1598,6 +1578,7 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
1598 dd->ipath_f_cleanup = ipath_setup_ht_cleanup; 1578 dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
1599 dd->ipath_f_setextled = ipath_setup_ht_setextled; 1579 dd->ipath_f_setextled = ipath_setup_ht_setextled;
1600 dd->ipath_f_get_base_info = ipath_ht_get_base_info; 1580 dd->ipath_f_get_base_info = ipath_ht_get_base_info;
1581 dd->ipath_f_free_irq = ipath_ht_free_irq;
1601 1582
1602 /* 1583 /*
1603 * initialize chip-specific variables 1584 * initialize chip-specific variables
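Taken together, the ipath_iba6110.c hunks above drop the hand-rolled parsing of the HT interrupt-discovery capability and route interrupt setup through the kernel's HyperTransport IRQ layer: __ht_create_irq() returns an irq number and later calls an update hook with the interrupt message, which the driver mirrors into its intconfig register; ipath_ht_free_irq() undoes both halves. A minimal self-contained sketch of that pairing, assuming invented example_* names and a placeholder struct; only __ht_create_irq(), ht_destroy_irq(), free_irq() and struct ht_irq_msg are the real kernel interfaces used in the diff.

#include <linux/pci.h>
#include <linux/htirq.h>
#include <linux/interrupt.h>

/* Placeholder per-device state; stands in for struct ipath_devdata. */
struct example_dev {
	int irq;		/* what the driver keeps in dd->ipath_irq */
	u64 intconfig;		/* mirrors dd->ipath_intconfig */
};

/* Called by the HT irq layer whenever the interrupt message changes;
 * loosely mirrors ipath_ht_irq_update() in the hunk above. */
static void example_irq_update(struct pci_dev *pdev, int irq,
			       struct ht_irq_msg *msg)
{
	struct example_dev *ed = pci_get_drvdata(pdev);

	ed->intconfig = msg->address_lo | ((u64) msg->address_hi << 32);
	/* the real driver rewrites kr_interruptconfig once configured */
}

static int example_ht_irq_setup(struct example_dev *ed, struct pci_dev *pdev)
{
	int irq = __ht_create_irq(pdev, 0, example_irq_update);

	if (irq < 0)
		return irq;	/* no usable HT interrupt capability */
	ed->irq = irq;		/* later handed to request_irq()/free_irq() */
	return 0;
}

static void example_ht_irq_free(struct example_dev *ed)
{
	free_irq(ed->irq, ed);		/* as in ipath_ht_free_irq() */
	ht_destroy_irq(ed->irq);
	ed->irq = 0;
}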
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index a72ab9de386a..6af89683f710 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -851,6 +851,7 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
851 int pos, ret; 851 int pos, ret;
852 852
853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ 853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */
854 dd->ipath_irq = pdev->irq;
854 ret = pci_enable_msi(dd->pcidev); 855 ret = pci_enable_msi(dd->pcidev);
855 if (ret) 856 if (ret)
856 ipath_dev_err(dd, "pci_enable_msi failed: %d, " 857 ipath_dev_err(dd, "pci_enable_msi failed: %d, "
@@ -1323,6 +1324,12 @@ done:
1323 return 0; 1324 return 0;
1324} 1325}
1325 1326
1327static void ipath_pe_free_irq(struct ipath_devdata *dd)
1328{
1329 free_irq(dd->ipath_irq, dd);
1330 dd->ipath_irq = 0;
1331}
1332
1326/** 1333/**
1327 * ipath_init_iba6120_funcs - set up the chip-specific function pointers 1334 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1328 * @dd: the infinipath device 1335 * @dd: the infinipath device
@@ -1349,6 +1356,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
1349 dd->ipath_f_cleanup = ipath_setup_pe_cleanup; 1356 dd->ipath_f_cleanup = ipath_setup_pe_cleanup;
1350 dd->ipath_f_setextled = ipath_setup_pe_setextled; 1357 dd->ipath_f_setextled = ipath_setup_pe_setextled;
1351 dd->ipath_f_get_base_info = ipath_pe_get_base_info; 1358 dd->ipath_f_get_base_info = ipath_pe_get_base_info;
1359 dd->ipath_f_free_irq = ipath_pe_free_irq;
1352 1360
1353 /* initialize chip-specific variables */ 1361 /* initialize chip-specific variables */
1354 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; 1362 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
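The PCI Express chip (iba6120) takes the plain MSI route instead: the hunk above records the vector in dd->ipath_irq and adds ipath_pe_free_irq() as the matching teardown. For reference, a generic sketch of the enable/request/free pattern this leans on, with invented example_* names; it is not the driver's exact sequence.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Generic MSI bring-up and teardown; illustrative only. */
static int example_msi_setup(struct pci_dev *pdev, irq_handler_t handler,
			     void *ctx, int *saved_irq)
{
	if (pci_enable_msi(pdev))
		dev_warn(&pdev->dev, "MSI unavailable, staying on INTx\n");

	*saved_irq = pdev->irq;		/* whichever vector is now in use */
	return request_irq(*saved_irq, handler, 0, "example", ctx);
}

static void example_msi_teardown(struct pci_dev *pdev, void *ctx,
				 int *saved_irq)
{
	free_irq(*saved_irq, ctx);	/* analogous to ipath_pe_free_irq() */
	*saved_irq = 0;
	pci_disable_msi(pdev);
}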
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index d9079ee12030..5652a550d442 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -710,14 +710,14 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
710 * linuxbios development work, and it may happen in 710 * linuxbios development work, and it may happen in
711 * the future again. 711 * the future again.
712 */ 712 */
713 if (dd->pcidev && dd->pcidev->irq) { 713 if (dd->pcidev && dd->ipath_irq) {
714 ipath_dev_err(dd, "Now %u unexpected " 714 ipath_dev_err(dd, "Now %u unexpected "
715 "interrupts, unregistering " 715 "interrupts, unregistering "
716 "interrupt handler\n", 716 "interrupt handler\n",
717 *unexpectp); 717 *unexpectp);
718 ipath_dbg("free_irq of irq %x\n", 718 ipath_dbg("free_irq of irq %d\n",
719 dd->pcidev->irq); 719 dd->ipath_irq);
720 free_irq(dd->pcidev->irq, dd); 720 dd->ipath_f_free_irq(dd);
721 } 721 }
722 } 722 }
723 if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) { 723 if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) {
@@ -753,7 +753,7 @@ static void ipath_bad_regread(struct ipath_devdata *dd)
753 if (allbits == 2) { 753 if (allbits == 2) {
754 ipath_dev_err(dd, "Still bad interrupt status, " 754 ipath_dev_err(dd, "Still bad interrupt status, "
755 "unregistering interrupt\n"); 755 "unregistering interrupt\n");
756 free_irq(dd->pcidev->irq, dd); 756 dd->ipath_f_free_irq(dd);
757 } else if (allbits > 2) { 757 } else if (allbits > 2) {
758 if ((allbits % 10000) == 0) 758 if ((allbits % 10000) == 0)
759 printk("."); 759 printk(".");
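Both error paths above stop freeing dd->pcidev->irq directly, since on the 6110 the live vector now comes from the HT irq layer rather than from the PCI device. They call through the chip-specific hook instead (declared in the ipath_kernel.h hunk below). A toy model of that indirection, with hypothetical names:

/* Toy model of the dd->ipath_f_free_irq() indirection (hypothetical names). */
struct example_devdata {
	int irq;					/* dd->ipath_irq */
	void (*free_irq)(struct example_devdata *);	/* dd->ipath_f_free_irq */
};

static void example_unregister_on_bad_intr(struct example_devdata *dd)
{
	if (dd->irq)
		dd->free_irq(dd);	/* HT teardown or plain free_irq() */
}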
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 7c436697d0e4..986b2125b8f5 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -213,6 +213,8 @@ struct ipath_devdata {
213 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64); 213 void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
214 /* fill out chip-specific fields */ 214 /* fill out chip-specific fields */
215 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); 215 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
216 /* free irq */
217 void (*ipath_f_free_irq)(struct ipath_devdata *);
216 struct ipath_ibdev *verbs_dev; 218 struct ipath_ibdev *verbs_dev;
217 struct timer_list verbs_timer; 219 struct timer_list verbs_timer;
218 /* total dwords sent (summed from counter) */ 220 /* total dwords sent (summed from counter) */
@@ -328,6 +330,8 @@ struct ipath_devdata {
328 /* so we can rewrite it after a chip reset */ 330 /* so we can rewrite it after a chip reset */
329 u32 ipath_pcibar1; 331 u32 ipath_pcibar1;
330 332
333 /* interrupt number */
334 int ipath_irq;
331 /* HT/PCI Vendor ID (here for NodeInfo) */ 335 /* HT/PCI Vendor ID (here for NodeInfo) */
332 u16 ipath_vendorid; 336 u16 ipath_vendorid;
333 /* HT/PCI Device ID (here for NodeInfo) */ 337 /* HT/PCI Device ID (here for NodeInfo) */
@@ -869,9 +873,6 @@ int ipath_device_create_group(struct device *, struct ipath_devdata *);
869void ipath_device_remove_group(struct device *, struct ipath_devdata *); 873void ipath_device_remove_group(struct device *, struct ipath_devdata *);
870int ipath_expose_reset(struct device *); 874int ipath_expose_reset(struct device *);
871 875
872int ipath_diagpkt_add(void);
873void ipath_diagpkt_remove(void);
874
875int ipath_init_ipathfs(void); 876int ipath_init_ipathfs(void);
876void ipath_exit_ipathfs(void); 877void ipath_exit_ipathfs(void);
877int ipathfs_add_device(struct ipath_devdata *); 878int ipathfs_add_device(struct ipath_devdata *);
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index a5456108dbad..acdee33ee1f8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1487,7 +1487,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1487 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; 1487 idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
1488 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; 1488 idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
1489 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; 1489 idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
1490 idev->pma_counter_select[5] = IB_PMA_PORT_XMIT_WAIT; 1490 idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
1491 idev->link_width_enabled = 3; /* 1x or 4x */ 1491 idev->link_width_enabled = 3; /* 1x or 4x */
1492 1492
1493 /* Snapshot current HW counters to "clear" them. */ 1493 /* Snapshot current HW counters to "clear" them. */
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index 69599455aca2..57cdc1bc5f50 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -33,7 +33,6 @@
33 * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $ 33 * $Id: mthca_av.c 1349 2004-12-16 21:09:43Z roland $
34 */ 34 */
35 35
36#include <linux/init.h>
37#include <linux/string.h> 36#include <linux/string.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
39 38
@@ -323,7 +322,7 @@ int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr)
323 return 0; 322 return 0;
324} 323}
325 324
326int __devinit mthca_init_av_table(struct mthca_dev *dev) 325int mthca_init_av_table(struct mthca_dev *dev)
327{ 326{
328 int err; 327 int err;
329 328
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 99a94d710935..768df7265b81 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1820,11 +1820,11 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1820 1820
1821#define MAD_IFC_BOX_SIZE 0x400 1821#define MAD_IFC_BOX_SIZE 0x400
1822#define MAD_IFC_MY_QPN_OFFSET 0x100 1822#define MAD_IFC_MY_QPN_OFFSET 0x100
1823#define MAD_IFC_RQPN_OFFSET 0x104 1823#define MAD_IFC_RQPN_OFFSET 0x108
1824#define MAD_IFC_SL_OFFSET 0x108 1824#define MAD_IFC_SL_OFFSET 0x10c
1825#define MAD_IFC_G_PATH_OFFSET 0x109 1825#define MAD_IFC_G_PATH_OFFSET 0x10d
1826#define MAD_IFC_RLID_OFFSET 0x10a 1826#define MAD_IFC_RLID_OFFSET 0x10e
1827#define MAD_IFC_PKEY_OFFSET 0x10e 1827#define MAD_IFC_PKEY_OFFSET 0x112
1828#define MAD_IFC_GRH_OFFSET 0x140 1828#define MAD_IFC_GRH_OFFSET 0x140
1829 1829
1830 inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1830 inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -1862,7 +1862,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1862 1862
1863 val = in_wc->dlid_path_bits | 1863 val = in_wc->dlid_path_bits |
1864 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); 1864 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
1865 MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET); 1865 MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET);
1866 1866
1867 MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET); 1867 MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET);
1868 MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); 1868 MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET);
@@ -1870,7 +1870,7 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1870 if (in_grh) 1870 if (in_grh)
1871 memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); 1871 memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40);
1872 1872
1873 op_modifier |= 0x10; 1873 op_modifier |= 0x4;
1874 1874
1875 in_modifier |= in_wc->slid << 16; 1875 in_modifier |= in_wc->slid << 16;
1876 } 1876 }
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index e393681ba7d4..283d50b76c3d 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -36,9 +36,10 @@
36 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $ 36 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
37 */ 37 */
38 38
39#include <linux/init.h>
40#include <linux/hardirq.h> 39#include <linux/hardirq.h>
41 40
41#include <asm/io.h>
42
42#include <rdma/ib_pack.h> 43#include <rdma/ib_pack.h>
43 44
44#include "mthca_dev.h" 45#include "mthca_dev.h"
@@ -210,6 +211,11 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
210 mthca_write64(doorbell, 211 mthca_write64(doorbell,
211 dev->kar + MTHCA_CQ_DOORBELL, 212 dev->kar + MTHCA_CQ_DOORBELL,
212 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 213 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
214 /*
215 * Make sure doorbells don't leak out of CQ spinlock
216 * and reach the HCA out of order:
217 */
218 mmiowb();
213 } 219 }
214} 220}
215 221
@@ -963,7 +969,7 @@ void mthca_free_cq(struct mthca_dev *dev,
963 mthca_free_mailbox(dev, mailbox); 969 mthca_free_mailbox(dev, mailbox);
964} 970}
965 971
966int __devinit mthca_init_cq_table(struct mthca_dev *dev) 972int mthca_init_cq_table(struct mthca_dev *dev)
967{ 973{
968 int err; 974 int err;
969 975
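The mmiowb() added here (and in the mthca_qp.c and mthca_srq.c hunks below) closes an ordering hole: on some platforms, posted MMIO writes from different CPUs can reach the device out of order even though the CPUs serialize on a spinlock, so a doorbell written just before spin_unlock() needs an explicit barrier. A generic sketch of the pattern with placeholder names; only mmiowb() and the lock-then-doorbell idiom come from the diff.

#include <linux/spinlock.h>
#include <asm/io.h>	/* writel(), mmiowb(); the hunks add this include too */

/* Illustrative doorbell ring with the MMIO write ordered before the unlock. */
static void example_ring_doorbell(spinlock_t *lock, void __iomem *db_reg,
				  u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	writel(value, db_reg);		/* post the doorbell to the HCA */

	/*
	 * Make sure the doorbell write cannot slip past the unlock and
	 * reach the device after a doorbell rung by the next lock holder.
	 */
	mmiowb();

	spin_unlock_irqrestore(lock, flags);
}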
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index e284e0613a94..8ec9fa1ff9ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -33,7 +33,6 @@
33 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $ 33 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
34 */ 34 */
35 35
36#include <linux/init.h>
37#include <linux/errno.h> 36#include <linux/errno.h>
38#include <linux/interrupt.h> 37#include <linux/interrupt.h>
39#include <linux/pci.h> 38#include <linux/pci.h>
@@ -479,10 +478,10 @@ static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
479 return IRQ_HANDLED; 478 return IRQ_HANDLED;
480} 479}
481 480
482static int __devinit mthca_create_eq(struct mthca_dev *dev, 481static int mthca_create_eq(struct mthca_dev *dev,
483 int nent, 482 int nent,
484 u8 intr, 483 u8 intr,
485 struct mthca_eq *eq) 484 struct mthca_eq *eq)
486{ 485{
487 int npages; 486 int npages;
488 u64 *dma_list = NULL; 487 u64 *dma_list = NULL;
@@ -664,9 +663,9 @@ static void mthca_free_irqs(struct mthca_dev *dev)
664 dev->eq_table.eq + i); 663 dev->eq_table.eq + i);
665} 664}
666 665
667static int __devinit mthca_map_reg(struct mthca_dev *dev, 666static int mthca_map_reg(struct mthca_dev *dev,
668 unsigned long offset, unsigned long size, 667 unsigned long offset, unsigned long size,
669 void __iomem **map) 668 void __iomem **map)
670{ 669{
671 unsigned long base = pci_resource_start(dev->pdev, 0); 670 unsigned long base = pci_resource_start(dev->pdev, 0);
672 671
@@ -691,7 +690,7 @@ static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
691 iounmap(map); 690 iounmap(map);
692} 691}
693 692
694static int __devinit mthca_map_eq_regs(struct mthca_dev *dev) 693static int mthca_map_eq_regs(struct mthca_dev *dev)
695{ 694{
696 if (mthca_is_memfree(dev)) { 695 if (mthca_is_memfree(dev)) {
697 /* 696 /*
@@ -781,7 +780,7 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
781 } 780 }
782} 781}
783 782
784int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) 783int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
785{ 784{
786 int ret; 785 int ret;
787 u8 status; 786 u8 status;
@@ -825,7 +824,7 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
825 __free_page(dev->eq_table.icm_page); 824 __free_page(dev->eq_table.icm_page);
826} 825}
827 826
828int __devinit mthca_init_eq_table(struct mthca_dev *dev) 827int mthca_init_eq_table(struct mthca_dev *dev)
829{ 828{
830 int err; 829 int err;
831 u8 status; 830 u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 45e106f14807..acfa41d968ee 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -317,7 +317,7 @@ err:
317 return ret; 317 return ret;
318} 318}
319 319
320void __devexit mthca_free_agents(struct mthca_dev *dev) 320void mthca_free_agents(struct mthca_dev *dev)
321{ 321{
322 struct ib_mad_agent *agent; 322 struct ib_mad_agent *agent;
323 int p, q; 323 int p, q;
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 47ea02148368..0491ec7a7c0a 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -98,7 +98,7 @@ static struct mthca_profile default_profile = {
98 .uarc_size = 1 << 18, /* Arbel only */ 98 .uarc_size = 1 << 18, /* Arbel only */
99}; 99};
100 100
101static int __devinit mthca_tune_pci(struct mthca_dev *mdev) 101static int mthca_tune_pci(struct mthca_dev *mdev)
102{ 102{
103 int cap; 103 int cap;
104 u16 val; 104 u16 val;
@@ -143,7 +143,7 @@ static int __devinit mthca_tune_pci(struct mthca_dev *mdev)
143 return 0; 143 return 0;
144} 144}
145 145
146static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) 146static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
147{ 147{
148 int err; 148 int err;
149 u8 status; 149 u8 status;
@@ -255,7 +255,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
255 return 0; 255 return 0;
256} 256}
257 257
258static int __devinit mthca_init_tavor(struct mthca_dev *mdev) 258static int mthca_init_tavor(struct mthca_dev *mdev)
259{ 259{
260 u8 status; 260 u8 status;
261 int err; 261 int err;
@@ -333,7 +333,7 @@ err_disable:
333 return err; 333 return err;
334} 334}
335 335
336static int __devinit mthca_load_fw(struct mthca_dev *mdev) 336static int mthca_load_fw(struct mthca_dev *mdev)
337{ 337{
338 u8 status; 338 u8 status;
339 int err; 339 int err;
@@ -379,10 +379,10 @@ err_free:
379 return err; 379 return err;
380} 380}
381 381
382static int __devinit mthca_init_icm(struct mthca_dev *mdev, 382static int mthca_init_icm(struct mthca_dev *mdev,
383 struct mthca_dev_lim *dev_lim, 383 struct mthca_dev_lim *dev_lim,
384 struct mthca_init_hca_param *init_hca, 384 struct mthca_init_hca_param *init_hca,
385 u64 icm_size) 385 u64 icm_size)
386{ 386{
387 u64 aux_pages; 387 u64 aux_pages;
388 u8 status; 388 u8 status;
@@ -575,7 +575,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
575 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); 575 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
576} 576}
577 577
578static int __devinit mthca_init_arbel(struct mthca_dev *mdev) 578static int mthca_init_arbel(struct mthca_dev *mdev)
579{ 579{
580 struct mthca_dev_lim dev_lim; 580 struct mthca_dev_lim dev_lim;
581 struct mthca_profile profile; 581 struct mthca_profile profile;
@@ -683,7 +683,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
683 mthca_SYS_DIS(mdev, &status); 683 mthca_SYS_DIS(mdev, &status);
684} 684}
685 685
686static int __devinit mthca_init_hca(struct mthca_dev *mdev) 686static int mthca_init_hca(struct mthca_dev *mdev)
687{ 687{
688 u8 status; 688 u8 status;
689 int err; 689 int err;
@@ -720,7 +720,7 @@ err_close:
720 return err; 720 return err;
721} 721}
722 722
723static int __devinit mthca_setup_hca(struct mthca_dev *dev) 723static int mthca_setup_hca(struct mthca_dev *dev)
724{ 724{
725 int err; 725 int err;
726 u8 status; 726 u8 status;
@@ -875,8 +875,7 @@ err_uar_table_free:
875 return err; 875 return err;
876} 876}
877 877
878static int __devinit mthca_request_regions(struct pci_dev *pdev, 878static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
879 int ddr_hidden)
880{ 879{
881 int err; 880 int err;
882 881
@@ -928,7 +927,7 @@ static void mthca_release_regions(struct pci_dev *pdev,
928 MTHCA_HCR_SIZE); 927 MTHCA_HCR_SIZE);
929} 928}
930 929
931static int __devinit mthca_enable_msi_x(struct mthca_dev *mdev) 930static int mthca_enable_msi_x(struct mthca_dev *mdev)
932{ 931{
933 struct msix_entry entries[3]; 932 struct msix_entry entries[3];
934 int err; 933 int err;
@@ -1213,7 +1212,7 @@ int __mthca_restart_one(struct pci_dev *pdev)
1213} 1212}
1214 1213
1215static int __devinit mthca_init_one(struct pci_dev *pdev, 1214static int __devinit mthca_init_one(struct pci_dev *pdev,
1216 const struct pci_device_id *id) 1215 const struct pci_device_id *id)
1217{ 1216{
1218 static int mthca_version_printed = 0; 1217 static int mthca_version_printed = 0;
1219 int ret; 1218 int ret;
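A note on the __devinit/__devexit removals running through the mthca files in this series: these setup and teardown helpers are now reachable after boot, for example via the __mthca_restart_one() path visible in the last hunk, so they can no longer live in memory that may be discarded once initialization finishes. Roughly what the annotation meant on kernels of this vintage (paraphrased, not the exact include/linux/init.h text):

/* Paraphrase of the era's include/linux/init.h behaviour: */
#ifdef CONFIG_HOTPLUG
#define __devinit			/* stays in normal .text */
#else
#define __devinit __init		/* freed with the init sections after boot */
#endif

/*
 * With CONFIG_HOTPLUG=n, a __devinit function no longer exists once init
 * memory has been released, so anything __mthca_restart_one() might call
 * after a device reset must not carry the annotation.
 */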
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 47ca8a9b7247..a8ad072be074 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -32,7 +32,6 @@
32 * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $ 32 * $Id: mthca_mcg.c 1349 2004-12-16 21:09:43Z roland $
33 */ 33 */
34 34
35#include <linux/init.h>
36#include <linux/string.h> 35#include <linux/string.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
38 37
@@ -371,7 +370,7 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
371 return err; 370 return err;
372} 371}
373 372
374int __devinit mthca_init_mcg_table(struct mthca_dev *dev) 373int mthca_init_mcg_table(struct mthca_dev *dev)
375{ 374{
376 int err; 375 int err;
377 int table_size = dev->limits.num_mgms + dev->limits.num_amgms; 376 int table_size = dev->limits.num_mgms + dev->limits.num_amgms;
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index a486dec1707e..f71ffa88db3a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -34,7 +34,6 @@
34 */ 34 */
35 35
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/init.h>
38#include <linux/errno.h> 37#include <linux/errno.h>
39 38
40#include "mthca_dev.h" 39#include "mthca_dev.h"
@@ -135,7 +134,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
135 spin_unlock(&buddy->lock); 134 spin_unlock(&buddy->lock);
136} 135}
137 136
138static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order) 137static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
139{ 138{
140 int i, s; 139 int i, s;
141 140
@@ -759,7 +758,7 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
759 *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; 758 *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
760} 759}
761 760
762int __devinit mthca_init_mr_table(struct mthca_dev *dev) 761int mthca_init_mr_table(struct mthca_dev *dev)
763{ 762{
764 unsigned long addr; 763 unsigned long addr;
765 int err, i; 764 int err, i;
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index 59df51614c85..c1e950764bd8 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -34,7 +34,6 @@
34 * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $ 34 * $Id: mthca_pd.c 1349 2004-12-16 21:09:43Z roland $
35 */ 35 */
36 36
37#include <linux/init.h>
38#include <linux/errno.h> 37#include <linux/errno.h>
39 38
40#include "mthca_dev.h" 39#include "mthca_dev.h"
@@ -69,7 +68,7 @@ void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
69 mthca_free(&dev->pd_table.alloc, pd->pd_num); 68 mthca_free(&dev->pd_table.alloc, pd->pd_num);
70} 69}
71 70
72int __devinit mthca_init_pd_table(struct mthca_dev *dev) 71int mthca_init_pd_table(struct mthca_dev *dev)
73{ 72{
74 return mthca_alloc_init(&dev->pd_table.alloc, 73 return mthca_alloc_init(&dev->pd_table.alloc,
75 dev->limits.num_pds, 74 dev->limits.num_pds,
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index fc67f780581b..21422a3336ad 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1100,11 +1100,10 @@ static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
1100 struct mthca_fmr *fmr; 1100 struct mthca_fmr *fmr;
1101 int err; 1101 int err;
1102 1102
1103 fmr = kmalloc(sizeof *fmr, GFP_KERNEL); 1103 fmr = kmemdup(fmr_attr, sizeof *fmr, GFP_KERNEL);
1104 if (!fmr) 1104 if (!fmr)
1105 return ERR_PTR(-ENOMEM); 1105 return ERR_PTR(-ENOMEM);
1106 1106
1107 memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
1108 err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num, 1107 err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
1109 convert_access(mr_access_flags), fmr); 1108 convert_access(mr_access_flags), fmr);
1110 1109
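The mthca_alloc_fmr() hunk above collapses a kmalloc() plus memcpy() into a single kmemdup() call. A free-standing illustration of the idiom with a made-up struct, not the mthca types:

#include <linux/slab.h>
#include <linux/string.h>	/* kmemdup() is declared here */

struct example_attr {		/* made-up type for illustration */
	int max_pages;
	int max_maps;
};

static struct example_attr *example_dup_attr(const struct example_attr *src)
{
	/* One call replaces kmalloc(sizeof *src, GFP_KERNEL) + memcpy(). */
	return kmemdup(src, sizeof *src, GFP_KERNEL);
}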
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 5e5c58b9920b..33e3ba7937f1 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -35,10 +35,11 @@
35 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $ 35 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
36 */ 36 */
37 37
38#include <linux/init.h>
39#include <linux/string.h> 38#include <linux/string.h>
40#include <linux/slab.h> 39#include <linux/slab.h>
41 40
41#include <asm/io.h>
42
42#include <rdma/ib_verbs.h> 43#include <rdma/ib_verbs.h>
43#include <rdma/ib_cache.h> 44#include <rdma/ib_cache.h>
44#include <rdma/ib_pack.h> 45#include <rdma/ib_pack.h>
@@ -1732,6 +1733,11 @@ out:
1732 mthca_write64(doorbell, 1733 mthca_write64(doorbell,
1733 dev->kar + MTHCA_SEND_DOORBELL, 1734 dev->kar + MTHCA_SEND_DOORBELL,
1734 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 1735 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1736 /*
1737 * Make sure doorbells don't leak out of SQ spinlock
1738 * and reach the HCA out of order:
1739 */
1740 mmiowb();
1735 } 1741 }
1736 1742
1737 qp->sq.next_ind = ind; 1743 qp->sq.next_ind = ind;
@@ -1851,6 +1857,12 @@ out:
1851 qp->rq.next_ind = ind; 1857 qp->rq.next_ind = ind;
1852 qp->rq.head += nreq; 1858 qp->rq.head += nreq;
1853 1859
1860 /*
1861 * Make sure doorbells don't leak out of RQ spinlock and reach
1862 * the HCA out of order:
1863 */
1864 mmiowb();
1865
1854 spin_unlock_irqrestore(&qp->rq.lock, flags); 1866 spin_unlock_irqrestore(&qp->rq.lock, flags);
1855 return err; 1867 return err;
1856} 1868}
@@ -2112,6 +2124,12 @@ out:
2112 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 2124 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
2113 } 2125 }
2114 2126
2127 /*
2128 * Make sure doorbells don't leak out of SQ spinlock and reach
2129 * the HCA out of order:
2130 */
2131 mmiowb();
2132
2115 spin_unlock_irqrestore(&qp->sq.lock, flags); 2133 spin_unlock_irqrestore(&qp->sq.lock, flags);
2116 return err; 2134 return err;
2117} 2135}
@@ -2222,7 +2240,7 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2222 *new_wqe = 0; 2240 *new_wqe = 0;
2223} 2241}
2224 2242
2225int __devinit mthca_init_qp_table(struct mthca_dev *dev) 2243int mthca_init_qp_table(struct mthca_dev *dev)
2226{ 2244{
2227 int err; 2245 int err;
2228 u8 status; 2246 u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 92a72f521528..34d2c4768962 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -35,6 +35,8 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/string.h> 36#include <linux/string.h>
37 37
38#include <asm/io.h>
39
38#include "mthca_dev.h" 40#include "mthca_dev.h"
39#include "mthca_cmd.h" 41#include "mthca_cmd.h"
40#include "mthca_memfree.h" 42#include "mthca_memfree.h"
@@ -118,7 +120,7 @@ static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
118 120
119 memset(context, 0, sizeof *context); 121 memset(context, 0, sizeof *context);
120 122
121 logsize = long_log2(srq->max) + srq->wqe_shift; 123 logsize = long_log2(srq->max);
122 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); 124 context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
123 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); 125 context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
124 context->db_index = cpu_to_be32(srq->db_index); 126 context->db_index = cpu_to_be32(srq->db_index);
@@ -595,6 +597,12 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
595 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 597 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
596 } 598 }
597 599
600 /*
601 * Make sure doorbells don't leak out of SRQ spinlock and
602 * reach the HCA out of order:
603 */
604 mmiowb();
605
598 spin_unlock_irqrestore(&srq->lock, flags); 606 spin_unlock_irqrestore(&srq->lock, flags);
599 return err; 607 return err;
600} 608}
@@ -707,7 +715,7 @@ int mthca_max_srq_sge(struct mthca_dev *dev)
707 sizeof (struct mthca_data_seg)); 715 sizeof (struct mthca_data_seg));
708} 716}
709 717
710int __devinit mthca_init_srq_table(struct mthca_dev *dev) 718int mthca_init_srq_table(struct mthca_dev *dev)
711{ 719{
712 int err; 720 int err;
713 721
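Finally, the mthca_arbel_init_srq_context() hunk near the top of the mthca_srq.c diff corrects the size encoding: per the new code, the top byte of state_logsize_srqn holds log2 of the number of SRQ WQEs, whereas the old code also folded in wqe_shift (log2 of the WQE size in bytes). A small sketch of the corrected packing; long_log2() is what the diff uses, the other names are illustrative, and the driver byte-swaps the result with cpu_to_be32() before writing it.

#include <linux/kernel.h>	/* long_log2() on kernels of this era */

/* Illustrative: pack the corrected logsize and SRQ number (host order). */
static u32 example_state_logsize_srqn(int num_wqes, u32 srqn)
{
	u32 logsize = long_log2(num_wqes);	/* e.g. 256 WQEs -> 8 */

	return logsize << 24 | srqn;	/* driver then applies cpu_to_be32() */
}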