author     Tony Luck <tony.luck@intel.com>   2005-10-20 13:41:44 -0400
committer  Tony Luck <tony.luck@intel.com>   2005-10-20 13:41:44 -0400
commit     9cec58dc138d6fcad9f447a19c8ff69f6540e667 (patch)
tree       4fe1cca94fdba8b705c87615bee06d3346f687ce /drivers/infiniband
parent     17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38 (diff)
parent     ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90 (diff)
Update from upstream with manual merge of Yasunori Goto's changes to
swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341, since
this file has been moved from arch/ia64/lib/swiotlb.c to lib/swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/mad.c                      2
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c                19
-rw-r--r--  drivers/infiniband/core/sa_query.c                 6
-rw-r--r--  drivers/infiniband/core/user_mad.c                 5
-rw-r--r--  drivers/infiniband/core/uverbs.h                   1
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c             120
-rw-r--r--  drivers/infiniband/core/uverbs_main.c             27
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c            4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c            18
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c          45
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c       19
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c       2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c            51
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c           25
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h               2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c            4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c          6
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c    13
18 files changed, 194 insertions(+), 175 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a4a4d9c1eef..a14ca87fda1 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -783,7 +783,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 				u32 remote_qpn, u16 pkey_index,
 				struct ib_ah *ah, int rmpp_active,
 				int hdr_len, int data_len,
-				unsigned int __nocast gfp_mask)
+				gfp_t gfp_mask)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_buf *send_buf;
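[Annotation, not part of the commit: this hunk is one piece of the tree-wide gfp_t conversion that landed in the same merge window. Giving the allocation-flags type a name lets the sparse checker flag callers that confuse it with other integer arguments; a rough sketch of the typedef, paraphrasing include/linux/gfp.h of that era:]

	/* __nocast is a sparse-only annotation; plain GCC ignores it */
	typedef unsigned int __nocast gfp_t;

	/* a prototype like this now documents, and under sparse enforces,
	 * which argument carries the GFP flags */
	void *example_alloc(int len, gfp_t gfp_mask);	/* hypothetical helper */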
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 2bd8b1cc57c..e23836d0e21 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -412,8 +412,8 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
 	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
-	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
-	if (pad > data_size || pad < 0)
+	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
 		pad = 0;
 
 	return hdr_size + rmpp_recv->seg_num * data_size - pad;
@@ -583,6 +583,7 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_rmpp_mad *rmpp_mad;
 	int timeout;
+	u32 paylen;
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
 	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
@@ -590,11 +591,9 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 
 	if (mad_send_wr->seg_num == 1) {
 		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
-		rmpp_mad->rmpp_hdr.paylen_newwin =
-			cpu_to_be32(mad_send_wr->total_seg *
-				    (sizeof(struct ib_rmpp_mad) -
-				     offsetof(struct ib_rmpp_mad, data)) -
-				    mad_send_wr->pad);
+		paylen = mad_send_wr->total_seg * IB_MGMT_RMPP_DATA -
+			 mad_send_wr->pad;
+		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
 		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
 	} else {
 		mad_send_wr->send_wr.num_sge = 2;
@@ -608,10 +607,8 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 
 	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
 		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
-		rmpp_mad->rmpp_hdr.paylen_newwin =
-			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
-				    offsetof(struct ib_rmpp_mad, data) -
-				    mad_send_wr->pad);
+		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
+		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);
 	}
 
 	/* 2 seconds for an ACK until we can find the packet lifetime */
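[Annotation, not part of the commit: IB_MGMT_RMPP_DATA folds the open-coded sizeof/offsetof arithmetic that the removed lines spelled out. A MAD is 256 bytes and the common MAD header plus RMPP header occupy the first 36, so (paraphrasing the ib_mad.h constants of this series):]

	enum {
		IB_MGMT_MAD_HDR   = 24,				/* struct ib_mad_hdr */
		IB_MGMT_RMPP_HDR  = 36,				/* MAD hdr + RMPP hdr */
		IB_MGMT_RMPP_DATA = 256 - IB_MGMT_RMPP_HDR,	/* 220 payload bytes per segment */
	};

[So the first segment advertises paylen = total_seg * 220 - pad, and the last segment just 220 - pad.]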
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 78de2dd1a4f..262618210c1 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -574,7 +574,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		       struct ib_sa_path_rec *rec,
 		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, unsigned int __nocast gfp_mask,
+		       int timeout_ms, gfp_t gfp_mask,
 		       void (*callback)(int status,
 					struct ib_sa_path_rec *resp,
 					void *context),
@@ -676,7 +676,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
 			    struct ib_sa_service_rec *rec,
 			    ib_sa_comp_mask comp_mask,
-			    int timeout_ms, unsigned int __nocast gfp_mask,
+			    int timeout_ms, gfp_t gfp_mask,
 			    void (*callback)(int status,
 					     struct ib_sa_service_rec *resp,
 					     void *context),
@@ -759,7 +759,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 			     u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
-			     int timeout_ms, unsigned int __nocast gfp_mask,
+			     int timeout_ms, gfp_t gfp_mask,
 			     void (*callback)(int status,
 					      struct ib_sa_mcmember_rec *resp,
 					      void *context),
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7c2f03057dd..a64d6b4dcc1 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -334,10 +334,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		ret = -EINVAL;
 		goto err_ah;
 	}
-	/* Validate that management class can support RMPP */
+
+	/* Validate that the management class can support RMPP */
 	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
 		hdr_len = offsetof(struct ib_sa_mad, data);
-		data_len = length;
+		data_len = length - hdr_len;
 	} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
 		   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
 		hdr_len = offsetof(struct ib_vendor_mad, data);
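[Annotation, not part of the commit: ib_create_send_mad(), shown in the first hunk of this diff, wants the payload length without the header, while `length` here is the whole MAD handed in by write(). A worked case, assuming the usual 256-byte SA MAD layout:]

	/* length   = 256
	 * hdr_len  = offsetof(struct ib_sa_mad, data)
	 *          = 24 (MAD hdr) + 12 (RMPP hdr) + 20 (SA hdr) = 56
	 * data_len = length - hdr_len = 200	(old code wrongly passed 256)
	 */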
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b1897bed14a..cc124344dd2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -69,6 +69,7 @@ struct ib_uverbs_event_file {
 
 struct ib_uverbs_file {
 	struct kref			ref;
+	struct semaphore		mutex;
 	struct ib_uverbs_device	       *device;
 	struct ib_ucontext	       *ucontext;
 	struct ib_event_handler		event_handler;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e91ebde4648..562445165d2 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -76,8 +76,9 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	struct ib_uverbs_get_context_resp resp;
 	struct ib_udata                   udata;
 	struct ib_device                 *ibdev = file->device->ib_dev;
+	struct ib_ucontext		 *ucontext;
 	int i;
-	int ret = in_len;
+	int ret;
 
 	if (out_len < sizeof resp)
 		return -ENOSPC;
@@ -85,45 +86,56 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
+	down(&file->mutex);
+
+	if (file->ucontext) {
+		ret = -EINVAL;
+		goto err;
+	}
+
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
 		   in_len - sizeof cmd, out_len - sizeof resp);
 
-	file->ucontext = ibdev->alloc_ucontext(ibdev, &udata);
-	if (IS_ERR(file->ucontext)) {
-		ret = PTR_ERR(file->ucontext);
-		file->ucontext = NULL;
-		return ret;
-	}
+	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
+	if (IS_ERR(ucontext))
+		return PTR_ERR(file->ucontext);
 
-	file->ucontext->device = ibdev;
-	INIT_LIST_HEAD(&file->ucontext->pd_list);
-	INIT_LIST_HEAD(&file->ucontext->mr_list);
-	INIT_LIST_HEAD(&file->ucontext->mw_list);
-	INIT_LIST_HEAD(&file->ucontext->cq_list);
-	INIT_LIST_HEAD(&file->ucontext->qp_list);
-	INIT_LIST_HEAD(&file->ucontext->srq_list);
-	INIT_LIST_HEAD(&file->ucontext->ah_list);
-	spin_lock_init(&file->ucontext->lock);
+	ucontext->device = ibdev;
+	INIT_LIST_HEAD(&ucontext->pd_list);
+	INIT_LIST_HEAD(&ucontext->mr_list);
+	INIT_LIST_HEAD(&ucontext->mw_list);
+	INIT_LIST_HEAD(&ucontext->cq_list);
+	INIT_LIST_HEAD(&ucontext->qp_list);
+	INIT_LIST_HEAD(&ucontext->srq_list);
+	INIT_LIST_HEAD(&ucontext->ah_list);
 
 	resp.async_fd = file->async_file.fd;
 	for (i = 0; i < file->device->num_comp; ++i)
 		if (copy_to_user((void __user *) (unsigned long) cmd.cq_fd_tab +
 				 i * sizeof (__u32),
-				 &file->comp_file[i].fd, sizeof (__u32)))
-			goto err;
+				 &file->comp_file[i].fd, sizeof (__u32))) {
+			ret = -EFAULT;
+			goto err_free;
+		}
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
-			 &resp, sizeof resp))
-		goto err;
+			 &resp, sizeof resp)) {
+		ret = -EFAULT;
+		goto err_free;
+	}
+
+	file->ucontext = ucontext;
+	up(&file->mutex);
 
 	return in_len;
 
-err:
-	ibdev->dealloc_ucontext(file->ucontext);
-	file->ucontext = NULL;
+err_free:
+	ibdev->dealloc_ucontext(ucontext);
 
-	return -EFAULT;
+err:
+	up(&file->mutex);
+	return ret;
 }
 
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
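[Annotation, not part of the commit: the new file->mutex is a semaphore used as a mutex (see the init_MUTEX() call in the uverbs_main.c hunk below). It replaces the per-ucontext spinlock because these request paths call things that may sleep, such as ibdev->alloc_ucontext(), copy_to_user() and allocations, which is not allowed under spin_lock_irq(). Every list manipulation in the hunks that follow therefore becomes this pattern:]

	down(&file->mutex);	/* may sleep; these handlers run in process context */
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	up(&file->mutex);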
@@ -352,9 +364,9 @@ retry:
 	if (ret)
 		goto err_pd;
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_add_tail(&uobj->list, &file->ucontext->pd_list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	memset(&resp, 0, sizeof resp);
 	resp.pd_handle = uobj->id;
@@ -368,9 +380,9 @@ retry:
 	return in_len;
 
 err_list:
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	down(&ib_uverbs_idr_mutex);
 	idr_remove(&ib_uverbs_pd_idr, uobj->id);
@@ -410,9 +422,9 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
 
 	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	kfree(uobj);
 
@@ -512,9 +524,9 @@ retry:
 
 	resp.mr_handle = obj->uobject.id;
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -527,9 +539,9 @@ retry:
 	return in_len;
 
 err_list:
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&obj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 err_unreg:
 	ib_dereg_mr(mr);
@@ -570,9 +582,9 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 
 	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&memobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	ib_umem_release(file->device->ib_dev, &memobj->umem);
 	kfree(memobj);
@@ -647,9 +659,9 @@ retry:
 	if (ret)
 		goto err_cq;
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	memset(&resp, 0, sizeof resp);
 	resp.cq_handle = uobj->uobject.id;
@@ -664,9 +676,9 @@ retry:
 	return in_len;
 
 err_list:
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	down(&ib_uverbs_idr_mutex);
 	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
@@ -712,9 +724,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 
 	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	spin_lock_irq(&file->comp_file[0].lock);
 	list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
@@ -847,9 +859,9 @@ retry:
 
 	resp.qp_handle = uobj->uobject.id;
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -862,9 +874,9 @@ retry:
 	return in_len;
 
 err_list:
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 err_destroy:
 	ib_destroy_qp(qp);
@@ -989,9 +1001,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	spin_lock_irq(&file->async_file.lock);
 	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
@@ -1136,9 +1148,9 @@ retry:
 
 	resp.srq_handle = uobj->uobject.id;
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp)) {
@@ -1151,9 +1163,9 @@ retry:
 	return in_len;
 
 err_list:
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 err_destroy:
 	ib_destroy_srq(srq);
@@ -1227,9 +1239,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
 	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
 
-	spin_lock_irq(&file->ucontext->lock);
+	down(&file->mutex);
 	list_del(&uobj->uobject.list);
-	spin_unlock_irq(&file->ucontext->lock);
+	up(&file->mutex);
 
 	spin_lock_irq(&file->async_file.lock);
 	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index ce5bdb7af30..12511808de2 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -448,7 +448,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 	if (hdr.in_words * 4 != count)
 		return -EINVAL;
 
-	if (hdr.command < 0 || hdr.command >= ARRAY_SIZE(uverbs_cmd_table))
+	if (hdr.command < 0 ||
+	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
+	    !uverbs_cmd_table[hdr.command])
 		return -EINVAL;
 
 	if (!file->ucontext &&
@@ -484,27 +486,29 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
 	file = kmalloc(sizeof *file +
 		       (dev->num_comp - 1) * sizeof (struct ib_uverbs_event_file),
 		       GFP_KERNEL);
-	if (!file)
-		return -ENOMEM;
+	if (!file) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	file->device = dev;
 	kref_init(&file->ref);
+	init_MUTEX(&file->mutex);
 
 	file->ucontext = NULL;
 
+	kref_get(&file->ref);
 	ret = ib_uverbs_event_init(&file->async_file, file);
 	if (ret)
-		goto err;
+		goto err_kref;
 
 	file->async_file.is_async = 1;
 
-	kref_get(&file->ref);
-
 	for (i = 0; i < dev->num_comp; ++i) {
+		kref_get(&file->ref);
 		ret = ib_uverbs_event_init(&file->comp_file[i], file);
 		if (ret)
 			goto err_async;
-		kref_get(&file->ref);
 		file->comp_file[i].is_async = 0;
 	}
 
@@ -524,9 +528,16 @@ err_async:
 
 	ib_uverbs_event_release(&file->async_file);
 
-err:
+err_kref:
+	/*
+	 * One extra kref_put() because we took a reference before the
+	 * event file creation that failed and got us here.
+	 */
+	kref_put(&file->ref, ib_uverbs_release_file);
 	kref_put(&file->ref, ib_uverbs_release_file);
 
+err:
+	module_put(dev->ib_dev->owner);
 	return ret;
 }
 
532 543
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index cc758a2d2bc..f6a8ac02655 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -605,7 +605,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 			err = -EINVAL;
 			goto out;
 		}
-		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) {
+		for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
 			if (virt != -1) {
 				pages[nent * 2] = cpu_to_be64(virt);
 				virt += 1 << lg;
@@ -616,7 +616,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 			ts += 1 << (lg - 10);
 			++tc;
 
-			if (nent == MTHCA_MAILBOX_SIZE / 16) {
+			if (++nent == MTHCA_MAILBOX_SIZE / 16) {
 				err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
 						CMD_TIME_CLASS_B, status);
 				if (err || *status)
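[Annotation, not part of the commit: the two hunks work as a pair. Previously nent was bumped in the for-loop header, after the full-mailbox test had already run, so the test compared the count from before the entry just written. With ++nent inside the test, the mailbox is flushed exactly when the just-filled entry makes it full. Outline of the fixed flow, with details elided and illustrative names:]

	for (i = 0; i < pages_in_chunk; ++i) {		/* pages_in_chunk: illustrative */
		/* fill pages[nent * 2] / pages[nent * 2 + 1] for this page */
		if (++nent == MTHCA_MAILBOX_SIZE / 16) {	/* mailbox now full */
			err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
					CMD_TIME_CLASS_B, status);
			nent = 0;			/* start a fresh mailbox */
		}
	}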
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 18f0981eb0c..c81fa8e975e 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -476,12 +476,8 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	int i;
 	u8 status;
 
-	/* Make sure EQ size is aligned to a power of 2 size. */
-	for (i = 1; i < nent; i <<= 1)
-		; /* nothing */
-	nent = i;
-
-	eq->dev = dev;
+	eq->dev = dev;
+	eq->nent = roundup_pow_of_two(max(nent, 2));
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -512,7 +508,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
 	}
 
-	for (i = 0; i < nent; ++i)
+	for (i = 0; i < eq->nent; ++i)
 		set_eqe_hw(get_eqe(eq, i));
 
 	eq->eqn = mthca_alloc(&dev->eq_table.alloc);
@@ -528,8 +524,6 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (err)
 		goto err_out_free_eq;
 
-	eq->nent = nent;
-
 	memset(eq_context, 0, sizeof *eq_context);
 	eq_context->flags = cpu_to_be32(MTHCA_EQ_STATUS_OK |
 					MTHCA_EQ_OWNER_HW |
@@ -538,7 +532,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	if (mthca_is_memfree(dev))
 		eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
 
-	eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+	eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
 	if (mthca_is_memfree(dev)) {
 		eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
 	} else {
@@ -569,7 +563,7 @@ static int __devinit mthca_create_eq(struct mthca_dev *dev,
 	dev->eq_table.arm_mask |= eq->eqn_mask;
 
 	mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
-		  eq->eqn, nent);
+		  eq->eqn, eq->nent);
 
 	return err;
 
@@ -842,7 +836,7 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
 		dev->eq_table.clr_mask =
 			swab32(1 << (dev->eq_table.inta_pin & 31));
 		dev->eq_table.clr_int = dev->clr_base +
-			(dev->eq_table.inta_pin < 31 ? 4 : 0);
+			(dev->eq_table.inta_pin < 32 ? 4 : 0);
 	}
 
 	dev->eq_table.arm_mask = 0;
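[Annotation, not part of the commit: the open-coded power-of-two loop and roundup_pow_of_two() agree wherever the old code behaved sensibly, and max(nent, 2) adds a floor the old loop lacked. Spot checks:]

	/* old: for (i = 1; i < nent; i <<= 1) ; nent = i;
	 * new: eq->nent = roundup_pow_of_two(max(nent, 2));
	 *
	 * nent = 5:  old 1->2->4->8 = 8     new roundup_pow_of_two(5) = 8
	 * nent = 8:  old 8                  new 8
	 * nent = 1:  old 1                  new 2  (minimum EQ size enforced)
	 */

[Storing the rounded value in eq->nent up front, rather than in the local nent, is what lets the later hunks in this file drop the duplicate bookkeeping.]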
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index ffbcd40418d..23a3f56c789 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -503,6 +503,25 @@ err_free_aux:
 	return err;
 }
 
+static void mthca_free_icms(struct mthca_dev *mdev)
+{
+	u8 status;
+
+	mthca_free_icm_table(mdev, mdev->mcg_table.table);
+	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+		mthca_free_icm_table(mdev, mdev->srq_table.table);
+	mthca_free_icm_table(mdev, mdev->cq_table.table);
+	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
+	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
+	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
+	mthca_unmap_eq_icm(mdev);
+
+	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+}
+
 static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 {
 	struct mthca_dev_lim        dev_lim;
@@ -580,18 +599,7 @@ static int __devinit mthca_init_arbel(struct mthca_dev *mdev)
 	return 0;
 
 err_free_icm:
-	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-		mthca_free_icm_table(mdev, mdev->srq_table.table);
-	mthca_free_icm_table(mdev, mdev->cq_table.table);
-	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-	mthca_unmap_eq_icm(mdev);
-
-	mthca_UNMAP_ICM_AUX(mdev, &status);
-	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+	mthca_free_icms(mdev);
 
 err_stop_fw:
 	mthca_UNMAP_FA(mdev, &status);
@@ -611,18 +619,7 @@ static void mthca_close_hca(struct mthca_dev *mdev)
 	mthca_CLOSE_HCA(mdev, 0, &status);
 
 	if (mthca_is_memfree(mdev)) {
-		if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
-			mthca_free_icm_table(mdev, mdev->srq_table.table);
-		mthca_free_icm_table(mdev, mdev->cq_table.table);
-		mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
-		mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
-		mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
-		mthca_unmap_eq_icm(mdev);
-
-		mthca_UNMAP_ICM_AUX(mdev, &status);
-		mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+		mthca_free_icms(mdev);
 
 		mthca_UNMAP_FA(mdev, &status);
 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 1827400f189..7bd7a4bec7b 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -290,7 +290,7 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
 	int i;
 	u8 status;
 
-	num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE;
+	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;
 
 	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
 	if (!table)
@@ -529,12 +529,25 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
 			goto found;
 		}
 
+	for (i = start; i != end; i += dir)
+		if (!dev->db_tab->page[i].db_rec) {
+			page = dev->db_tab->page + i;
+			goto alloc;
+		}
+
 	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
+	if (group == 0)
+		++dev->db_tab->max_group1;
+	else
+		--dev->db_tab->min_group2;
+
 	page = dev->db_tab->page + end;
+
+alloc:
 	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096,
 					  &page->mapping, GFP_KERNEL);
 	if (!page->db_rec) {
@@ -554,10 +567,6 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, __be32 **db)
 	}
 
 	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);
-	if (group == 0)
-		++dev->db_tab->max_group1;
-	else
-		--dev->db_tab->min_group2;
 
 found:
 	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
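[Annotation, not part of the commit: the num_icm change is the standard round-up division idiom, (a + b - 1) / b; the old truncating division dropped the final partial chunk whenever obj_size * nobj was not an exact multiple of MTHCA_TABLE_CHUNK_SIZE. A tiny worked case:]

	/* obj_size * nobj = 10, chunk size = 4:
	 *   old: 10 / 4           = 2 chunks  (the last 2 objects had no backing)
	 *   new: (10 + 4 - 1) / 4 = 3 chunks
	 */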
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1c1c2e23087..3f5319a4657 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -84,7 +84,7 @@ static int mthca_query_device(struct ib_device *ibdev,
 	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
 	props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
-	props->hw_ver = be16_to_cpup((__be16 *) (out_mad->data + 32));
+	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
 	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 	memcpy(&props->node_guid, out_mad->data + 12, 8);
 
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index bcef06bf15e..5fa00669f9b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -227,7 +227,6 @@ static void mthca_wq_init(struct mthca_wq *wq)
 	wq->last_comp = wq->max - 1;
 	wq->head = 0;
 	wq->tail = 0;
-	wq->last = NULL;
 }
 
 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
@@ -687,7 +686,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	}
 
 	if (attr_mask & IB_QP_TIMEOUT) {
-		qp_context->pri_path.ackto = attr->timeout;
+		qp_context->pri_path.ackto = attr->timeout << 3;
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
 	}
 
@@ -1103,6 +1102,9 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 		}
 	}
 
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
+
 	return 0;
 }
 
@@ -1583,15 +1585,13 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (prev_wqe) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
 
 		if (!size0) {
 			size0 = size;
@@ -1688,13 +1688,11 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 		qp->wrid[ind] = wr->wr_id;
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0)
 			size0 = size;
@@ -1905,15 +1903,13 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32(((ind << qp->sq.wqe_shift) +
-					     qp->send_wqe_offset) |
-					    mthca_opcode[wr->opcode]);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD | size);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32(((ind << qp->sq.wqe_shift) +
+				     qp->send_wqe_offset) |
+				    mthca_opcode[wr->opcode]);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD | size);
 
 		if (!size0) {
 			size0 = size;
@@ -2127,5 +2123,6 @@ void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
 	for (i = 0; i < 2; ++i)
 		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
 
+	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
 	mthca_alloc_cleanup(&dev->qp_table.alloc);
 }
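[Annotation, not part of the commit: the prev_wqe hunks can drop their NULL checks because mthca_alloc_qp_common() now points sq.last/rq.last at the queue's final WQE up front (third hunk above), so there is always a previous descriptor to link through. The retained body is the usual two-phase publish: write the link word, barrier, then the word that makes the descriptor visible to the HCA:]

	((struct mthca_next_seg *) prev_wqe)->nda_op =	/* 1: link to the new WQE */
		cpu_to_be32(((ind << qp->sq.wqe_shift) +
			     qp->send_wqe_offset) | mthca_opcode[wr->opcode]);
	wmb();						/* 2: order link before handover */
	((struct mthca_next_seg *) prev_wqe)->ee_nds =	/* 3: size/DBD hands it to HW */
		cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);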
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 75cd2d84ef1..18998d48c53 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -172,6 +172,8 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
 		scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
 	}
 
+	srq->last = get_wqe(srq, srq->max - 1);
+
 	return 0;
 }
 
@@ -189,7 +191,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 
 	srq->max = attr->max_wr;
 	srq->max_gs = attr->max_sge;
-	srq->last = NULL;
 	srq->counter = 0;
 
 	if (mthca_is_memfree(dev))
@@ -409,7 +410,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -427,7 +428,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			err = -EINVAL;
 			*bad_wr = wr;
 			srq->last = prev_wqe;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
@@ -446,20 +447,16 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			((struct mthca_data_seg *) wqe)->addr = 0;
 		}
 
-		if (likely(prev_wqe)) {
-			((struct mthca_next_seg *) prev_wqe)->nda_op =
-				cpu_to_be32((ind << srq->wqe_shift) | 1);
-			wmb();
-			((struct mthca_next_seg *) prev_wqe)->ee_nds =
-				cpu_to_be32(MTHCA_NEXT_DBD);
-		}
+		((struct mthca_next_seg *) prev_wqe)->nda_op =
+			cpu_to_be32((ind << srq->wqe_shift) | 1);
+		wmb();
+		((struct mthca_next_seg *) prev_wqe)->ee_nds =
+			cpu_to_be32(MTHCA_NEXT_DBD);
 
 		srq->wrid[ind] = wr->wr_id;
 		srq->first_free = next_ind;
 	}
 
-	return nreq;
-
 	if (likely(nreq)) {
 		__be32 doorbell[2];
 
@@ -503,7 +500,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
 			err = -ENOMEM;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		wqe = get_wqe(srq, ind);
@@ -519,7 +516,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		if (unlikely(wr->num_sge > srq->max_gs)) {
 			err = -EINVAL;
 			*bad_wr = wr;
-			return nreq;
+			break;
 		}
 
 		for (i = 0; i < wr->num_sge; ++i) {
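[Annotation, not part of the commit: converting `return nreq` to `break` matters because code after the loop must still run: the Tavor variant rings the receive doorbell for the nreq WQEs already linked in (the unreachable `return nreq` that sat in front of that block is deleted too), and both variants return err, with *bad_wr marking the failed request. Skeleton of the fixed loop, with illustrative helper names:]

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (queue_full(srq)) {			/* queue_full(): illustrative */
			err = -ENOMEM;
			*bad_wr = wr;
			break;				/* was "return nreq": skipped the doorbell */
		}
		/* build and link the WQE ... */
	}

	if (likely(nreq))
		ring_srq_doorbell(srq, nreq);		/* illustrative; now runs even on error */
	return err;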
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bea960b8191..4ea1c1ca85b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -257,7 +257,7 @@ void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
 
 void ipoib_mcast_restart_task(void *dev_ptr);
 int ipoib_mcast_start_thread(struct net_device *dev);
-int ipoib_mcast_stop_thread(struct net_device *dev);
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ef0e3894863..f7440096b5e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -432,7 +432,7 @@ int ipoib_ib_dev_down(struct net_device *dev)
 		flush_workqueue(ipoib_workqueue);
 	}
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/*
 	 * Flush the multicast groups first so we stop any multicast joins. The
@@ -599,7 +599,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
 
 	ipoib_dbg(priv, "cleaning up ib_dev\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 1);
 
 	/* Delete the broadcast address and the local address */
 	ipoib_mcast_dev_down(dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 49d120d2b92..6c5bf07489f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -474,7 +474,7 @@ err:
 	spin_unlock(&priv->lock);
 }
 
-static void path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
 
@@ -569,7 +569,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (skb->dst && skb->dst->neighbour) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
-			path_lookup(skb, dev);
+			ipoib_path_lookup(skb, dev);
 			goto out;
 		}
 
@@ -1005,6 +1005,7 @@ debug_failed:
 
 register_failed:
 	ib_unregister_event_handler(&priv->event_handler);
+	flush_scheduled_work();
 
 event_failed:
 	ipoib_dev_cleanup(priv->dev);
@@ -1057,6 +1058,7 @@ static void ipoib_remove_one(struct ib_device *device)
 
 	list_for_each_entry_safe(priv, tmp, dev_list, list) {
 		ib_unregister_event_handler(&priv->event_handler);
+		flush_scheduled_work();
 
 		unregister_netdev(priv->dev);
 		ipoib_dev_cleanup(priv->dev);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aca7aea18a6..36ce29836bf 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -145,7 +145,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 
 	mcast->dev = dev;
 	mcast->created = jiffies;
-	mcast->backoff = HZ;
+	mcast->backoff = 1;
 	mcast->logcount = 0;
 
 	INIT_LIST_HEAD(&mcast->list);
@@ -396,7 +396,7 @@ static void ipoib_mcast_join_complete(int status,
 			IPOIB_GID_ARG(mcast->mcmember.mgid), status);
 
 	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
-		mcast->backoff = HZ;
+		mcast->backoff = 1;
 		down(&mcast_mutex);
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -496,7 +496,7 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
 		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
 			queue_delayed_work(ipoib_workqueue,
 					   &priv->mcast_task,
-					   mcast->backoff);
+					   mcast->backoff * HZ);
 		up(&mcast_mutex);
 	} else
 		mcast->query_id = ret;
@@ -598,7 +598,7 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 	return 0;
 }
 
-int ipoib_mcast_stop_thread(struct net_device *dev)
+int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_mcast *mcast;
@@ -610,7 +610,8 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
 	cancel_delayed_work(&priv->mcast_task);
 	up(&mcast_mutex);
 
-	flush_workqueue(ipoib_workqueue);
+	if (flush)
+		flush_workqueue(ipoib_workqueue);
 
 	if (priv->broadcast && priv->broadcast->query) {
 		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
@@ -832,7 +833,7 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
 	ipoib_dbg_mcast(priv, "restarting multicast task\n");
 
-	ipoib_mcast_stop_thread(dev);
+	ipoib_mcast_stop_thread(dev, 0);
 
 	spin_lock_irqsave(&priv->lock, flags);
 
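[Annotation, not part of the commit: mcast->backoff now counts seconds instead of jiffies; only the one spot that feeds queue_delayed_work(), which wants jiffies, converts with * HZ. That keeps the doubling logic elsewhere in this file unit-free, roughly:]

	mcast->backoff = 1;				/* seconds */
	/* on each failed join attempt: */
	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
	/* converted to jiffies only when scheduling: */
	queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
			   mcast->backoff * HZ);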