author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 13:33:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 13:33:42 -0400
commit    0c2fe82a9b106f1c03719783134360586d718a69 (patch)
tree      ec1a18ca49f1e6724ef3a93727f5f73b7df61931 /drivers/infiniband/hw
parent    5f0e685f316a1de6d3af8b23eaf46651faca32ab (diff)
parent    f0e88aeb19dac00ed2e09fd4d39ee65f32d5e968 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes for the 3.4 merge window from Roland Dreier:
 "Nothing big really stands out; by patch count lots of fixes to the
  mlx4 driver plus some cleanups and fixes to the core and other
  drivers."

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (28 commits)
  mlx4_core: Scale size of MTT table with system RAM
  mlx4_core: Allow dynamic MTU configuration for IB ports
  IB/mlx4: Fix info returned when querying IBoE ports
  IB/mlx4: Fix possible missed completion event
  mlx4_core: Report thermal error events
  mlx4_core: Fix one more static exported function
  IB: Change CQE "csum_ok" field to a bit flag
  RDMA/iwcm: Reject connect requests if cmid is not in LISTEN state
  RDMA/cxgb3: Don't pass irq flags to flush_qp()
  mlx4_core: Get rid of redundant ext_port_cap flags
  RDMA/ucma: Fix AB-BA deadlock
  IB/ehca: Fix ilog2() compile failure
  IB: Use central enum for speed instead of hard-coded values
  IB/iser: Post initial receive buffers before sending the final login request
  IB/iser: Free IB connection resources in the proper place
  IB/srp: Consolidate repetitive sysfs code
  IB/srp: Use pr_fmt() and pr_err()/pr_warn()
  IB/core: Fix SDR rates in sysfs
  mlx4: Enforce device max FMR maps in FMR alloc
  IB/mlx4: Set bad_wr for invalid send opcode
  ...
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c  |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c         |  40
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c              |   2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c        |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c         |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c         |   3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c        |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c               |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c             | 130
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c               |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        |   3
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c            |  39
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib.h               |  10
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c       | 107
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c           |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c          |  21
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c            |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_uc.c            |   1
20 files changed, 230 insertions, 150 deletions
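
[Editor's note] Two ib_verbs definitions drive most of the per-driver churn below. The following is a sketch from the "IB: Use central enum for speed" and "IB: Change CQE csum_ok field to a bit flag" commits; the numeric values match the hard-coded constants being replaced in the hunks below, but check include/rdma/ib_verbs.h in the merged tree for the authoritative definitions:

	enum ib_port_speed {
		IB_SPEED_SDR	= 1,	/* replaces hard-coded 1 */
		IB_SPEED_DDR	= 2,	/* replaces hard-coded 2 */
		IB_SPEED_QDR	= 4,	/* replaces hard-coded 4 */
		IB_SPEED_FDR10	= 8,	/* replaces hard-coded 8 */
		IB_SPEED_FDR	= 16,	/* replaces hard-coded 16 */
		IB_SPEED_EDR	= 32	/* replaces hard-coded 32 */
	};

The CQE checksum indication moves from a dedicated struct ib_wc field to a flag bit, so drivers now set "wc->wc_flags |= ... ? IB_WC_IP_CSUM_OK : 0" instead of assigning wc->csum_ok.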
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 12f923d64e42..07eb3a8067d8 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -94,7 +94,7 @@ static int c2_query_port(struct ib_device *ibdev,
 	props->pkey_tbl_len = 1;
 	props->qkey_viol_cntr = 0;
 	props->active_width = 1;
-	props->active_speed = 1;
+	props->active_speed = IB_SPEED_SDR;
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 37c224fc3ad9..0bdf09aa6f42 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1227,7 +1227,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
 	props->active_width = 2;
-	props->active_speed = 2;
+	props->active_speed = IB_SPEED_DDR;
 	props->max_msg_sz = -1;
 
 	return 0;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bea5839d89ee..6de8463f453b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-		       struct iwch_cq *schp, unsigned long *flag)
+		       struct iwch_cq *schp)
 {
 	int count;
 	int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
 	/* take a ref on the qhp since we must release the lock */
 	atomic_inc(&qhp->refcnt);
-	spin_unlock_irqrestore(&qhp->lock, *flag);
+	spin_unlock(&qhp->lock);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&rchp->lock, *flag);
+	spin_lock(&rchp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&rchp->cq);
 	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&rchp->lock, *flag);
+	spin_unlock(&rchp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
-	spin_lock_irqsave(&schp->lock, *flag);
+	spin_lock(&schp->lock);
 	spin_lock(&qhp->lock);
 	cxio_flush_hw_cq(&schp->cq);
 	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
 	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
-	spin_unlock_irqrestore(&schp->lock, *flag);
+	spin_unlock(&schp->lock);
 	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+		spin_lock(&schp->comp_handler_lock);
 		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+		spin_unlock(&schp->comp_handler_lock);
 	}
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
 		wake_up(&qhp->wait);
 
-	spin_lock_irqsave(&qhp->lock, *flag);
+	spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
 	struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	if (qhp->ibqp.uobject) {
 		cxio_set_wq_in_error(&qhp->wq);
 		cxio_set_cq_in_error(&rchp->cq);
-		spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+		spin_lock(&rchp->comp_handler_lock);
 		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+		spin_unlock(&rchp->comp_handler_lock);
 		if (schp != rchp) {
 			cxio_set_cq_in_error(&schp->cq);
-			spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+			spin_lock(&schp->comp_handler_lock);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
-			spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+			spin_unlock(&schp->comp_handler_lock);
 		}
 		return;
 	}
-	__flush_qp(qhp, rchp, schp, flag);
+	__flush_qp(qhp, rchp, schp);
 }
 
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			break;
 		case IWCH_QP_STATE_ERROR:
 			qhp->attr.state = IWCH_QP_STATE_ERROR;
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			break;
 		default:
 			ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 		}
 		switch (attrs->next_state) {
 		case IWCH_QP_STATE_IDLE:
-			flush_qp(qhp, &flag);
+			flush_qp(qhp);
 			qhp->attr.state = IWCH_QP_STATE_IDLE;
 			qhp->attr.llp_stream_handle = NULL;
 			put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err:
 	free=1;
 	wake_up(&qhp->wait);
 	BUG_ON(!ep);
-	flush_qp(qhp, &flag);
+	flush_qp(qhp);
 out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0cf61554f176..92b4c2b0308b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * generated when moving QP to RTS state.
 	 * A TERM message will be sent after QP has moved to RTS state
 	 */
-	if ((ep->mpa_attr.version == 2) &&
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
 	    (ep->mpa_attr.p2p_type != p2p_type)) {
 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
 		rtr_mismatch = 1;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 247fe706e7fa..be1c18f44400 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -329,7 +329,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
 	props->active_width = 2;
-	props->active_speed = 2;
+	props->active_speed = IB_SPEED_DDR;
 	props->max_msg_sz = -1;
 
 	return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 73edc3668663..9ed4d2588304 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -233,7 +233,7 @@ int ehca_query_port(struct ib_device *ibdev,
 		props->phys_state = 5;
 		props->state = rblock->state;
 		props->active_width = IB_WIDTH_12X;
-		props->active_speed = 0x1;
+		props->active_speed = IB_SPEED_SDR;
 	}
 
 query_port1:
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index e571e60ecb88..53589000fd07 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
 	spin_lock_init(&cct->task_lock);
 	INIT_LIST_HEAD(&cct->cq_list);
 	init_waitqueue_head(&cct->wait_queue);
-	cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+	cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+					   "ehca_comp/%d", cpu);
 
 	return cct->task;
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 43cae84005f0..b781b2cb0624 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
+	return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5ecf38d97269..77c8cb4c5073 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -720,7 +720,8 @@ repoll:
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
+					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 		if (rdma_port_get_link_layer(wc->qp->device,
 				(*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
 			wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
@@ -747,8 +748,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 			break;
 	}
 
-	if (npolled)
-		mlx4_cq_set_ci(&cq->mcq);
+	mlx4_cq_set_ci(&cq->mcq);
 
 	spin_unlock_irqrestore(&cq->lock, flags);
 
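
[Editor's note] The mlx4 and mthca hunks in this series only cover the producer side of the csum_ok conversion. On the consumer side, a ULP that used to read the now-removed wc.csum_ok field tests the flag instead, roughly like the following illustrative sketch (not part of this diff; IPoIB is the main in-tree consumer):

	if (wc->wc_flags & IB_WC_IP_CSUM_OK)
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* hardware validated the IP checksum */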
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 7b445df6a667..75d305629300 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -163,7 +163,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
-	props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
+	props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 
 out:
 	kfree(in_mad);
@@ -182,12 +182,27 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 }
 
 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
-			      struct ib_port_attr *props,
-			      struct ib_smp *in_mad,
-			      struct ib_smp *out_mad)
+			      struct ib_port_attr *props)
 {
+	struct ib_smp *in_mad = NULL;
+	struct ib_smp *out_mad = NULL;
 	int ext_active_speed;
-	int err;
+	int err = -ENOMEM;
+
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	init_query_mad(in_mad);
+	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
+			   in_mad, out_mad);
+	if (err)
+		goto out;
+
 
 	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
 	props->lmc = out_mad->data[34] & 0x7;
@@ -215,34 +230,33 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
 
 		switch (ext_active_speed) {
 		case 1:
-			props->active_speed = 16; /* FDR */
+			props->active_speed = IB_SPEED_FDR;
 			break;
 		case 2:
-			props->active_speed = 32; /* EDR */
+			props->active_speed = IB_SPEED_EDR;
 			break;
 		}
 	}
 
 	/* If reported active speed is QDR, check if is FDR-10 */
-	if (props->active_speed == 4) {
-		if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] &
-		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-			init_query_mad(in_mad);
-			in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
-			in_mad->attr_mod = cpu_to_be32(port);
-
-			err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
-					   NULL, NULL, in_mad, out_mad);
-			if (err)
-				return err;
+	if (props->active_speed == IB_SPEED_QDR) {
+		init_query_mad(in_mad);
+		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
+		in_mad->attr_mod = cpu_to_be32(port);
 
-			/* Checking LinkSpeedActive for FDR-10 */
-			if (out_mad->data[15] & 0x1)
-				props->active_speed = 8;
-		}
-	}
+		err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
+				   NULL, NULL, in_mad, out_mad);
+		if (err)
+			return err;
 
-	return 0;
+		/* Checking LinkSpeedActive for FDR-10 */
+		if (out_mad->data[15] & 0x1)
+			props->active_speed = IB_SPEED_FDR10;
+	}
+out:
+	kfree(in_mad);
+	kfree(out_mad);
+	return err;
 }
 
 static u8 state_to_phys_state(enum ib_port_state state)
@@ -251,32 +265,42 @@ static u8 state_to_phys_state(enum ib_port_state state)
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
-			       struct ib_port_attr *props,
-			       struct ib_smp *out_mad)
+			       struct ib_port_attr *props)
 {
-	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+
+	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
+	struct mlx4_ib_iboe *iboe = &mdev->iboe;
 	struct net_device *ndev;
 	enum ib_mtu tmp;
+	struct mlx4_cmd_mailbox *mailbox;
+	int err = 0;
+
+	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+
+	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+			   MLX4_CMD_WRAPPED);
+	if (err)
+		goto out;
 
-	props->active_width = IB_WIDTH_1X;
-	props->active_speed = 4;
+	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
+				IB_WIDTH_4X : IB_WIDTH_1X;
+	props->active_speed = IB_SPEED_QDR;
 	props->port_cap_flags = IB_PORT_CM_SUP;
-	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
-	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
+	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
+	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
 	props->pkey_tbl_len = 1;
-	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
-	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
 	props->max_mtu = IB_MTU_4096;
-	props->subnet_timeout = 0;
-	props->max_vl_num = out_mad->data[37] >> 4;
-	props->init_type_reply = 0;
+	props->max_vl_num = 2;
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
 	spin_lock(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
 	if (!ndev)
-		goto out;
+		goto out_unlock;
 
 	tmp = iboe_get_mtu(ndev->mtu);
 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
@@ -284,41 +308,23 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
-
-out:
+out_unlock:
 	spin_unlock(&iboe->lock);
-	return 0;
+out:
+	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+	return err;
 }
 
 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 			      struct ib_port_attr *props)
 {
-	struct ib_smp *in_mad = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
-
-	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
-	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
+	int err;
 
 	memset(props, 0, sizeof *props);
 
-	init_query_mad(in_mad);
-	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
-
-	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
-
 	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
-		ib_link_query_port(ibdev, port, props, in_mad, out_mad) :
-		eth_link_query_port(ibdev, port, props, out_mad);
-
-out:
-	kfree(in_mad);
-	kfree(out_mad);
+		ib_link_query_port(ibdev, port, props) :
+		eth_link_query_port(ibdev, port, props);
 
 	return err;
 }
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index aa2aefa4236c..3a7848966627 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1884,6 +1884,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		wmb();
 
 		if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
+			*bad_wr = wr;
 			err = -EINVAL;
 			goto out;
 		}
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 53157b86a1ba..40ba83338155 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -643,7 +643,8 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
 		entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
 		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
 				((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
-		entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
+		entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ?
+					IB_WC_IP_CSUM_OK : 0;
 	}
 
 	entry->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c5e4cb2d3223..71edfbbcce1c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 	case IETF_MPA_V2: {
 		u16 ird_size;
 		u16 ord_size;
+		u16 rtr_ctrl_ird;
+		u16 rtr_ctrl_ord;
+
 		mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
 		mpa_hdr_len += IETF_RTR_MSG_SIZE;
 		cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
 		rtr_msg = &mpa_v2_frame->rtr_msg;
 
 		/* parse rtr message */
-		rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
-		rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
-		ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
-		ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+		rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+		rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+		ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+		ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
 
-		if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+		if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
 			/* send reset */
 			return -EINVAL;
 		}
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
 			}
 		}
 
-		if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+		if (rtr_ctrl_ord & IETF_RDMA0_READ) {
 			cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-		} else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+		} else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
 			cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
 		} else {	/* Not supported RDMA0 operation */
 			return -EINVAL;
@@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 {
 	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
 	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+	u16 ctrl_ird;
+	u16 ctrl_ord;
 
 	/* initialize the upper 5 bytes of the frame */
 	build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -550,31 +555,31 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 
 	/* initialize RTR msg */
-	rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+	ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
 				IETF_NO_IRD_ORD : cm_node->ird_size;
-	rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+	ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
 				IETF_NO_IRD_ORD : cm_node->ord_size;
 
-	rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
-	rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+	ctrl_ird |= IETF_PEER_TO_PEER;
+	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
-		rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
-		rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+		ctrl_ord |= IETF_RDMA0_WRITE;
+		ctrl_ord |= IETF_RDMA0_READ;
 		break;
 	case MPA_KEY_REPLY:
 		switch (cm_node->send_rdma0_op) {
 		case SEND_RDMA_WRITE_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+			ctrl_ord |= IETF_RDMA0_WRITE;
 			break;
 		case SEND_RDMA_READ_ZERO:
-			rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+			ctrl_ord |= IETF_RDMA0_READ;
 			break;
 		}
 	}
-	rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
-	rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+	rtr_msg->ctrl_ird = htons(ctrl_ird);
+	rtr_msg->ctrl_ord = htons(ctrl_ord);
 }
 
 /**
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 0927b5cc65d3..8b8812de4b5c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -597,7 +597,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	props->pkey_tbl_len = 1;
 	props->qkey_viol_cntr = 0;
 	props->active_width = IB_WIDTH_4X;
-	props->active_speed = 1;
+	props->active_speed = IB_SPEED_SDR;
 	props->max_msg_sz = 0x80000000;
 
 	return 0;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b881bdc401f5..6b811e3e8bd1 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -427,6 +427,14 @@ struct qib_verbs_txreq {
 /* how often we check for packet activity for "power on hours (in seconds) */
 #define ACTIVITY_TIMER 5
 
+#define MAX_NAME_SIZE 64
+struct qib_msix_entry {
+	struct msix_entry msix;
+	void *arg;
+	char name[MAX_NAME_SIZE];
+	cpumask_var_t mask;
+};
+
 /* Below is an opaque struct. Each chip (device) can maintain
  * private data needed for its operation, but not germane to the
  * rest of the driver. For convenience, we define another that
@@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
 int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
 		    const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
 int qib_reinit_intr(struct qib_devdata *);
 void qib_enable_intx(struct pci_dev *);
 void qib_nomsi(struct qib_devdata *);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 41e92089e41b..060b96064469 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -541,8 +541,7 @@ struct qib_chip_specific {
 	u32 lastbuf_for_pio;
 	u32 stay_in_freeze;
 	u32 recovery_ports_initted;
-	struct msix_entry *msix_entries;
-	void **msix_arg;
+	struct qib_msix_entry *msix_entries;
 	unsigned long *sendchkenable;
 	unsigned long *sendgrhchk;
 	unsigned long *sendibchk;
@@ -639,24 +638,24 @@ static struct {
 	int lsb;
 	int port; /* 0 if not port-specific, else port # */
 } irq_table[] = {
-	{ QIB_DRV_NAME, qib_7322intr, -1, 0 },
-	{ QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+	{ "", qib_7322intr, -1, 0 },
+	{ " (buf avail)", qib_7322bufavail,
 		SYM_LSB(IntStatus, SendBufAvail), 0 },
-	{ QIB_DRV_NAME " (sdma 0)", sdma_intr,
+	{ " (sdma 0)", sdma_intr,
 		SYM_LSB(IntStatus, SDmaInt_0), 1 },
-	{ QIB_DRV_NAME " (sdma 1)", sdma_intr,
+	{ " (sdma 1)", sdma_intr,
 		SYM_LSB(IntStatus, SDmaInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+	{ " (sdmaI 0)", sdma_idle_intr,
 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+	{ " (sdmaI 1)", sdma_idle_intr,
 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+	{ " (sdmaP 0)", sdma_progress_intr,
 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
-	{ QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+	{ " (sdmaP 1)", sdma_progress_intr,
 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
-	{ QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+	{ " (sdmaC 0)", sdma_cleanup_intr,
 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
-	{ QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+	{ " (sdmaC 1)", sdma_cleanup_intr,
 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
 };
 
@@ -2567,9 +2566,13 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
 		int i;
 
 		dd->cspec->num_msix_entries = 0;
-		for (i = 0; i < n; i++)
-			free_irq(dd->cspec->msix_entries[i].vector,
-				 dd->cspec->msix_arg[i]);
+		for (i = 0; i < n; i++) {
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[i].msix.vector, NULL);
+			free_cpumask_var(dd->cspec->msix_entries[i].mask);
+			free_irq(dd->cspec->msix_entries[i].msix.vector,
+				 dd->cspec->msix_entries[i].arg);
+		}
 		qib_nomsix(dd);
 	}
 	/* make sure no MSIx interrupts are left pending */
@@ -2597,7 +2600,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
 	kfree(dd->cspec->sendgrhchk);
 	kfree(dd->cspec->sendibchk);
 	kfree(dd->cspec->msix_entries);
-	kfree(dd->cspec->msix_arg);
 	for (i = 0; i < dd->num_pports; i++) {
 		unsigned long flags;
 		u32 mask = QSFP_GPIO_MOD_PRS_N |
@@ -3070,6 +3072,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
 	int ret, i, msixnum;
 	u64 redirect[6];
 	u64 mask;
+	const struct cpumask *local_mask;
+	int firstcpu, secondcpu = 0, currrcvcpu = 0;
 
 	if (!dd->num_pports)
 		return;
@@ -3118,13 +3122,28 @@ try_intx:
 	memset(redirect, 0, sizeof redirect);
 	mask = ~0ULL;
 	msixnum = 0;
+	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+	firstcpu = cpumask_first(local_mask);
+	if (firstcpu >= nr_cpu_ids ||
+			cpumask_weight(local_mask) == num_online_cpus()) {
+		local_mask = topology_core_cpumask(0);
+		firstcpu = cpumask_first(local_mask);
+	}
+	if (firstcpu < nr_cpu_ids) {
+		secondcpu = cpumask_next(firstcpu, local_mask);
+		if (secondcpu >= nr_cpu_ids)
+			secondcpu = firstcpu;
+		currrcvcpu = secondcpu;
+	}
 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
 		irq_handler_t handler;
-		const char *name;
 		void *arg;
 		u64 val;
 		int lsb, reg, sh;
 
+		dd->cspec->msix_entries[msixnum].
+			name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
+			= '\0';
 		if (i < ARRAY_SIZE(irq_table)) {
 			if (irq_table[i].port) {
 				/* skip if for a non-configured port */
@@ -3135,7 +3154,11 @@ try_intx:
 			arg = dd;
 			lsb = irq_table[i].lsb;
 			handler = irq_table[i].handler;
-			name = irq_table[i].name;
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d%s", dd->unit,
+				irq_table[i].name);
 		} else {
 			unsigned ctxt;
 
@@ -3148,23 +3171,28 @@ try_intx:
 				continue;
 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
 			handler = qib_7322pintr;
-			name = QIB_DRV_NAME " (kctx)";
+			snprintf(dd->cspec->msix_entries[msixnum].name,
+				sizeof(dd->cspec->msix_entries[msixnum].name)
+				 - 1,
+				QIB_DRV_NAME "%d (kctx)", dd->unit);
 		}
-		ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
-				  handler, 0, name, arg);
+		ret = request_irq(
+			dd->cspec->msix_entries[msixnum].msix.vector,
+			handler, 0, dd->cspec->msix_entries[msixnum].name,
+			arg);
 		if (ret) {
 			/*
 			 * Shouldn't happen since the enable said we could
 			 * have as many as we are trying to setup here.
 			 */
 			qib_dev_err(dd, "Couldn't setup MSIx "
 				    "interrupt (vec=%d, irq=%d): %d\n", msixnum,
-				    dd->cspec->msix_entries[msixnum].vector,
+				    dd->cspec->msix_entries[msixnum].msix.vector,
 				    ret);
 			qib_7322_nomsix(dd);
 			goto try_intx;
 		}
-		dd->cspec->msix_arg[msixnum] = arg;
+		dd->cspec->msix_entries[msixnum].arg = arg;
 		if (lsb >= 0) {
 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -3174,6 +3202,25 @@ try_intx:
 		}
 		val = qib_read_kreg64(dd, 2 * msixnum + 1 +
 			(QIB_7322_MsixTable_OFFS / sizeof(u64)));
+		if (firstcpu < nr_cpu_ids &&
+			zalloc_cpumask_var(
+			&dd->cspec->msix_entries[msixnum].mask,
+			GFP_KERNEL)) {
+			if (handler == qib_7322pintr) {
+				cpumask_set_cpu(currrcvcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+				currrcvcpu = cpumask_next(currrcvcpu,
+					local_mask);
+				if (currrcvcpu >= nr_cpu_ids)
+					currrcvcpu = secondcpu;
+			} else {
+				cpumask_set_cpu(firstcpu,
+					dd->cspec->msix_entries[msixnum].mask);
+			}
+			irq_set_affinity_hint(
+				dd->cspec->msix_entries[msixnum].msix.vector,
+				dd->cspec->msix_entries[msixnum].mask);
+		}
 		msixnum++;
 	}
 	/* Initialize the vector mapping */
@@ -3365,7 +3412,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
 	if (msix_entries) {
 		/* restore the MSIx vector address and data if saved above */
 		for (i = 0; i < msix_entries; i++) {
-			dd->cspec->msix_entries[i].entry = i;
+			dd->cspec->msix_entries[i].msix.entry = i;
 			if (!msix_vecsave || !msix_vecsave[2 * i])
 				continue;
 			qib_write_kreg(dd, 2 * i +
@@ -6865,15 +6912,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 
 	tabsize = actual_cnt;
 	dd->cspec->msix_entries = kmalloc(tabsize *
-			sizeof(struct msix_entry), GFP_KERNEL);
-	dd->cspec->msix_arg = kmalloc(tabsize *
-			sizeof(void *), GFP_KERNEL);
-	if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+			sizeof(struct qib_msix_entry), GFP_KERNEL);
+	if (!dd->cspec->msix_entries) {
 		qib_dev_err(dd, "No memory for MSIx table\n");
 		tabsize = 0;
 	}
 	for (i = 0; i < tabsize; i++)
-		dd->cspec->msix_entries[i].entry = i;
+		dd->cspec->msix_entries[i].msix.entry = i;
 
 	if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
 		qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 3b3745f261f0..c4ff788823b5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -433,7 +433,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	struct qib_pportdata *ppd;
 	struct qib_ibport *ibp;
 	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-	u16 lid;
 	u8 mtu;
 	int ret;
 	u32 state;
@@ -469,8 +468,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	      ibp->mkeyprot == 1))
 		pip->mkey = ibp->mkey;
 	pip->gid_prefix = ibp->gid_prefix;
-	lid = ppd->lid;
-	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+	pip->lid = cpu_to_be16(ppd->lid);
 	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
 	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
 	/* pip->diag_code; */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 0fde788e1100..790646ef5106 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -194,11 +194,24 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
 }
 
 static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
-			   struct msix_entry *msix_entry)
+			   struct qib_msix_entry *qib_msix_entry)
 {
 	int ret;
 	u32 tabsize = 0;
 	u16 msix_flags;
+	struct msix_entry *msix_entry;
+	int i;
+
+	/* We can't pass qib_msix_entry array to qib_msix_setup
+	 * so use a dummy msix_entry array and copy the allocated
+	 * irq back to the qib_msix_entry array. */
+	msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL);
+	if (!msix_entry) {
+		ret = -ENOMEM;
+		goto do_intx;
+	}
+	for (i = 0; i < *msixcnt; i++)
+		msix_entry[i] = qib_msix_entry[i].msix;
 
 	pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
 	tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
@@ -209,11 +222,15 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
 		tabsize = ret;
 		ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
 	}
+do_intx:
 	if (ret) {
 		qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
 			    "falling back to INTx\n", tabsize, ret);
 		tabsize = 0;
 	}
+	for (i = 0; i < tabsize; i++)
+		qib_msix_entry[i].msix = msix_entry[i];
+	kfree(msix_entry);
 	*msixcnt = tabsize;
 
 	if (ret)
@@ -251,7 +268,7 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos)
 }
 
 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
-		    struct msix_entry *entry)
+		    struct qib_msix_entry *entry)
 {
 	u16 linkstat, speed;
 	int pos = 0, pose, ret = 1;
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 894afac26f3b..765b4cbaa020 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2048,7 +2048,6 @@ send_last:
 		wc.pkey_index = 0;
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
-		wc.csum_ok = 0;
 		/* Signal completion event if the solicited bit is set. */
 		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			     (ohdr->bth[0] &
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 847e7afdfd94..7ce2ac2ed219 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -422,7 +422,6 @@ last_imm:
 		wc.pkey_index = 0;
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
-		wc.csum_ok = 0;
 		/* Signal completion event if the solicited bit is set. */
 		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 			     (ohdr->bth[0] &