author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-11 19:30:08 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-11 19:30:08 -0500
commit    f1d6d6cd9029daa7e7d4a0b14347b5392320f22a
tree      673e1940d385cfc625ed7583b54117ca97216734 /drivers
parent    b9d919a4ac6cf031b8e065f82ad8f1b0c9ed74b1
parent    2b76c05794e66655e10633d2d78287854c991f63
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
  IB/qib: Fix refcount leak in lkey/rkey validation
  IB/qib: Improve SERDES tunning on QMH boards
  IB/qib: Unnecessary delayed completions on RC connection
  IB/qib: Issue pre-emptive NAKs on eager buffer overflow
  IB/qib: RDMA lkey/rkey validation is inefficient for large MRs
  IB/qib: Change QPN increment
  IB/qib: Add fix missing from earlier patch
  IB/qib: Change receive queue/QPN selection
  IB/qib: Fix interrupt mitigation
  IB/qib: Avoid duplicate writes to the rcv head register
  IB/qib: Add a few new SERDES tunings
  IB/qib: Reset packet list after freeing
  IB/qib: New SERDES init routine and improvements to SI quality
  IB/qib: Clear WAIT_SEND flags when setting QP to error state
  IB/qib: Fix context allocation with multiple HCAs
  IB/qib: Fix multi-Florida HCA host panic on reboot
  IB/qib: Handle transitions from ACTIVE_DEFERRED to ACTIVE better
  IB/qib: UD send with immediate receive completion has wrong size
  IB/qib: Set port physical state even if other fields are invalid
  IB/qib: Generate completion callback on errors
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c        |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h   |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c         |  56
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h        |   1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c              |  32
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c    |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c               |   9
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c              |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c       |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c           |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib.h               |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_cq.c            |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_driver.c        | 155
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c      |  10
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c       |   2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c       |   4
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c       | 373
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c          |   6
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c          |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_keys.c          |  80
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c           |  45
-rw-r--r--  drivers/infiniband/hw/qib/qib_mr.c            |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c            |  32
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c            |  24
-rw-r--r--  drivers/infiniband/hw/qib/qib_ud.c            |  57
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_sdma.c     |   1
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h         |  11
-rw-r--r--  drivers/infiniband/ulp/ipoib/Kconfig          |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h          |  12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c       |   1
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ethtool.c  |  51
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c       |   8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c     |  62
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           | 392
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h           |  46
-rw-r--r--  drivers/net/mlx4/alloc.c                      |   3
-rw-r--r--  drivers/net/mlx4/fw.c                         |   4
37 files changed, 889 insertions, 622 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..c5406da3f4cd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d7..1b4cd09f74dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb4337..cc600c2dd0b3 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea1..20800900ef3f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da76..b33f0457a1ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf16..e8df155bc3b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
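
The mlx4_ib_resize_cq() hunk above moves the mlx4_ib_free_cq_buf() call out from under cq->lock: while the spinlock is held the old buffer and CQE count are only stashed, and the actual free happens after the unlock, with tmp_cqe doubling as the "there is something to free" flag. A minimal sketch of that "stash under the lock, free outside it" pattern, using generic userspace names rather than the driver's actual types:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical lock-protected resource; illustrates stashing the old
 * buffer under the lock and freeing it only after the unlock. */
struct resource {
	pthread_mutex_t lock;
	void *buf;
	int entries;
};

static void resource_swap_buf(struct resource *res, void *new_buf, int new_entries)
{
	void *old_buf = NULL;
	int old_entries = 0;

	pthread_mutex_lock(&res->lock);
	old_buf = res->buf;		/* remember the old buffer ... */
	old_entries = res->entries;
	res->buf = new_buf;		/* ... and install the new one */
	res->entries = new_entries;
	pthread_mutex_unlock(&res->lock);

	/* Free only after the lock is dropped; the free may be slow or
	 * may not be legal in the lock's context (e.g. under a spinlock). */
	if (old_entries)
		free(old_buf);
}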
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e2..57ffa50f509e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b0..03a59534f59e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
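
Both MAD hunks above add the same guard: ib_create_send_mad() reports failure through an ERR_PTR-encoded pointer, so the result must be tested with IS_ERR() rather than compared against NULL. A hedged sketch of the calling pattern (the surrounding forward_trap() logic is omitted and the helper name is invented):

/* Sketch only: assumes the usual <linux/err.h> and <rdma/ib_mad.h>
 * declarations; not the drivers' actual trap-forwarding code. */
static void send_trap_sketch(struct ib_mad_agent *agent, u32 qpn)
{
	struct ib_mad_send_buf *send_buf;

	send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))		/* ERR_PTR, not NULL, on failure */
		return;

	/* ... fill in the MAD payload here ... */

	if (ib_post_send_mad(send_buf, NULL))
		ib_free_send_mad(send_buf);
}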
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95a..5a4c36484722 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
 			    nesvnic->nic_index &&
 			    mc_index < max_pft_entries_avaiable) {
 				nes_debug(NES_DBG_NIC_RX,
-					  "mc_index=%d skipping nic_index=%d,\
-					  used for=%d \n", mc_index,
+					  "mc_index=%d skipping nic_index=%d, "
+					  "used for=%d \n", mc_index,
 					  nesvnic->nic_index,
 					  nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d02d4a..73225eee3cc6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@ struct qib_devdata {
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
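
The qib.h hunk above widens the f_update_usrhead member of the per-device ops table with an extra packet-count argument; the qib_iba6120/7220/7322 and qib_driver.c hunks later in this diff update their implementations and call sites in lockstep, since every chip backend fills this table. A toy sketch of why such a table forces all backends to move together (names invented, not the driver's real structures):

#include <stdio.h>

/* Toy per-device ops table: adding a parameter to the function pointer
 * forces every backend implementation and every call site to change in
 * the same series, which is why several files move together here. */
struct chip_ops {
	void (*update_usrhead)(void *ctxt, unsigned long long head,
			       unsigned updegr, unsigned egrhd,
			       unsigned npkts);	/* npkts is the new argument */
};

static void chip_a_update_usrhead(void *ctxt, unsigned long long head,
				  unsigned updegr, unsigned egrhd,
				  unsigned npkts)
{
	/* a real backend would program head/eager registers here */
	printf("head=%llu updegr=%u egrhd=%u npkts=%u\n",
	       head, updegr, egrhd, npkts);
	(void)ctxt;
}

static const struct chip_ops chip_a_ops = {
	.update_usrhead = chip_a_update_usrhead,
};

int main(void)
{
	/* the caller passes the packet count so the backend can tune its
	 * receive-interrupt timeout, as the 7322 code in this merge does */
	chip_a_ops.update_usrhead(NULL, 128, 1, 64, 16);
	return 0;
}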
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf880f98..5246aa486bbe 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd193603fb1..23e584f4c36c 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver");
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RCV is the max # if packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail)
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs premptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		smp_rmb(); /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@ move_along:
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@ move_along:
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@ bail:
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971aff1f..75bfad16c114 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 	/* find device (with ACTIVE ports) with fewest ctxts in use */
 	for (ndev = 0; ndev < devmax; ndev++) {
 		struct qib_devdata *dd = qib_lookup(ndev);
-		unsigned cused = 0, cfree = 0;
+		unsigned cused = 0, cfree = 0, pusable = 0;
 		if (!dd)
 			continue;
 		if (port && port <= dd->num_pports &&
 		    usable(dd->pport + port - 1))
-			dusable = 1;
+			pusable = 1;
 		else
 			for (i = 0; i < dd->num_pports; i++)
 				if (usable(dd->pport + i))
-					dusable++;
-		if (!dusable)
+					pusable++;
+		if (!pusable)
 			continue;
 		for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 		     ctxt++)
@@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 				cused++;
 			else
 				cfree++;
-		if (cfree && cused < inuse) {
+		if (pusable && cfree && cused < inuse) {
 			udd = dd;
 			inuse = cused;
 		}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29dbb9537..774dea897e9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74e7392..127a0d5069f0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd)
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443b5335..dbbb0e85afe4 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
71 71
72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); 72static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); 73static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
74static void serdes_7322_los_enable(struct qib_pportdata *, int);
75static int serdes_7322_init_old(struct qib_pportdata *);
76static int serdes_7322_init_new(struct qib_pportdata *);
74 77
75#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) 78#define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
76 79
@@ -111,6 +114,21 @@ static ushort qib_singleport;
111module_param_named(singleport, qib_singleport, ushort, S_IRUGO); 114module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
112MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); 115MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
113 116
117/*
118 * Receive header queue sizes
119 */
120static unsigned qib_rcvhdrcnt;
121module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
122MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
123
124static unsigned qib_rcvhdrsize;
125module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
126MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
127
128static unsigned qib_rcvhdrentsize;
129module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
130MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
131
114#define MAX_ATTEN_LEN 64 /* plenty for any real system */ 132#define MAX_ATTEN_LEN 64 /* plenty for any real system */
115/* for read back, default index is ~5m copper cable */ 133/* for read back, default index is ~5m copper cable */
116static char txselect_list[MAX_ATTEN_LEN] = "10"; 134static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
544 562
545#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ 563#define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
546#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ 564#define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
565#define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */
547#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ 566#define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
548 567
549#define H1_FORCE_VAL 8 568#define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@ struct qib_chippport_specific {
604 u8 ibmalfusesnap; 623 u8 ibmalfusesnap;
605 struct qib_qsfp_data qsfp_data; 624 struct qib_qsfp_data qsfp_data;
606 char epmsgbuf[192]; /* for port error interrupt msg buffer */ 625 char epmsgbuf[192]; /* for port error interrupt msg buffer */
626 u8 bounced;
607}; 627};
608 628
609static struct { 629static struct {
@@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1677 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { 1697 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1678 force_h1(ppd); 1698 force_h1(ppd);
1679 ppd->cpspec->qdr_reforce = 1; 1699 ppd->cpspec->qdr_reforce = 1;
1700 if (!ppd->dd->cspec->r1)
1701 serdes_7322_los_enable(ppd, 0);
1680 } else if (ppd->cpspec->qdr_reforce && 1702 } else if (ppd->cpspec->qdr_reforce &&
1681 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && 1703 (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1682 (ibclt == IB_7322_LT_STATE_CFGENH || 1704 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1692 ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) 1714 ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1693 adj_tx_serdes(ppd); 1715 adj_tx_serdes(ppd);
1694 1716
1695 if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && 1717 if (ibclt != IB_7322_LT_STATE_LINKUP) {
1696 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { 1718 u8 ltstate = qib_7322_phys_portstate(ibcst);
1697 ppd->cpspec->qdr_dfe_on = 1; 1719 u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1698 ppd->cpspec->qdr_dfe_time = 0; 1720 LinkTrainingState);
1699 /* On link down, reenable QDR adaptation */ 1721 if (!ppd->dd->cspec->r1 &&
1700 qib_write_kreg_port(ppd, krp_static_adapt_dis(2), 1722 pibclt == IB_7322_LT_STATE_LINKUP &&
1701 ppd->dd->cspec->r1 ? 1723 ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1702 QDR_STATIC_ADAPT_DOWN_R1 : 1724 ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1703 QDR_STATIC_ADAPT_DOWN); 1725 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1726 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1727 /* If the link went down (but no into recovery,
1728 * turn LOS back on */
1729 serdes_7322_los_enable(ppd, 1);
1730 if (!ppd->cpspec->qdr_dfe_on &&
1731 ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1732 ppd->cpspec->qdr_dfe_on = 1;
1733 ppd->cpspec->qdr_dfe_time = 0;
1734 /* On link down, reenable QDR adaptation */
1735 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1736 ppd->dd->cspec->r1 ?
1737 QDR_STATIC_ADAPT_DOWN_R1 :
1738 QDR_STATIC_ADAPT_DOWN);
1739 printk(KERN_INFO QIB_DRV_NAME
1740 " IB%u:%u re-enabled QDR adaptation "
1741 "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
1742 }
1704 } 1743 }
1705} 1744}
1706 1745
1746static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1747
1707/* 1748/*
1708 * This is per-pport error handling. 1749 * This is per-pport error handling.
1709 * will likely get it's own MSIx interrupt (one for each port, 1750 * will likely get it's own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1840 IB_PHYSPORTSTATE_DISABLED) 1881 IB_PHYSPORTSTATE_DISABLED)
1841 qib_set_ib_7322_lstate(ppd, 0, 1882 qib_set_ib_7322_lstate(ppd, 0,
1842 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); 1883 QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1843 else 1884 else {
1885 u32 lstate;
1886 /*
1887 * We need the current logical link state before
1888 * lflags are set in handle_e_ibstatuschanged.
1889 */
1890 lstate = qib_7322_iblink_state(ibcs);
1891
1892 if (IS_QMH(dd) && !ppd->cpspec->bounced &&
1893 ltstate == IB_PHYSPORTSTATE_LINKUP &&
1894 (lstate >= IB_PORT_INIT &&
1895 lstate <= IB_PORT_ACTIVE)) {
1896 ppd->cpspec->bounced = 1;
1897 qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
1898 IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
1899 }
1900
1844 /* 1901 /*
1845 * Since going into a recovery state causes the link 1902 * Since going into a recovery state causes the link
1846 * state to go down and since recovery is transitory, 1903 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1854 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && 1911 ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1855 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) 1912 ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1856 qib_handle_e_ibstatuschanged(ppd, ibcs); 1913 qib_handle_e_ibstatuschanged(ppd, ibcs);
1914 }
1857 } 1915 }
1858 if (*msg && iserr) 1916 if (*msg && iserr)
1859 qib_dev_porterr(dd, ppd->port, "%s error\n", msg); 1917 qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data)
2785 ctxtrbits &= ~rmask; 2843 ctxtrbits &= ~rmask;
2786 if (dd->rcd[i]) { 2844 if (dd->rcd[i]) {
2787 qib_kreceive(dd->rcd[i], NULL, &npkts); 2845 qib_kreceive(dd->rcd[i], NULL, &npkts);
2788 adjust_rcv_timeout(dd->rcd[i], npkts);
2789 } 2846 }
2790 } 2847 }
2791 rmask <<= 1; 2848 rmask <<= 1;
@@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data)
2835 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); 2892 (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
2836 2893
2837 qib_kreceive(rcd, NULL, &npkts); 2894 qib_kreceive(rcd, NULL, &npkts);
2838 adjust_rcv_timeout(rcd, npkts);
2839 2895
2840 return IRQ_HANDLED; 2896 return IRQ_HANDLED;
2841} 2897}
@@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
3157 case BOARD_QME7342: 3213 case BOARD_QME7342:
3158 n = "InfiniPath_QME7342"; 3214 n = "InfiniPath_QME7342";
3159 break; 3215 break;
3216 case 8:
3217 n = "InfiniPath_QME7362";
3218 dd->flags |= QIB_HAS_QSFP;
3219 break;
3160 case 15: 3220 case 15:
3161 n = "InfiniPath_QLE7342_TEST"; 3221 n = "InfiniPath_QLE7342_TEST";
3162 dd->flags |= QIB_HAS_QSFP; 3222 dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3475 nchipctxts = qib_read_kreg32(dd, kr_contextcnt); 3535 nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3476 dd->cspec->numctxts = nchipctxts; 3536 dd->cspec->numctxts = nchipctxts;
3477 if (qib_n_krcv_queues > 1 && dd->num_pports) { 3537 if (qib_n_krcv_queues > 1 && dd->num_pports) {
3478 /*
3479 * Set the mask for which bits from the QPN are used
3480 * to select a context number.
3481 */
3482 dd->qpn_mask = 0x3f;
3483 dd->first_user_ctxt = NUM_IB_PORTS + 3538 dd->first_user_ctxt = NUM_IB_PORTS +
3484 (qib_n_krcv_queues - 1) * dd->num_pports; 3539 (qib_n_krcv_queues - 1) * dd->num_pports;
3485 if (dd->first_user_ctxt > nchipctxts) 3540 if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
3530 3585
3531 /* kr_rcvegrcnt changes based on the number of contexts enabled */ 3586 /* kr_rcvegrcnt changes based on the number of contexts enabled */
3532 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); 3587 dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3533 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, 3588 if (qib_rcvhdrcnt)
3534 dd->num_pports > 1 ? 1024U : 2048U); 3589 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3590 else
3591 dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
3592 dd->num_pports > 1 ? 1024U : 2048U);
3535} 3593}
3536 3594
3537static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) 3595static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4002} 4060}
4003 4061
4004static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, 4062static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4005 u32 updegr, u32 egrhd) 4063 u32 updegr, u32 egrhd, u32 npkts)
4006{ 4064{
4065 /*
4066 * Need to write timeout register before updating rcvhdrhead to ensure
4067 * that the timer is enabled on reception of a packet.
4068 */
4069 if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4070 adjust_rcv_timeout(rcd, npkts);
4007 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4071 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4008 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); 4072 qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4009 if (updegr) 4073 if (updegr)
@@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work)
5522 u64 now = get_jiffies_64(); 5586 u64 now = get_jiffies_64();
5523 if (time_after64(now, pwrup)) 5587 if (time_after64(now, pwrup))
5524 break; 5588 break;
5525 msleep(1); 5589 msleep(20);
5526 } 5590 }
5527 ret = qib_refresh_qsfp_cache(ppd, &qd->cache); 5591 ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5528 /* 5592 /*
@@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5579 u32 pidx, unit, port, deflt, h1; 5643 u32 pidx, unit, port, deflt, h1;
5580 unsigned long val; 5644 unsigned long val;
5581 int any = 0, seth1; 5645 int any = 0, seth1;
5646 int txdds_size;
5582 5647
5583 str = txselect_list; 5648 str = txselect_list;
5584 5649
@@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5587 for (pidx = 0; pidx < dd->num_pports; ++pidx) 5652 for (pidx = 0; pidx < dd->num_pports; ++pidx)
5588 dd->pport[pidx].cpspec->no_eep = deflt; 5653 dd->pport[pidx].cpspec->no_eep = deflt;
5589 5654
5655 txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
5656 if (IS_QME(dd) || IS_QMH(dd))
5657 txdds_size += TXDDS_MFG_SZ;
5658
5590 while (*nxt && nxt[1]) { 5659 while (*nxt && nxt[1]) {
5591 str = ++nxt; 5660 str = ++nxt;
5592 unit = simple_strtoul(str, &nxt, 0); 5661 unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
5609 ; 5678 ;
5610 continue; 5679 continue;
5611 } 5680 }
5612 if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) 5681 if (val >= txdds_size)
5613 continue; 5682 continue;
5614 seth1 = 0; 5683 seth1 = 0;
5615 h1 = 0; /* gcc thinks it might be used uninitted */ 5684 h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
5661 return -ENOSPC; 5730 return -ENOSPC;
5662 } 5731 }
5663 val = simple_strtoul(str, &n, 0); 5732 val = simple_strtoul(str, &n, 0);
5664 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { 5733 if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
5734 TXDDS_MFG_SZ)) {
5665 printk(KERN_INFO QIB_DRV_NAME 5735 printk(KERN_INFO QIB_DRV_NAME
5666 "txselect_values must start with a number < %d\n", 5736 "txselect_values must start with a number < %d\n",
5667 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); 5737 TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
5668 return -EINVAL; 5738 return -EINVAL;
5669 } 5739 }
5670 strcpy(txselect_list, str); 5740 strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd)
5810 unsigned n, regno; 5880 unsigned n, regno;
5811 unsigned long flags; 5881 unsigned long flags;
5812 5882
5813 if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) 5883 if (dd->n_krcv_queues < 2 ||
5884 !dd->pport[pidx].link_speed_supported)
5814 continue; 5885 continue;
5815 5886
5816 ppd = &dd->pport[pidx]; 5887 ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6097 ppd++; 6168 ppd++;
6098 } 6169 }
6099 6170
6100 dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; 6171 dd->rcvhdrentsize = qib_rcvhdrentsize ?
6101 dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; 6172 qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6173 dd->rcvhdrsize = qib_rcvhdrsize ?
6174 qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
6102 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); 6175 dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6103 6176
6104 /* we always allocate at least 2048 bytes for eager buffers */ 6177 /* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
6495 /* make sure we see an updated copy next time around */ 6568 /* make sure we see an updated copy next time around */
6496 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); 6569 sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
6497 sleeps++; 6570 sleeps++;
6498 msleep(1); 6571 msleep(20);
6499 } 6572 }
6500 6573
6501 switch (which) { 6574 switch (which) {
@@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
6993 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ 7066 { 0, 1, 0, 12 }, /* QMH7342 backplane settings */
6994}; 7067};
6995 7068
7069static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7070 /* amp, pre, main, post */
7071 { 0, 0, 0, 0 }, /* QME7342 mfg settings */
7072 { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */
7073};
7074
6996static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, 7075static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
6997 unsigned atten) 7076 unsigned atten)
6998{ 7077{
@@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd,
7066 *sdr_dds = &txdds_extra_sdr[idx]; 7145 *sdr_dds = &txdds_extra_sdr[idx];
7067 *ddr_dds = &txdds_extra_ddr[idx]; 7146 *ddr_dds = &txdds_extra_ddr[idx];
7068 *qdr_dds = &txdds_extra_qdr[idx]; 7147 *qdr_dds = &txdds_extra_qdr[idx];
7148 } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7149 ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7150 TXDDS_MFG_SZ)) {
7151 idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7152 printk(KERN_INFO QIB_DRV_NAME
7153 " IB%u:%u use idx %u into txdds_mfg\n",
7154 ppd->dd->unit, ppd->port, idx);
7155 *sdr_dds = &txdds_extra_mfg[idx];
7156 *ddr_dds = &txdds_extra_mfg[idx];
7157 *qdr_dds = &txdds_extra_mfg[idx];
7069 } else { 7158 } else {
7070 /* this shouldn't happen, it's range checked */ 7159 /* this shouldn't happen, it's range checked */
7071 *sdr_dds = txdds_sdr + qib_long_atten; 7160 *sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7210 } 7299 }
7211} 7300}
7212 7301
7302static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7303{
7304 u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7305 printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
7306 ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
7307 if (enable)
7308 data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7309 else
7310 data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7311 qib_write_kreg_port(ppd, krp_serdesctrl, data);
7312}
7313
7213static int serdes_7322_init(struct qib_pportdata *ppd) 7314static int serdes_7322_init(struct qib_pportdata *ppd)
7214{ 7315{
7215 u64 data; 7316 int ret = 0;
7317 if (ppd->dd->cspec->r1)
7318 ret = serdes_7322_init_old(ppd);
7319 else
7320 ret = serdes_7322_init_new(ppd);
7321 return ret;
7322}
7323
7324static int serdes_7322_init_old(struct qib_pportdata *ppd)
7325{
7216 u32 le_val; 7326 u32 le_val;
7217 7327
7218 /* 7328 /*
@@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7270 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ 7380 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7271 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ 7381 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7272 7382
7273 data = qib_read_kreg_port(ppd, krp_serdesctrl); 7383 serdes_7322_los_enable(ppd, 1);
7274 /* Turn off IB latency mode */
7275 data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
7276 qib_write_kreg_port(ppd, krp_serdesctrl, data |
7277 SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
7278 7384
7279 /* rxbistena; set 0 to avoid effects of it switch later */ 7385 /* rxbistena; set 0 to avoid effects of it switch later */
7280 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); 7386 ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd)
7314 return 0; 7420 return 0;
7315} 7421}
7316 7422
7423static int serdes_7322_init_new(struct qib_pportdata *ppd)
7424{
7425 u64 tstart;
7426 u32 le_val, rxcaldone;
7427 int chan, chan_done = (1 << SERDES_CHANS) - 1;
7428
7429 /*
7430 * Initialize the Tx DDS tables. Also done every QSFP event,
7431 * for adapters with QSFP
7432 */
7433 init_txdds_table(ppd, 0);
7434
7435 /* Clear cmode-override, may be set from older driver */
7436 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7437
7438 /* ensure no tx overrides from earlier driver loads */
7439 qib_write_kreg_port(ppd, krp_tx_deemph_override,
7440 SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7441 reset_tx_deemphasis_override));
7442
7443 /* START OF LSI SUGGESTED SERDES BRINGUP */
7444 /* Reset - Calibration Setup */
7445 /* Stop DFE adaptaion */
7446 ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7447 /* Disable LE1 */
7448 ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7449 /* Disable autoadapt for LE1 */
7450 ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7451 /* Disable LE2 */
7452 ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7453 /* Disable VGA */
7454 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7455 /* Disable AFE Offset Cancel */
7456 ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7457 /* Disable Timing Loop */
7458 ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7459 /* Disable Frequency Loop */
7460 ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7461 /* Disable Baseline Wander Correction */
7462 ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7463 /* Disable RX Calibration */
7464 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7465 /* Disable RX Offset Calibration */
7466 ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7467 /* Select BB CDR */
7468 ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7469 /* CDR Step Size */
7470 ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7471 /* Enable phase Calibration */
7472 ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7473 /* DFE Bandwidth [2:14-12] */
7474 ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7475 /* DFE Config (4 taps only) */
7476 ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7477 /* Gain Loop Bandwidth */
7478 if (!ppd->dd->cspec->r1) {
7479 ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
7480 ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
7481 } else {
7482 ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
7483 }
7484 /* Baseline Wander Correction Gain [13:4-0] (leave as default) */
7485 /* Baseline Wander Correction Gain [3:7-5] (leave as default) */
7486 /* Data Rate Select [5:7-6] (leave as default) */
7487 /* RX Parralel Word Width [3:10-8] (leave as default) */
7488
7489 /* RX REST */
7490 /* Single- or Multi-channel reset */
7491 /* RX Analog reset */
7492 /* RX Digital reset */
7493 ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
7494 msleep(20);
7495 /* RX Analog reset */
7496 ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
7497 msleep(20);
7498 /* RX Digital reset */
7499 ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
7500 msleep(20);
7501
7502 /* setup LoS params; these are subsystem, so chan == 5 */
7503 /* LoS filter threshold_count on, ch 0-3, set to 8 */
7504 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7505 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7506 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7507 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7508
7509 /* LoS filter threshold_count off, ch 0-3, set to 4 */
7510 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7511 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7512 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7513 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7514
7515 /* LoS filter select enabled */
7516 ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7517
7518 /* LoS target data: SDR=4, DDR=2, QDR=1 */
7519 ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7520 ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7521 ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7522
7523 /* Turn on LOS on initial SERDES init */
7524 serdes_7322_los_enable(ppd, 1);
7525 /* FLoop LOS gate: PPM filter enabled */
7526 ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7527
7528 /* RX LATCH CALIBRATION */
7529 /* Enable Eyefinder Phase Calibration latch */
7530 ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
7531 /* Enable RX Offset Calibration latch */
7532 ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
7533 msleep(20);
7534 /* Start Calibration */
7535 ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
7536 tstart = get_jiffies_64();
7537 while (chan_done &&
7538 !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
7539 msleep(20);
7540 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7541 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7542 (chan + (chan >> 1)),
7543 25, 0, 0);
7544 if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
7545 (~chan_done & (1 << chan)) == 0)
7546 chan_done &= ~(1 << chan);
7547 }
7548 }
7549 if (chan_done) {
7550 printk(KERN_INFO QIB_DRV_NAME
7551 " Serdes %d calibration not done after .5 sec: 0x%x\n",
7552 IBSD(ppd->hw_pidx), chan_done);
7553 } else {
7554 for (chan = 0; chan < SERDES_CHANS; ++chan) {
7555 rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
7556 (chan + (chan >> 1)),
7557 25, 0, 0);
7558 if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
7559 printk(KERN_INFO QIB_DRV_NAME
7560 " Serdes %d chan %d calibration "
7561 "failed\n", IBSD(ppd->hw_pidx), chan);
7562 }
7563 }
7564
7565 /* Turn off Calibration */
7566 ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7567 msleep(20);
7568
7569 /* BRING RX UP */
7570 /* Set LE2 value (May be overridden in qsfp_7322_event) */
7571 le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7572 ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7573 /* Set LE2 Loop bandwidth */
7574 ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
7575 /* Enable LE2 */
7576 ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
7577 msleep(20);
7578 /* Enable H0 only */
7579 ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
7580 /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7581 le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7582 ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7583 /* Enable VGA */
7584 ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7585 msleep(20);
7586 /* Set Frequency Loop Bandwidth */
7587 ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
7588 /* Enable Frequency Loop */
7589 ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
7590 /* Set Timing Loop Bandwidth */
7591 ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7592 /* Enable Timing Loop */
7593 ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
7594 msleep(50);
7595 /* Enable DFE
7596 * Set receive adaptation mode. SDR and DDR adaptation are
7597 * always on, and QDR is initially enabled; later disabled.
7598 */
7599 qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7600 qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7601 qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7602 ppd->dd->cspec->r1 ?
7603 QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7604 ppd->cpspec->qdr_dfe_on = 1;
7605 /* Disable LE1 */
7606 ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
7607 /* Disable auto adapt for LE1 */
7608 ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
7609 msleep(20);
7610 /* Enable AFE Offset Cancel */
7611 ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
7612 /* Enable Baseline Wander Correction */
7613 ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
7614 /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7615 ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7616 /* VGA output common mode */
7617 ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
7618
7619 return 0;
7620}
7621
7317/* start adjust QMH serdes parameters */ 7622/* start adjust QMH serdes parameters */
7318 7623
7319static void set_man_code(struct qib_pportdata *ppd, int chan, int code) 7624static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b503936043..7896afbb9ce8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@ unsigned long *qib_cpulist;
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828a106..a693c56ec8a6 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
131 /* start a 75msec timer to clear symbol errors */ 131 /* start a 75msec timer to clear symbol errors */
132 mod_timer(&ppd->symerr_clear_timer, 132 mod_timer(&ppd->symerr_clear_timer,
133 msecs_to_jiffies(75)); 133 msecs_to_jiffies(75));
134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { 134 } else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
135 !(ppd->lflags & QIBL_LINKACTIVE)) {
 135		/* active, but not active deferred */		136		/* active, but not active deferred */
136 qib_hol_up(ppd); /* useful only for 6120 now */ 137 qib_hol_up(ppd); /* useful only for 6120 now */
137 *ppd->statusp |= 138 *ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d57..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
136 struct qib_mregion *mr; 136 struct qib_mregion *mr;
137 unsigned n, m; 137 unsigned n, m;
138 size_t off; 138 size_t off;
139 int ret = 0;
140 unsigned long flags; 139 unsigned long flags;
141 140
142 /* 141 /*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
152 if (!dev->dma_mr) 151 if (!dev->dma_mr)
153 goto bail; 152 goto bail;
154 atomic_inc(&dev->dma_mr->refcount); 153 atomic_inc(&dev->dma_mr->refcount);
154 spin_unlock_irqrestore(&rkt->lock, flags);
155
155 isge->mr = dev->dma_mr; 156 isge->mr = dev->dma_mr;
156 isge->vaddr = (void *) sge->addr; 157 isge->vaddr = (void *) sge->addr;
157 isge->length = sge->length; 158 isge->length = sge->length;
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
170 off + sge->length > mr->length || 171 off + sge->length > mr->length ||
171 (mr->access_flags & acc) != acc)) 172 (mr->access_flags & acc) != acc))
172 goto bail; 173 goto bail;
174 atomic_inc(&mr->refcount);
175 spin_unlock_irqrestore(&rkt->lock, flags);
173 176
174 off += mr->offset; 177 off += mr->offset;
175 m = 0; 178 if (mr->page_shift) {
176 n = 0; 179 /*
177 while (off >= mr->map[m]->segs[n].length) { 180 page sizes are uniform power of 2 so no loop is necessary
178 off -= mr->map[m]->segs[n].length; 181 entries_spanned_by_off is the number of times the loop below
179 n++; 182 would have executed.
180 if (n >= QIB_SEGSZ) { 183 */
181 m++; 184 size_t entries_spanned_by_off;
182 n = 0; 185
186 entries_spanned_by_off = off >> mr->page_shift;
187 off -= (entries_spanned_by_off << mr->page_shift);
188 m = entries_spanned_by_off/QIB_SEGSZ;
189 n = entries_spanned_by_off%QIB_SEGSZ;
190 } else {
191 m = 0;
192 n = 0;
193 while (off >= mr->map[m]->segs[n].length) {
194 off -= mr->map[m]->segs[n].length;
195 n++;
196 if (n >= QIB_SEGSZ) {
197 m++;
198 n = 0;
199 }
183 } 200 }
184 } 201 }
185 atomic_inc(&mr->refcount);
186 isge->mr = mr; 202 isge->mr = mr;
187 isge->vaddr = mr->map[m]->segs[n].vaddr + off; 203 isge->vaddr = mr->map[m]->segs[n].vaddr + off;
188 isge->length = mr->map[m]->segs[n].length - off; 204 isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
190 isge->m = m; 206 isge->m = m;
191 isge->n = n; 207 isge->n = n;
192ok: 208ok:
193 ret = 1; 209 return 1;
194bail: 210bail:
195 spin_unlock_irqrestore(&rkt->lock, flags); 211 spin_unlock_irqrestore(&rkt->lock, flags);
196 return ret; 212 return 0;
197} 213}
198 214
199/** 215/**
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
214 struct qib_mregion *mr; 230 struct qib_mregion *mr;
215 unsigned n, m; 231 unsigned n, m;
216 size_t off; 232 size_t off;
217 int ret = 0;
218 unsigned long flags; 233 unsigned long flags;
219 234
220 /* 235 /*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
231 if (!dev->dma_mr) 246 if (!dev->dma_mr)
232 goto bail; 247 goto bail;
233 atomic_inc(&dev->dma_mr->refcount); 248 atomic_inc(&dev->dma_mr->refcount);
249 spin_unlock_irqrestore(&rkt->lock, flags);
250
234 sge->mr = dev->dma_mr; 251 sge->mr = dev->dma_mr;
235 sge->vaddr = (void *) vaddr; 252 sge->vaddr = (void *) vaddr;
236 sge->length = len; 253 sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
248 if (unlikely(vaddr < mr->iova || off + len > mr->length || 265 if (unlikely(vaddr < mr->iova || off + len > mr->length ||
249 (mr->access_flags & acc) == 0)) 266 (mr->access_flags & acc) == 0))
250 goto bail; 267 goto bail;
268 atomic_inc(&mr->refcount);
269 spin_unlock_irqrestore(&rkt->lock, flags);
251 270
252 off += mr->offset; 271 off += mr->offset;
253 m = 0; 272 if (mr->page_shift) {
254 n = 0; 273 /*
255 while (off >= mr->map[m]->segs[n].length) { 274 page sizes are uniform power of 2 so no loop is necessary
256 off -= mr->map[m]->segs[n].length; 275 entries_spanned_by_off is the number of times the loop below
257 n++; 276 would have executed.
258 if (n >= QIB_SEGSZ) { 277 */
259 m++; 278 size_t entries_spanned_by_off;
260 n = 0; 279
280 entries_spanned_by_off = off >> mr->page_shift;
281 off -= (entries_spanned_by_off << mr->page_shift);
282 m = entries_spanned_by_off/QIB_SEGSZ;
283 n = entries_spanned_by_off%QIB_SEGSZ;
284 } else {
285 m = 0;
286 n = 0;
287 while (off >= mr->map[m]->segs[n].length) {
288 off -= mr->map[m]->segs[n].length;
289 n++;
290 if (n >= QIB_SEGSZ) {
291 m++;
292 n = 0;
293 }
261 } 294 }
262 } 295 }
263 atomic_inc(&mr->refcount);
264 sge->mr = mr; 296 sge->mr = mr;
265 sge->vaddr = mr->map[m]->segs[n].vaddr + off; 297 sge->vaddr = mr->map[m]->segs[n].vaddr + off;
266 sge->length = mr->map[m]->segs[n].length - off; 298 sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
268 sge->m = m; 300 sge->m = m;
269 sge->n = n; 301 sge->n = n;
270ok: 302ok:
271 ret = 1; 303 return 1;
272bail: 304bail:
273 spin_unlock_irqrestore(&rkt->lock, flags); 305 spin_unlock_irqrestore(&rkt->lock, flags);
274 return ret; 306 return 0;
275} 307}
276 308
277/* 309/*
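The new fast path in qib_lkey_ok()/qib_rkey_ok() replaces the per-segment walk with direct arithmetic whenever every segment is one power-of-two page. A minimal standalone sketch of that index computation, assuming segs_per_map plays the role of QIB_SEGSZ and the other names are illustrative:

#include <linux/types.h>

/*
 * Map a byte offset to (map index m, segment index n, residual offset)
 * when every segment is 1 << page_shift bytes long.
 */
static void sketch_offset_to_seg(size_t off, unsigned int page_shift,
				 unsigned int segs_per_map,
				 unsigned int *m, unsigned int *n, size_t *rem)
{
	size_t entries = off >> page_shift;	/* whole segments before off */

	*rem = off - (entries << page_shift);	/* offset inside that segment */
	*m = entries / segs_per_map;
	*n = entries % segs_per_map;
}

For page_shift = 12 and off = 0x5000, entries is 5, so the sixth segment is selected with a zero residual, the same answer the removed while loop produced one iteration at a time.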
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f3a8f0..5ad224e4a38b 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
668 lid = be16_to_cpu(pip->lid); 668 lid = be16_to_cpu(pip->lid);
669 /* Must be a valid unicast LID address. */ 669 /* Must be a valid unicast LID address. */
670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) 670 if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
671 goto err; 671 smp->status |= IB_SMP_INVALID_FIELD;
672 if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { 672 else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
673 if (ppd->lid != lid) 673 if (ppd->lid != lid)
674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); 674 qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) 675 if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
683 msl = pip->neighbormtu_mastersmsl & 0xF; 683 msl = pip->neighbormtu_mastersmsl & 0xF;
684 /* Must be a valid unicast LID address. */ 684 /* Must be a valid unicast LID address. */
685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) 685 if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
686 goto err; 686 smp->status |= IB_SMP_INVALID_FIELD;
687 if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { 687 else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
688 spin_lock_irqsave(&ibp->lock, flags); 688 spin_lock_irqsave(&ibp->lock, flags);
689 if (ibp->sm_ah) { 689 if (ibp->sm_ah) {
690 if (smlid != ibp->sm_lid) 690 if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
707 if (lwe == 0xFF) 707 if (lwe == 0xFF)
708 lwe = ppd->link_width_supported; 708 lwe = ppd->link_width_supported;
709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) 709 else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
710 goto err; 710 smp->status |= IB_SMP_INVALID_FIELD;
711 set_link_width_enabled(ppd, lwe); 711 else if (lwe != ppd->link_width_enabled)
712 set_link_width_enabled(ppd, lwe);
712 } 713 }
713 714
714 lse = pip->linkspeedactive_enabled & 0xF; 715 lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
721 if (lse == 15) 722 if (lse == 15)
722 lse = ppd->link_speed_supported; 723 lse = ppd->link_speed_supported;
723 else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) 724 else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
724 goto err; 725 smp->status |= IB_SMP_INVALID_FIELD;
725 set_link_speed_enabled(ppd, lse); 726 else if (lse != ppd->link_speed_enabled)
727 set_link_speed_enabled(ppd, lse);
726 } 728 }
727 729
728 /* Set link down default state. */ 730 /* Set link down default state. */
@@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
738 IB_LINKINITCMD_POLL); 740 IB_LINKINITCMD_POLL);
739 break; 741 break;
740 default: 742 default:
741 goto err; 743 smp->status |= IB_SMP_INVALID_FIELD;
742 } 744 }
743 745
744 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; 746 ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
748 750
749 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); 751 mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
750 if (mtu == -1) 752 if (mtu == -1)
751 goto err; 753 smp->status |= IB_SMP_INVALID_FIELD;
752 qib_set_mtu(ppd, mtu); 754 else
755 qib_set_mtu(ppd, mtu);
753 756
754 /* Set operational VLs */ 757 /* Set operational VLs */
755 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; 758 vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
756 if (vls) { 759 if (vls) {
757 if (vls > ppd->vls_supported) 760 if (vls > ppd->vls_supported)
758 goto err; 761 smp->status |= IB_SMP_INVALID_FIELD;
759 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); 762 else
763 (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
760 } 764 }
761 765
762 if (pip->mkey_violations == 0) 766 if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
770 774
771 ore = pip->localphyerrors_overrunerrors; 775 ore = pip->localphyerrors_overrunerrors;
772 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) 776 if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
773 goto err; 777 smp->status |= IB_SMP_INVALID_FIELD;
774 778
775 if (set_overrunthreshold(ppd, (ore & 0xF))) 779 if (set_overrunthreshold(ppd, (ore & 0xF)))
776 goto err; 780 smp->status |= IB_SMP_INVALID_FIELD;
777 781
778 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; 782 ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
779 783
@@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
792 state = pip->linkspeed_portstate & 0xF; 796 state = pip->linkspeed_portstate & 0xF;
793 lstate = (pip->portphysstate_linkdown >> 4) & 0xF; 797 lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
794 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) 798 if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
795 goto err; 799 smp->status |= IB_SMP_INVALID_FIELD;
796 800
797 /* 801 /*
798 * Only state changes of DOWN, ARM, and ACTIVE are valid 802 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
812 lstate = QIB_IB_LINKDOWN; 816 lstate = QIB_IB_LINKDOWN;
813 else if (lstate == 3) 817 else if (lstate == 3)
814 lstate = QIB_IB_LINKDOWN_DISABLE; 818 lstate = QIB_IB_LINKDOWN_DISABLE;
815 else 819 else {
816 goto err; 820 smp->status |= IB_SMP_INVALID_FIELD;
821 break;
822 }
817 spin_lock_irqsave(&ppd->lflags_lock, flags); 823 spin_lock_irqsave(&ppd->lflags_lock, flags);
818 ppd->lflags &= ~QIBL_LINKV; 824 ppd->lflags &= ~QIBL_LINKV;
819 spin_unlock_irqrestore(&ppd->lflags_lock, flags); 825 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
835 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); 841 qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
836 break; 842 break;
837 default: 843 default:
838 /* XXX We have already partially updated our state! */ 844 smp->status |= IB_SMP_INVALID_FIELD;
839 goto err;
840 } 845 }
841 846
842 ret = subn_get_portinfo(smp, ibdev, port); 847 ret = subn_get_portinfo(smp, ibdev, port);
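Throughout this hunk a bad field no longer aborts the whole PortInfo set after state has been partially updated; it only sets IB_SMP_INVALID_FIELD in the MAD status, and the remaining valid fields are still applied. A hedged sketch of that pattern (the helper and its parameters are illustrative, not driver API):

#include <linux/types.h>

/* Flag an invalid field in the MAD status but keep applying the valid ones. */
static void sketch_apply_field(__be16 *mad_status, __be16 invalid_flag,
			       bool valid, void (*apply_fn)(void *ctx), void *ctx)
{
	if (!valid)
		*mad_status |= invalid_flag;	/* e.g. IB_SMP_INVALID_FIELD */
	else
		apply_fn(ctx);			/* the rest of the set still lands */
}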
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f6385d..08944e2ee334 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
39/* Fast memory region */ 39/* Fast memory region */
40struct qib_fmr { 40struct qib_fmr {
41 struct ib_fmr ibfmr; 41 struct ib_fmr ibfmr;
42 u8 page_shift;
43 struct qib_mregion mr; /* must be last */ 42 struct qib_mregion mr; /* must be last */
44}; 43};
45 44
@@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
107 goto bail; 106 goto bail;
108 } 107 }
109 mr->mr.mapsz = m; 108 mr->mr.mapsz = m;
109 mr->mr.page_shift = 0;
110 mr->mr.max_segs = count; 110 mr->mr.max_segs = count;
111 111
112 /* 112 /*
@@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
231 mr->mr.access_flags = mr_access_flags; 231 mr->mr.access_flags = mr_access_flags;
232 mr->umem = umem; 232 mr->umem = umem;
233 233
234 if (is_power_of_2(umem->page_size))
235 mr->mr.page_shift = ilog2(umem->page_size);
234 m = 0; 236 m = 0;
235 n = 0; 237 n = 0;
236 list_for_each_entry(chunk, &umem->chunk_list, list) { 238 list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
390 fmr->mr.offset = 0; 392 fmr->mr.offset = 0;
391 fmr->mr.access_flags = mr_access_flags; 393 fmr->mr.access_flags = mr_access_flags;
392 fmr->mr.max_segs = fmr_attr->max_pages; 394 fmr->mr.max_segs = fmr_attr->max_pages;
393 fmr->page_shift = fmr_attr->page_shift; 395 fmr->mr.page_shift = fmr_attr->page_shift;
394 396
395 atomic_set(&fmr->mr.refcount, 0); 397 atomic_set(&fmr->mr.refcount, 0);
396 ret = &fmr->ibfmr; 398 ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
437 spin_lock_irqsave(&rkt->lock, flags); 439 spin_lock_irqsave(&rkt->lock, flags);
438 fmr->mr.user_base = iova; 440 fmr->mr.user_base = iova;
439 fmr->mr.iova = iova; 441 fmr->mr.iova = iova;
440 ps = 1 << fmr->page_shift; 442 ps = 1 << fmr->mr.page_shift;
441 fmr->mr.length = list_len * ps; 443 fmr->mr.length = list_len * ps;
442 m = 0; 444 m = 0;
443 n = 0; 445 n = 0;
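With page_shift moved into struct qib_mregion, both registration paths record it the same way: a uniform power-of-two page size is stored as its log2, and 0 means no uniform size, which routes lookups back to the segment walk. A small sketch using the kernel helpers is_power_of_2() and ilog2():

#include <linux/log2.h>
#include <linux/types.h>

/* 4096 -> 12, 8192 -> 13, any non power-of-two size -> 0. */
static u8 sketch_page_shift(unsigned long page_size)
{
	return is_power_of_2(page_size) ? ilog2(page_size) : 0;
}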
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851d2ded..e16751f8639e 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
48 48
49static inline unsigned find_next_offset(struct qib_qpn_table *qpt, 49static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
50 struct qpn_map *map, unsigned off, 50 struct qpn_map *map, unsigned off,
51 unsigned r) 51 unsigned n)
52{ 52{
53 if (qpt->mask) { 53 if (qpt->mask) {
54 off++; 54 off++;
55 if ((off & qpt->mask) >> 1 != r) 55 if (((off & qpt->mask) >> 1) >= n)
56 off = ((off & qpt->mask) ? 56 off = (off | qpt->mask) + 2;
57 (off | qpt->mask) + 1 : off) | (r << 1);
58 } else 57 } else
59 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); 58 off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 return off; 59 return off;
@@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
123 u32 i, offset, max_scan, qpn; 122 u32 i, offset, max_scan, qpn;
124 struct qpn_map *map; 123 struct qpn_map *map;
125 u32 ret; 124 u32 ret;
126 int r;
127 125
128 if (type == IB_QPT_SMI || type == IB_QPT_GSI) { 126 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
129 unsigned n; 127 unsigned n;
@@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
139 goto bail; 137 goto bail;
140 } 138 }
141 139
142 r = smp_processor_id(); 140 qpn = qpt->last + 2;
143 if (r >= dd->n_krcv_queues)
144 r %= dd->n_krcv_queues;
145 qpn = qpt->last + 1;
146 if (qpn >= QPN_MAX) 141 if (qpn >= QPN_MAX)
147 qpn = 2; 142 qpn = 2;
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) 143 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
149 qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | 144 qpn = (qpn | qpt->mask) + 2;
150 (r << 1);
151 offset = qpn & BITS_PER_PAGE_MASK; 145 offset = qpn & BITS_PER_PAGE_MASK;
152 map = &qpt->map[qpn / BITS_PER_PAGE]; 146 map = &qpt->map[qpn / BITS_PER_PAGE];
153 max_scan = qpt->nmaps - !offset; 147 max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
163 ret = qpn; 157 ret = qpn;
164 goto bail; 158 goto bail;
165 } 159 }
166 offset = find_next_offset(qpt, map, offset, r); 160 offset = find_next_offset(qpt, map, offset,
161 dd->n_krcv_queues);
167 qpn = mk_qpn(qpt, map, offset); 162 qpn = mk_qpn(qpt, map, offset);
168 /* 163 /*
169 * This test differs from alloc_pidmap(). 164 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
183 if (qpt->nmaps == QPNMAP_ENTRIES) 178 if (qpt->nmaps == QPNMAP_ENTRIES)
184 break; 179 break;
185 map = &qpt->map[qpt->nmaps++]; 180 map = &qpt->map[qpt->nmaps++];
186 offset = qpt->mask ? (r << 1) : 0; 181 offset = 0;
187 } else if (map < &qpt->map[qpt->nmaps]) { 182 } else if (map < &qpt->map[qpt->nmaps]) {
188 ++map; 183 ++map;
189 offset = qpt->mask ? (r << 1) : 0; 184 offset = 0;
190 } else { 185 } else {
191 map = &qpt->map[0]; 186 map = &qpt->map[0];
192 offset = qpt->mask ? (r << 1) : 2; 187 offset = 2;
193 } 188 }
194 qpn = mk_qpn(qpt, map, offset); 189 qpn = mk_qpn(qpt, map, offset);
195 } 190 }
@@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
468 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); 463 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
469 del_timer(&qp->s_timer); 464 del_timer(&qp->s_timer);
470 } 465 }
466
467 if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
468 qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
469
471 spin_lock(&dev->pending_lock); 470 spin_lock(&dev->pending_lock);
472 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { 471 if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
473 qp->s_flags &= ~QIB_S_ANY_WAIT_IO; 472 qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
1061 } 1060 }
1062 qp->ibqp.qp_num = err; 1061 qp->ibqp.qp_num = err;
1063 qp->port_num = init_attr->port_num; 1062 qp->port_num = init_attr->port_num;
1064 qp->processor_id = smp_processor_id();
1065 qib_reset_qp(qp, init_attr->qp_type); 1063 qib_reset_qp(qp, init_attr->qp_type);
1066 break; 1064 break;
1067 1065
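The QPN allocator above stops pinning a QP to the CPU that created it; it only requires that the receive context selected by the QPN bits actually exists. Assuming, as the code above does, that the bits under qpt->mask shifted right by one index the kernel receive context, the check can be sketched as:

#include <linux/types.h>

/* Which kernel receive context a QPN hashes to. */
static unsigned int sketch_qpn_to_ctxt(unsigned int qpn, unsigned int mask)
{
	return (qpn & mask) >> 1;
}

/* A QPN is usable when that context actually exists. */
static bool sketch_qpn_ok(unsigned int qpn, unsigned int mask,
			  unsigned int n_krcv_queues)
{
	return !mask || sketch_qpn_to_ctxt(qpn, mask) < n_krcv_queues;
}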
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb7157793..8245237b67ce 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1407 struct qib_ctxtdata *rcd) 1407 struct qib_ctxtdata *rcd)
1408{ 1408{
1409 struct qib_swqe *wqe; 1409 struct qib_swqe *wqe;
1410 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1410 enum ib_wc_status status; 1411 enum ib_wc_status status;
1411 unsigned long flags; 1412 unsigned long flags;
1412 int diff; 1413 int diff;
@@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1414 u32 aeth; 1415 u32 aeth;
1415 u64 val; 1416 u64 val;
1416 1417
1418 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1419 /*
1420 * If ACK'd PSN on SDMA busy list try to make progress to
1421 * reclaim SDMA credits.
1422 */
1423 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1424 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1425
1426 /*
1427 * If send tasklet not running attempt to progress
1428 * SDMA queue.
1429 */
1430 if (!(qp->s_flags & QIB_S_BUSY)) {
1431 /* Acquire SDMA Lock */
1432 spin_lock_irqsave(&ppd->sdma_lock, flags);
1433 /* Invoke sdma make progress */
1434 qib_sdma_make_progress(ppd);
1435 /* Release SDMA Lock */
1436 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1437 }
1438 }
1439 }
1440
1417 spin_lock_irqsave(&qp->s_lock, flags); 1441 spin_lock_irqsave(&qp->s_lock, flags);
1418 1442
1419 /* Ignore invalid responses. */ 1443 /* Ignore invalid responses. */
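The added block compares 24-bit PSNs to decide whether the ACK covers packets still on the SDMA busy list before poking the SDMA engine. qib_cmp24() fills that role in the driver; the sketch below is an illustrative reimplementation of wraparound-safe 24-bit comparison, not its exact source:

#include <linux/types.h>

/* <0, 0, >0 ordering of 24-bit sequence numbers, wraparound-safe. */
static int sketch_cmp24(u32 a, u32 b)
{
	return ((int)((a - b) << 8)) >> 8;	/* sign-extend the 24-bit difference */
}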
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2a1f85..4a51fd1e9cb7 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]); 445 qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; 446 src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
447 447
448 /* Get the number of bytes the message was padded by. */ 448 /*
449 * Get the number of bytes the message was padded by
450 * and drop incomplete packets.
451 */
449 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 452 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
450 if (unlikely(tlen < (hdrsize + pad + 4))) { 453 if (unlikely(tlen < (hdrsize + pad + 4)))
451 /* Drop incomplete packets. */ 454 goto drop;
452 ibp->n_pkt_drops++; 455
453 goto bail;
454 }
455 tlen -= hdrsize + pad + 4; 456 tlen -= hdrsize + pad + 4;
456 457
457 /* 458 /*
@@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
460 */ 461 */
461 if (qp->ibqp.qp_num) { 462 if (qp->ibqp.qp_num) {
462 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || 463 if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
463 hdr->lrh[3] == IB_LID_PERMISSIVE)) { 464 hdr->lrh[3] == IB_LID_PERMISSIVE))
464 ibp->n_pkt_drops++; 465 goto drop;
465 goto bail;
466 }
467 if (qp->ibqp.qp_num > 1) { 466 if (qp->ibqp.qp_num > 1) {
468 u16 pkey1, pkey2; 467 u16 pkey1, pkey2;
469 468
@@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
476 0xF, 475 0xF,
477 src_qp, qp->ibqp.qp_num, 476 src_qp, qp->ibqp.qp_num,
478 hdr->lrh[3], hdr->lrh[1]); 477 hdr->lrh[3], hdr->lrh[1]);
479 goto bail; 478 return;
480 } 479 }
481 } 480 }
482 if (unlikely(qkey != qp->qkey)) { 481 if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
484 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, 483 (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
485 src_qp, qp->ibqp.qp_num, 484 src_qp, qp->ibqp.qp_num,
486 hdr->lrh[3], hdr->lrh[1]); 485 hdr->lrh[3], hdr->lrh[1]);
487 goto bail; 486 return;
488 } 487 }
489 /* Drop invalid MAD packets (see 13.5.3.1). */ 488 /* Drop invalid MAD packets (see 13.5.3.1). */
490 if (unlikely(qp->ibqp.qp_num == 1 && 489 if (unlikely(qp->ibqp.qp_num == 1 &&
491 (tlen != 256 || 490 (tlen != 256 ||
492 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { 491 (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
493 ibp->n_pkt_drops++; 492 goto drop;
494 goto bail;
495 }
496 } else { 493 } else {
497 struct ib_smp *smp; 494 struct ib_smp *smp;
498 495
499 /* Drop invalid MAD packets (see 13.5.3.1). */ 496 /* Drop invalid MAD packets (see 13.5.3.1). */
500 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { 497 if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
501 ibp->n_pkt_drops++; 498 goto drop;
502 goto bail;
503 }
504 smp = (struct ib_smp *) data; 499 smp = (struct ib_smp *) data;
505 if ((hdr->lrh[1] == IB_LID_PERMISSIVE || 500 if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
506 hdr->lrh[3] == IB_LID_PERMISSIVE) && 501 hdr->lrh[3] == IB_LID_PERMISSIVE) &&
507 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 502 smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
508 ibp->n_pkt_drops++; 503 goto drop;
509 goto bail;
510 }
511 } 504 }
512 505
513 /* 506 /*
@@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
519 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { 512 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
520 wc.ex.imm_data = ohdr->u.ud.imm_data; 513 wc.ex.imm_data = ohdr->u.ud.imm_data;
521 wc.wc_flags = IB_WC_WITH_IMM; 514 wc.wc_flags = IB_WC_WITH_IMM;
522 hdrsize += sizeof(u32); 515 tlen -= sizeof(u32);
523 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { 516 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
524 wc.ex.imm_data = 0; 517 wc.ex.imm_data = 0;
525 wc.wc_flags = 0; 518 wc.wc_flags = 0;
526 } else { 519 } else
527 ibp->n_pkt_drops++; 520 goto drop;
528 goto bail;
529 }
530 521
531 /* 522 /*
532	 * A GRH is expected to precede the data even if not	523	 * A GRH is expected to precede the data even if not
@@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
556 /* Silently drop packets which are too big. */ 547 /* Silently drop packets which are too big. */
557 if (unlikely(wc.byte_len > qp->r_len)) { 548 if (unlikely(wc.byte_len > qp->r_len)) {
558 qp->r_flags |= QIB_R_REUSE_SGE; 549 qp->r_flags |= QIB_R_REUSE_SGE;
559 ibp->n_pkt_drops++; 550 goto drop;
560 return;
561 } 551 }
562 if (has_grh) { 552 if (has_grh) {
563 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, 553 qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
594 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 584 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
595 (ohdr->bth[0] & 585 (ohdr->bth[0] &
596 cpu_to_be32(IB_BTH_SOLICITED)) != 0); 586 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
597bail:; 587 return;
588
589drop:
590 ibp->n_pkt_drops++;
598} 591}
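The immediate-data change above fixes the completion length: tlen has already had the transport header, padding, and the 4-byte ICRC removed, so a present immediate has to come off tlen itself; bumping hdrsize at that point would no longer change anything. A worked sketch of the arithmetic (names are illustrative):

#include <linux/types.h>

/* Payload bytes delivered to the receive WQE. */
static u32 sketch_ud_payload_len(u32 tlen, u32 hdrsize, u32 pad, bool has_imm)
{
	u32 len = tlen - (hdrsize + pad + 4);	/* strip header, padding, ICRC */

	if (has_imm)
		len -= sizeof(u32);		/* the immediate is not payload */
	return len;
}

For a 256-byte packet with a 40-byte header, no padding, and an immediate, that gives 256 - 44 - 4 = 208 payload bytes.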
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06b5e85..66208bcd7c13 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
382 382
383 kmem_cache_free(pq->pkt_slab, pkt); 383 kmem_cache_free(pq->pkt_slab, pkt);
384 } 384 }
385 INIT_LIST_HEAD(list);
385} 386}
386 387
387/* 388/*
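The single INIT_LIST_HEAD() added above is what leaves the caller's packet list empty, rather than pointing at freed memory, once everything on it has been released. A self-contained sketch of the pattern (sketch_pkt is a made-up stand-in for the driver's packet type):

#include <linux/list.h>
#include <linux/slab.h>

struct sketch_pkt {
	struct list_head node;
	/* payload omitted */
};

static void sketch_free_all(struct list_head *head)
{
	struct sketch_pkt *p, *tmp;

	list_for_each_entry_safe(p, tmp, head, node)
		kfree(p);
	INIT_LIST_HEAD(head);	/* caller keeps an empty, reusable list */
}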
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c1273225..63b22a9a7feb 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@ struct qib_mregion {
301 int access_flags; 301 int access_flags;
302 u32 max_segs; /* number of qib_segs in all the arrays */ 302 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */ 303 u32 mapsz; /* size of the map array */
 304	u8 page_shift;	/* 0 - non-uniform/non power-of-2 sizes */
304 atomic_t refcount; 305 atomic_t refcount;
305 struct qib_segarray *map[0]; /* the segments */ 306 struct qib_segarray *map[0]; /* the segments */
306}; 307};
@@ -435,7 +436,6 @@ struct qib_qp {
435 spinlock_t r_lock; /* used for APM */ 436 spinlock_t r_lock; /* used for APM */
436 spinlock_t s_lock; 437 spinlock_t s_lock;
437 atomic_t s_dma_busy; 438 atomic_t s_dma_busy;
438 unsigned processor_id; /* Processor ID QP is bound to */
439 u32 s_flags; 439 u32 s_flags;
440 u32 s_cur_size; /* size of send packet in bytes */ 440 u32 s_cur_size; /* size of send packet in bytes */
441 u32 s_len; /* total length of s_sge */ 441 u32 s_len; /* total length of s_sge */
@@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq;
813 */ 813 */
814static inline void qib_schedule_send(struct qib_qp *qp) 814static inline void qib_schedule_send(struct qib_qp *qp)
815{ 815{
816 if (qib_send_ok(qp)) { 816 if (qib_send_ok(qp))
817 if (qp->processor_id == smp_processor_id()) 817 queue_work(qib_wq, &qp->s_work);
818 queue_work(qib_wq, &qp->s_work);
819 else
820 queue_work_on(qp->processor_id,
821 qib_wq, &qp->s_work);
822 }
823} 818}
824 819
825static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) 820static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f18..55855eeabae7 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
1config INFINIBAND_IPOIB 1config INFINIBAND_IPOIB
2 tristate "IP-over-InfiniBand" 2 tristate "IP-over-InfiniBand"
3 depends on NETDEVICES && INET && (IPV6 || IPV6=n) 3 depends on NETDEVICES && INET && (IPV6 || IPV6=n)
4 select INET_LRO
5 ---help--- 4 ---help---
6 Support for the IP-over-InfiniBand protocol (IPoIB). This 5 Support for the IP-over-InfiniBand protocol (IPoIB). This
7 transports IP packets over InfiniBand so you can use your IB 6 transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fdc..ab97f92fc257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
50#include <rdma/ib_verbs.h> 50#include <rdma/ib_verbs.h>
51#include <rdma/ib_pack.h> 51#include <rdma/ib_pack.h>
52#include <rdma/ib_sa.h> 52#include <rdma/ib_sa.h>
53#include <linux/inet_lro.h> 53#include <linux/sched.h>
54 54
55/* constants */ 55/* constants */
56 56
@@ -100,9 +100,6 @@ enum {
100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */ 100 IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
101 IPOIB_MCAST_FLAG_ATTACHED = 3, 101 IPOIB_MCAST_FLAG_ATTACHED = 3,
102 102
103 IPOIB_MAX_LRO_DESCRIPTORS = 8,
104 IPOIB_LRO_MAX_AGGR = 64,
105
106 MAX_SEND_CQE = 16, 103 MAX_SEND_CQE = 16,
107 IPOIB_CM_COPYBREAK = 256, 104 IPOIB_CM_COPYBREAK = 256,
108}; 105};
@@ -262,11 +259,6 @@ struct ipoib_ethtool_st {
262 u16 max_coalesced_frames; 259 u16 max_coalesced_frames;
263}; 260};
264 261
265struct ipoib_lro {
266 struct net_lro_mgr lro_mgr;
267 struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
268};
269
270/* 262/*
271 * Device private locking: network stack tx_lock protects members used 263 * Device private locking: network stack tx_lock protects members used
272 * in TX fast path, lock protects everything else. lock nests inside 264 * in TX fast path, lock protects everything else. lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
352 int hca_caps; 344 int hca_caps;
353 struct ipoib_ethtool_st ethtool; 345 struct ipoib_ethtool_st ethtool;
354 struct timer_list poll_timer; 346 struct timer_list poll_timer;
355
356 struct ipoib_lro lro;
357}; 347};
358 348
359struct ipoib_ah { 349struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114dec..c1c49f2d35b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
1480 1480
1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) { 1481 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 1482 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
1483 priv->dev->features |= NETIF_F_GRO;
1483 if (priv->hca_caps & IB_DEVICE_UD_TSO) 1484 if (priv->hca_caps & IB_DEVICE_UD_TSO)
1484 dev->features |= NETIF_F_TSO; 1485 dev->features |= NETIF_F_TSO;
1485 } 1486 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82edd..19f7f5206f78 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
106 return 0; 106 return 0;
107} 107}
108 108
109static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
110 "LRO aggregated", "LRO flushed",
111 "LRO avg aggr", "LRO no desc"
112};
113
114static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
115{
116 switch (stringset) {
117 case ETH_SS_STATS:
118 memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
119 break;
120 }
121}
122
123static int ipoib_get_sset_count(struct net_device *dev, int sset)
124{
125 switch (sset) {
126 case ETH_SS_STATS:
127 return ARRAY_SIZE(ipoib_stats_keys);
128 default:
129 return -EOPNOTSUPP;
130 }
131}
132
133static void ipoib_get_ethtool_stats(struct net_device *dev,
134 struct ethtool_stats *stats, uint64_t *data)
135{
136 struct ipoib_dev_priv *priv = netdev_priv(dev);
137 int index = 0;
138
139 /* Get LRO statistics */
140 data[index++] = priv->lro.lro_mgr.stats.aggregated;
141 data[index++] = priv->lro.lro_mgr.stats.flushed;
142 if (priv->lro.lro_mgr.stats.flushed)
143 data[index++] = priv->lro.lro_mgr.stats.aggregated /
144 priv->lro.lro_mgr.stats.flushed;
145 else
146 data[index++] = 0;
147 data[index++] = priv->lro.lro_mgr.stats.no_desc;
148}
149
150static int ipoib_set_flags(struct net_device *dev, u32 flags)
151{
152 return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
153}
154
155static const struct ethtool_ops ipoib_ethtool_ops = { 109static const struct ethtool_ops ipoib_ethtool_ops = {
156 .get_drvinfo = ipoib_get_drvinfo, 110 .get_drvinfo = ipoib_get_drvinfo,
157 .get_rx_csum = ipoib_get_rx_csum, 111 .get_rx_csum = ipoib_get_rx_csum,
158 .set_tso = ipoib_set_tso, 112 .set_tso = ipoib_set_tso,
159 .get_coalesce = ipoib_get_coalesce, 113 .get_coalesce = ipoib_get_coalesce,
160 .set_coalesce = ipoib_set_coalesce, 114 .set_coalesce = ipoib_set_coalesce,
161 .get_flags = ethtool_op_get_flags,
162 .set_flags = ipoib_set_flags,
163 .get_strings = ipoib_get_strings,
164 .get_sset_count = ipoib_get_sset_count,
165 .get_ethtool_stats = ipoib_get_ethtool_stats,
166}; 115};
167 116
168void ipoib_set_ethtool_ops(struct net_device *dev) 117void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e4..806d0292dc39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) 295 if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
296 skb->ip_summed = CHECKSUM_UNNECESSARY; 296 skb->ip_summed = CHECKSUM_UNNECESSARY;
297 297
298 if (dev->features & NETIF_F_LRO) 298 napi_gro_receive(&priv->napi, skb);
299 lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
300 else
301 netif_receive_skb(skb);
302 299
303repost: 300repost:
304 if (unlikely(ipoib_ib_post_receive(dev, wr_id))) 301 if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
450 } 447 }
451 448
452 if (done < budget) { 449 if (done < budget) {
453 if (dev->features & NETIF_F_LRO)
454 lro_flush_all(&priv->lro.lro_mgr);
455
456 napi_complete(napi); 450 napi_complete(napi);
457 if (unlikely(ib_req_notify_cq(priv->recv_cq, 451 if (unlikely(ib_req_notify_cq(priv->recv_cq,
458 IB_CQ_NEXT_COMP | 452 IB_CQ_NEXT_COMP |
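With the private LRO manager gone, aggregation moves into the core GRO layer: every completed receive is handed to napi_gro_receive() inside the NAPI poll loop, and no explicit flush is needed before napi_complete(). A minimal sketch of that shape (sketch_next_rx_skb() is a hypothetical stand-in for the driver's completion handling):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical stand-in for pulling the next completed skb off the RX ring. */
static struct sk_buff *sketch_next_rx_skb(void)
{
	return NULL;	/* a real driver walks its completion queue here */
}

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int done = 0;
	struct sk_buff *skb;

	while (done < budget && (skb = sketch_next_rx_skb()) != NULL) {
		napi_gro_receive(napi, skb);	/* GRO aggregates, then delivers */
		done++;
	}
	if (done < budget)
		napi_complete(napi);		/* finished under budget */
	return done;
}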
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed95..7a07a728fe0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
60module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444); 60module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
61MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue"); 61MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
62 62
63static int lro;
64module_param(lro, bool, 0444);
65MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
66
67static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
68module_param(lro_max_aggr, int, 0644);
69MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
70 "(default = 64)");
71
72#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG 63#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
73int ipoib_debug_level; 64int ipoib_debug_level;
74 65
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
976 .create = ipoib_hard_header, 967 .create = ipoib_hard_header,
977}; 968};
978 969
979static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
980 void **tcph, u64 *hdr_flags, void *priv)
981{
982 unsigned int ip_len;
983 struct iphdr *iph;
984
985 if (unlikely(skb->protocol != htons(ETH_P_IP)))
986 return -1;
987
988 /*
989 * In the future we may add an else clause that verifies the
990 * checksum and allows devices which do not calculate checksum
991 * to use LRO.
992 */
993 if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
994 return -1;
995
996 /* Check for non-TCP packet */
997 skb_reset_network_header(skb);
998 iph = ip_hdr(skb);
999 if (iph->protocol != IPPROTO_TCP)
1000 return -1;
1001
1002 ip_len = ip_hdrlen(skb);
1003 skb_set_transport_header(skb, ip_len);
1004 *tcph = tcp_hdr(skb);
1005
1006 /* check if IP header and TCP header are complete */
1007 if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
1008 return -1;
1009
1010 *hdr_flags = LRO_IPV4 | LRO_TCP;
1011 *iphdr = iph;
1012
1013 return 0;
1014}
1015
1016static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
1017{
1018 priv->lro.lro_mgr.max_aggr = lro_max_aggr;
1019 priv->lro.lro_mgr.max_desc = IPOIB_MAX_LRO_DESCRIPTORS;
1020 priv->lro.lro_mgr.lro_arr = priv->lro.lro_desc;
1021 priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
1022 priv->lro.lro_mgr.features = LRO_F_NAPI;
1023 priv->lro.lro_mgr.dev = priv->dev;
1024 priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1025}
1026
1027static const struct net_device_ops ipoib_netdev_ops = { 970static const struct net_device_ops ipoib_netdev_ops = {
1028 .ndo_open = ipoib_open, 971 .ndo_open = ipoib_open,
1029 .ndo_stop = ipoib_stop, 972 .ndo_stop = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
1067 1010
1068 priv->dev = dev; 1011 priv->dev = dev;
1069 1012
1070 ipoib_lro_setup(priv);
1071
1072 spin_lock_init(&priv->lock); 1013 spin_lock_init(&priv->lock);
1073 1014
1074 mutex_init(&priv->vlan_mutex); 1015 mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
1218 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; 1159 priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1219 } 1160 }
1220 1161
1221 if (lro) 1162 priv->dev->features |= NETIF_F_GRO;
1222 priv->dev->features |= NETIF_F_LRO;
1223 1163
1224 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO) 1164 if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
1225 priv->dev->features |= NETIF_F_TSO; 1165 priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..4b62105ed1e8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
441 wait_for_completion(&target->done); 441 wait_for_completion(&target->done);
442} 442}
443 443
444static bool srp_change_state(struct srp_target_port *target,
445 enum srp_target_state old,
446 enum srp_target_state new)
447{
448 bool changed = false;
449
450 spin_lock_irq(&target->lock);
451 if (target->state == old) {
452 target->state = new;
453 changed = true;
454 }
455 spin_unlock_irq(&target->lock);
456 return changed;
457}
458
444static void srp_remove_work(struct work_struct *work) 459static void srp_remove_work(struct work_struct *work)
445{ 460{
446 struct srp_target_port *target = 461 struct srp_target_port *target =
447 container_of(work, struct srp_target_port, work); 462 container_of(work, struct srp_target_port, work);
448 463
449 spin_lock_irq(target->scsi_host->host_lock); 464 if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
450 if (target->state != SRP_TARGET_DEAD) {
451 spin_unlock_irq(target->scsi_host->host_lock);
452 return; 465 return;
453 }
454 target->state = SRP_TARGET_REMOVED;
455 spin_unlock_irq(target->scsi_host->host_lock);
456 466
457 spin_lock(&target->srp_host->target_lock); 467 spin_lock(&target->srp_host->target_lock);
458 list_del(&target->list); 468 list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
539 scsi_sg_count(scmnd), scmnd->sc_data_direction); 549 scsi_sg_count(scmnd), scmnd->sc_data_direction);
540} 550}
541 551
542static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) 552static void srp_remove_req(struct srp_target_port *target,
553 struct srp_request *req, s32 req_lim_delta)
543{ 554{
555 unsigned long flags;
556
544 srp_unmap_data(req->scmnd, target, req); 557 srp_unmap_data(req->scmnd, target, req);
545 list_move_tail(&req->list, &target->free_reqs); 558 spin_lock_irqsave(&target->lock, flags);
559 target->req_lim += req_lim_delta;
560 req->scmnd = NULL;
561 list_add_tail(&req->list, &target->free_reqs);
562 spin_unlock_irqrestore(&target->lock, flags);
546} 563}
547 564
548static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) 565static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
549{ 566{
550 req->scmnd->result = DID_RESET << 16; 567 req->scmnd->result = DID_RESET << 16;
551 req->scmnd->scsi_done(req->scmnd); 568 req->scmnd->scsi_done(req->scmnd);
552 srp_remove_req(target, req); 569 srp_remove_req(target, req, 0);
553} 570}
554 571
555static int srp_reconnect_target(struct srp_target_port *target) 572static int srp_reconnect_target(struct srp_target_port *target)
556{ 573{
557 struct ib_qp_attr qp_attr; 574 struct ib_qp_attr qp_attr;
558 struct srp_request *req, *tmp;
559 struct ib_wc wc; 575 struct ib_wc wc;
560 int ret; 576 int i, ret;
561 577
562 spin_lock_irq(target->scsi_host->host_lock); 578 if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
563 if (target->state != SRP_TARGET_LIVE) {
564 spin_unlock_irq(target->scsi_host->host_lock);
565 return -EAGAIN; 579 return -EAGAIN;
566 }
567 target->state = SRP_TARGET_CONNECTING;
568 spin_unlock_irq(target->scsi_host->host_lock);
569 580
570 srp_disconnect_target(target); 581 srp_disconnect_target(target);
571 /* 582 /*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
590 while (ib_poll_cq(target->send_cq, 1, &wc) > 0) 601 while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
591 ; /* nothing */ 602 ; /* nothing */
592 603
593 spin_lock_irq(target->scsi_host->host_lock); 604 for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
594 list_for_each_entry_safe(req, tmp, &target->req_queue, list) 605 struct srp_request *req = &target->req_ring[i];
595 srp_reset_req(target, req); 606 if (req->scmnd)
596 spin_unlock_irq(target->scsi_host->host_lock); 607 srp_reset_req(target, req);
608 }
597 609
598 target->rx_head = 0; 610 INIT_LIST_HEAD(&target->free_tx);
599 target->tx_head = 0; 611 for (i = 0; i < SRP_SQ_SIZE; ++i)
600 target->tx_tail = 0; 612 list_add(&target->tx_ring[i]->list, &target->free_tx);
601 613
602 target->qp_in_error = 0; 614 target->qp_in_error = 0;
603 ret = srp_connect_target(target); 615 ret = srp_connect_target(target);
604 if (ret) 616 if (ret)
605 goto err; 617 goto err;
606 618
607 spin_lock_irq(target->scsi_host->host_lock); 619 if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
608 if (target->state == SRP_TARGET_CONNECTING) {
609 ret = 0;
610 target->state = SRP_TARGET_LIVE;
611 } else
612 ret = -EAGAIN; 620 ret = -EAGAIN;
613 spin_unlock_irq(target->scsi_host->host_lock);
614 621
615 return ret; 622 return ret;
616 623
@@ -620,17 +627,20 @@ err:
620 627
621 /* 628 /*
622 * We couldn't reconnect, so kill our target port off. 629 * We couldn't reconnect, so kill our target port off.
623 * However, we have to defer the real removal because we might 630 * However, we have to defer the real removal because we
624 * be in the context of the SCSI error handler now, which 631 * are in the context of the SCSI error handler now, which
625 * would deadlock if we call scsi_remove_host(). 632 * will deadlock if we call scsi_remove_host().
633 *
634 * Schedule our work inside the lock to avoid a race with
635 * the flush_scheduled_work() in srp_remove_one().
626 */ 636 */
627 spin_lock_irq(target->scsi_host->host_lock); 637 spin_lock_irq(&target->lock);
628 if (target->state == SRP_TARGET_CONNECTING) { 638 if (target->state == SRP_TARGET_CONNECTING) {
629 target->state = SRP_TARGET_DEAD; 639 target->state = SRP_TARGET_DEAD;
630 INIT_WORK(&target->work, srp_remove_work); 640 INIT_WORK(&target->work, srp_remove_work);
631 schedule_work(&target->work); 641 schedule_work(&target->work);
632 } 642 }
633 spin_unlock_irq(target->scsi_host->host_lock); 643 spin_unlock_irq(&target->lock);
634 644
635 return ret; 645 return ret;
636} 646}
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
758 struct srp_direct_buf *buf = (void *) cmd->add_data; 768 struct srp_direct_buf *buf = (void *) cmd->add_data;
759 769
760 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 770 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
761 buf->key = cpu_to_be32(dev->mr->rkey); 771 buf->key = cpu_to_be32(target->rkey);
762 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 772 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
763 } else if (srp_map_fmr(target, scat, count, req, 773 } else if (srp_map_fmr(target, scat, count, req,
764 (void *) cmd->add_data)) { 774 (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
783 buf->desc_list[i].va = 793 buf->desc_list[i].va =
784 cpu_to_be64(ib_sg_dma_address(ibdev, sg)); 794 cpu_to_be64(ib_sg_dma_address(ibdev, sg));
785 buf->desc_list[i].key = 795 buf->desc_list[i].key =
786 cpu_to_be32(dev->mr->rkey); 796 cpu_to_be32(target->rkey);
787 buf->desc_list[i].len = cpu_to_be32(dma_len); 797 buf->desc_list[i].len = cpu_to_be32(dma_len);
788 datalen += dma_len; 798 datalen += dma_len;
789 } 799 }
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
796 buf->table_desc.va = 806 buf->table_desc.va =
797 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf); 807 cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
798 buf->table_desc.key = 808 buf->table_desc.key =
799 cpu_to_be32(target->srp_host->srp_dev->mr->rkey); 809 cpu_to_be32(target->rkey);
800 buf->table_desc.len = 810 buf->table_desc.len =
801 cpu_to_be32(count * sizeof (struct srp_direct_buf)); 811 cpu_to_be32(count * sizeof (struct srp_direct_buf));
802 812
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
812} 822}
813 823
814/* 824/*
815 * Must be called with target->scsi_host->host_lock held to protect 825 * Return an IU and possible credit to the free pool
816 * req_lim and tx_head. Lock cannot be dropped between call here and 826 */
817 * call to __srp_post_send(). 827static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
828 enum srp_iu_type iu_type)
829{
830 unsigned long flags;
831
832 spin_lock_irqsave(&target->lock, flags);
833 list_add(&iu->list, &target->free_tx);
834 if (iu_type != SRP_IU_RSP)
835 ++target->req_lim;
836 spin_unlock_irqrestore(&target->lock, flags);
837}
838
839/*
840 * Must be called with target->lock held to protect req_lim and free_tx.
841 * If IU is not sent, it must be returned using srp_put_tx_iu().
818 * 842 *
819 * Note: 843 * Note:
820 * An upper limit for the number of allocated information units for each 844 * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
833 857
834 srp_send_completion(target->send_cq, target); 858 srp_send_completion(target->send_cq, target);
835 859
836 if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) 860 if (list_empty(&target->free_tx))
837 return NULL; 861 return NULL;
838 862
839 /* Initiator responses to target requests do not consume credits */ 863 /* Initiator responses to target requests do not consume credits */
840 if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) { 864 if (iu_type != SRP_IU_RSP) {
841 ++target->zero_req_lim; 865 if (target->req_lim <= rsv) {
842 return NULL; 866 ++target->zero_req_lim;
867 return NULL;
868 }
869
870 --target->req_lim;
843 } 871 }
844 872
845 iu = target->tx_ring[target->tx_head & SRP_SQ_MASK]; 873 iu = list_first_entry(&target->free_tx, struct srp_iu, list);
846 iu->type = iu_type; 874 list_del(&iu->list);
847 return iu; 875 return iu;
848} 876}
849 877
850/* 878static int srp_post_send(struct srp_target_port *target,
851 * Must be called with target->scsi_host->host_lock held to protect 879 struct srp_iu *iu, int len)
852 * req_lim and tx_head.
853 */
854static int __srp_post_send(struct srp_target_port *target,
855 struct srp_iu *iu, int len)
856{ 880{
857 struct ib_sge list; 881 struct ib_sge list;
858 struct ib_send_wr wr, *bad_wr; 882 struct ib_send_wr wr, *bad_wr;
859 int ret = 0;
860 883
861 list.addr = iu->dma; 884 list.addr = iu->dma;
862 list.length = len; 885 list.length = len;
863 list.lkey = target->srp_host->srp_dev->mr->lkey; 886 list.lkey = target->lkey;
864 887
865 wr.next = NULL; 888 wr.next = NULL;
866 wr.wr_id = target->tx_head & SRP_SQ_MASK; 889 wr.wr_id = (uintptr_t) iu;
867 wr.sg_list = &list; 890 wr.sg_list = &list;
868 wr.num_sge = 1; 891 wr.num_sge = 1;
869 wr.opcode = IB_WR_SEND; 892 wr.opcode = IB_WR_SEND;
870 wr.send_flags = IB_SEND_SIGNALED; 893 wr.send_flags = IB_SEND_SIGNALED;
871 894
872 ret = ib_post_send(target->qp, &wr, &bad_wr); 895 return ib_post_send(target->qp, &wr, &bad_wr);
873
874 if (!ret) {
875 ++target->tx_head;
876 if (iu->type != SRP_IU_RSP)
877 --target->req_lim;
878 }
879
880 return ret;
881} 896}
882 897
883static int srp_post_recv(struct srp_target_port *target) 898static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
884{ 899{
885 unsigned long flags;
886 struct srp_iu *iu;
887 struct ib_sge list;
888 struct ib_recv_wr wr, *bad_wr; 900 struct ib_recv_wr wr, *bad_wr;
889 unsigned int next; 901 struct ib_sge list;
890 int ret;
891
892 spin_lock_irqsave(target->scsi_host->host_lock, flags);
893
894 next = target->rx_head & SRP_RQ_MASK;
895 wr.wr_id = next;
896 iu = target->rx_ring[next];
897 902
898 list.addr = iu->dma; 903 list.addr = iu->dma;
899 list.length = iu->size; 904 list.length = iu->size;
900 list.lkey = target->srp_host->srp_dev->mr->lkey; 905 list.lkey = target->lkey;
901 906
902 wr.next = NULL; 907 wr.next = NULL;
908 wr.wr_id = (uintptr_t) iu;
903 wr.sg_list = &list; 909 wr.sg_list = &list;
904 wr.num_sge = 1; 910 wr.num_sge = 1;
905 911
906 ret = ib_post_recv(target->qp, &wr, &bad_wr); 912 return ib_post_recv(target->qp, &wr, &bad_wr);
907 if (!ret)
908 ++target->rx_head;
909
910 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
911
912 return ret;
913} 913}
914 914
915static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) 915static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
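The send-side rework above drops the tx_head/tx_tail ring counters: idle IUs sit on free_tx, and req_lim acts as a credit that a non-RSP IU consumes when taken and that srp_put_tx_iu() hands back if the IU is never posted (the driver additionally reserves a few credits for task management, omitted here). A hedged, self-contained sketch of that accounting with illustrative struct names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_iu {
	struct list_head list;
};

struct sketch_tx_pool {
	spinlock_t lock;
	struct list_head free_tx;	/* idle send IUs */
	s32 req_lim;			/* credits granted by the target */
};

/* Take an IU; non-response IUs also consume one credit. */
static struct sketch_iu *sketch_get_tx_iu(struct sketch_tx_pool *p, bool is_rsp)
{
	struct sketch_iu *iu = NULL;

	spin_lock_irq(&p->lock);
	if (!list_empty(&p->free_tx) && (is_rsp || p->req_lim > 0)) {
		if (!is_rsp)
			--p->req_lim;
		iu = list_first_entry(&p->free_tx, struct sketch_iu, list);
		list_del(&iu->list);
	}
	spin_unlock_irq(&p->lock);
	return iu;
}

/* Return an unused IU and, for non-responses, its credit. */
static void sketch_put_tx_iu(struct sketch_tx_pool *p, struct sketch_iu *iu,
			     bool is_rsp)
{
	spin_lock_irq(&p->lock);
	list_add(&iu->list, &p->free_tx);
	if (!is_rsp)
		++p->req_lim;
	spin_unlock_irq(&p->lock);
}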
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
917 struct srp_request *req; 917 struct srp_request *req;
918 struct scsi_cmnd *scmnd; 918 struct scsi_cmnd *scmnd;
919 unsigned long flags; 919 unsigned long flags;
920 s32 delta;
921
922 delta = (s32) be32_to_cpu(rsp->req_lim_delta);
923
924 spin_lock_irqsave(target->scsi_host->host_lock, flags);
925
926 target->req_lim += delta;
927
928 req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
929 920
930 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { 921 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
931 if (be32_to_cpu(rsp->resp_data_len) < 4) 922 spin_lock_irqsave(&target->lock, flags);
932 req->tsk_status = -1; 923 target->req_lim += be32_to_cpu(rsp->req_lim_delta);
933 else 924 spin_unlock_irqrestore(&target->lock, flags);
934 req->tsk_status = rsp->data[3]; 925
935 complete(&req->done); 926 target->tsk_mgmt_status = -1;
927 if (be32_to_cpu(rsp->resp_data_len) >= 4)
928 target->tsk_mgmt_status = rsp->data[3];
929 complete(&target->tsk_mgmt_done);
936 } else { 930 } else {
931 req = &target->req_ring[rsp->tag];
937 scmnd = req->scmnd; 932 scmnd = req->scmnd;
938 if (!scmnd) 933 if (!scmnd)
939 shost_printk(KERN_ERR, target->scsi_host, 934 shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
953 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) 948 else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
954 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); 949 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
955 950
956 if (!req->tsk_mgmt) { 951 srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
957 scmnd->host_scribble = (void *) -1L; 952 scmnd->host_scribble = NULL;
958 scmnd->scsi_done(scmnd); 953 scmnd->scsi_done(scmnd);
959
960 srp_remove_req(target, req);
961 } else
962 req->cmd_done = 1;
963 } 954 }
964
965 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
966} 955}
967 956
968static int srp_response_common(struct srp_target_port *target, s32 req_delta, 957static int srp_response_common(struct srp_target_port *target, s32 req_delta,
969 void *rsp, int len) 958 void *rsp, int len)
970{ 959{
971 struct ib_device *dev; 960 struct ib_device *dev = target->srp_host->srp_dev->dev;
972 unsigned long flags; 961 unsigned long flags;
973 struct srp_iu *iu; 962 struct srp_iu *iu;
974 int err = 1; 963 int err;
975 964
976 dev = target->srp_host->srp_dev->dev; 965 spin_lock_irqsave(&target->lock, flags);
977
978 spin_lock_irqsave(target->scsi_host->host_lock, flags);
979 target->req_lim += req_delta; 966 target->req_lim += req_delta;
980
981 iu = __srp_get_tx_iu(target, SRP_IU_RSP); 967 iu = __srp_get_tx_iu(target, SRP_IU_RSP);
968 spin_unlock_irqrestore(&target->lock, flags);
969
982 if (!iu) { 970 if (!iu) {
983 shost_printk(KERN_ERR, target->scsi_host, PFX 971 shost_printk(KERN_ERR, target->scsi_host, PFX
984 "no IU available to send response\n"); 972 "no IU available to send response\n");
985 goto out; 973 return 1;
986 } 974 }
987 975
988 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); 976 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
989 memcpy(iu->buf, rsp, len); 977 memcpy(iu->buf, rsp, len);
990 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); 978 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
991 979
992 err = __srp_post_send(target, iu, len); 980 err = srp_post_send(target, iu, len);
993 if (err) 981 if (err) {
994 shost_printk(KERN_ERR, target->scsi_host, PFX 982 shost_printk(KERN_ERR, target->scsi_host, PFX
995 "unable to post response: %d\n", err); 983 "unable to post response: %d\n", err);
984 srp_put_tx_iu(target, iu, SRP_IU_RSP);
985 }
996 986
997out:
998 spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
999 return err; 987 return err;
1000} 988}
1001 989
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
1032 1020
1033static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 1021static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1034{ 1022{
1035 struct ib_device *dev; 1023 struct ib_device *dev = target->srp_host->srp_dev->dev;
1036 struct srp_iu *iu; 1024 struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
1037 int res; 1025 int res;
1038 u8 opcode; 1026 u8 opcode;
1039 1027
1040 iu = target->rx_ring[wc->wr_id];
1041
1042 dev = target->srp_host->srp_dev->dev;
1043 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 1028 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
1044 DMA_FROM_DEVICE); 1029 DMA_FROM_DEVICE);
1045 1030
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
1080 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 1065 ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
1081 DMA_FROM_DEVICE); 1066 DMA_FROM_DEVICE);
1082 1067
1083 res = srp_post_recv(target); 1068 res = srp_post_recv(target, iu);
1084 if (res != 0) 1069 if (res != 0)
1085 shost_printk(KERN_ERR, target->scsi_host, 1070 shost_printk(KERN_ERR, target->scsi_host,
1086 PFX "Recv failed with error code %d\n", res); 1071 PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-				void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
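The send side mirrors the receive side: the tx_head/tx_tail counters disappear, the IU pointer rides in wr_id, and a completed send is simply put back on free_tx (hunk above). A hedged sketch of the posting half, again written against the generic ib_verbs API rather than the driver's srp_post_send():

	/* Sketch, not the driver's function: post a send whose completion
	 * returns the IU to target->free_tx via wc.wr_id. */
	static int post_send_sketch(struct srp_target_port *target,
				    struct srp_iu *iu, int len)
	{
		struct ib_send_wr wr, *bad_wr;
		struct ib_sge sge;

		sge.addr      = iu->dma;
		sge.length    = len;
		sge.lkey      = target->lkey;

		wr.next       = NULL;
		wr.wr_id      = (uintptr_t) iu;	/* recovered in srp_send_completion() */
		wr.sg_list    = &sge;
		wr.num_sge    = 1;
		wr.opcode     = IB_WR_SEND;
		wr.send_flags = IB_SEND_SIGNALED;

		return ib_post_send(target->qp, &wr, &bad_wr);
	}
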
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				       list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;
 
@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done = done;
 	scmnd->result = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
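Note the host_scribble change above: instead of a request index (with -1L as a sentinel), the command now carries the srp_request pointer directly, which is what lets srp_find_req() be deleted further down. Recovering the request becomes a cast, as in this hypothetical one-line helper (srp_abort() below does the same thing inline):

	/* Sketch: host_scribble holds the srp_request pointer, or NULL when
	 * the command is not (or no longer) owned by the driver. */
	static struct srp_request *scmnd_to_req_sketch(struct scsi_cmnd *scmnd)
	{
		return (struct srp_request *) scmnd->host_scribble;
	}
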
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 
 	req->scmnd = scmnd;
 	req->cmd = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
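Two things happen in the hunk above: the error unwind gains an err_iu step so a claimed transmit IU and request slot are always returned, and DEF_SCSI_QCMD() is dropped because srp_queuecommand() now uses the lock-free ->queuecommand prototype and no longer needs the host-lock wrapper. The SCSI host template (outside these hunks) can then point straight at the new function; a minimal, hypothetical template showing only the relevant fields:

	/* Sketch, not the driver's template: the lock-free handler is wired
	 * up directly, without a DEF_SCSI_QCMD() shim. */
	static struct scsi_host_template srp_template_sketch = {
		.module		= THIS_MODULE,
		.name		= "ib_srp-sketch",
		.queuecommand	= srp_queuecommand,	/* (shost, scmnd) prototype */
		.can_queue	= SRP_CMD_SQ_SIZE,
		.this_id	= -1,
	};
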
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 			break;
 
 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode = SRP_TSK_MGMT;
-	tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun = cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag = req->index;
+	tsk_mgmt->task_tag = req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
-
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
-					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
 		return -1;
+	}
 
-	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
+					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
 	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);
 
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	return ret;
 }
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
+	if (target->tsk_mgmt_status)
 		return FAILED;
-	if (req->tsk_status)
-		return FAILED;
-
-	spin_lock_irq(target->scsi_host->host_lock);
 
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
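Because requests now live only in the fixed req_ring (the req_queue list is gone), srp_reset_device() walks the ring; free slots are recognizable by a NULL scmnd, which is what the extra check above relies on. The predicate, as a sketch:

	/* Sketch: a ring slot matters only if it is in flight and belongs to
	 * the device being reset; free slots keep scmnd == NULL. */
	static bool req_active_for_sketch(struct srp_request *req,
					  struct scsi_device *sdev)
	{
		return req->scmnd && req->scmnd->device == sdev;
	}
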
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev,
 	target->io_class = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host = target_host;
 	target->srp_host = host;
+	target->lkey = host->srp_dev->mr->lkey;
+	target->rkey = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device)
 	 */
 	spin_lock(&host->target_lock);
 	list_for_each_entry(target, &host->target_list, list) {
-		spin_lock_irq(target->scsi_host->host_lock);
+		spin_lock_irq(&target->lock);
 		target->state = SRP_TARGET_REMOVED;
-		spin_unlock_irq(target->scsi_host->host_lock);
+		spin_unlock_irq(&target->lock);
 	}
 	spin_unlock(&host->target_lock);
 
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
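The power-of-two BUILD_BUG_ONs go away together with the ring masks; the one remaining compile-time check only has to guarantee that a kernel pointer fits into the 64-bit wr_id field, since that is how IUs are now passed around. Written out on its own (FIELD_SIZEOF and BUILD_BUG_ON are the stock kernel macros):

	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	/* Fails the build if ib_wc.wr_id were ever narrower than a pointer. */
	static inline void wr_id_fits_pointer_check(void)
	{
		BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
	}
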
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..9dc6fc3fd894 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum {
 
 	SRP_RQ_SHIFT = 6,
 	SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK = SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE = SRP_RQ_SIZE,
-	SRP_SQ_MASK = SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE = 1,
 	SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE = 1,
 	SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ = ~0U,
+	SRP_TAG_TSK_MGMT = 1U << 31,
 
 	SRP_FMR_SIZE = 256,
 	SRP_FMR_POOL_SIZE = 1024,
@@ -113,15 +112,29 @@ struct srp_request {
 	struct list_head list;
 	struct scsi_cmnd *scmnd;
 	struct srp_iu *cmd;
-	struct srp_iu *tsk_mgmt;
 	struct ib_pool_fmr *fmr;
-	struct completion done;
 	short index;
-	u8 cmd_done;
-	u8 tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head free_tx;
+	struct list_head free_reqs;
+	spinlock_t lock;
+	s32 req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq *send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq *recv_cq;
+	struct ib_qp *qp;
+	u32 lkey;
+	u32 rkey;
+	enum srp_target_state state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64 id_ext;
 	__be64 ioc_guid;
 	__be64 service_id;
@@ -138,24 +151,13 @@ struct srp_target_port {
 	int path_query_id;
 
 	struct ib_cm_id *cm_id;
-	struct ib_cq *recv_cq;
-	struct ib_cq *send_cq;
-	struct ib_qp *qp;
 
 	int max_ti_iu_len;
-	s32 req_lim;
 
 	int zero_req_lim;
 
-	unsigned rx_head;
-	struct srp_iu *rx_ring[SRP_RQ_SIZE];
-
-	unsigned tx_head;
-	unsigned tx_tail;
 	struct srp_iu *tx_ring[SRP_SQ_SIZE];
-
-	struct list_head free_reqs;
-	struct list_head req_queue;
+	struct srp_iu *rx_ring[SRP_RQ_SIZE];
 	struct srp_request req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct work;
@@ -163,16 +165,18 @@ struct srp_target_port {
 	struct list_head list;
 	struct completion done;
 	int status;
-	enum srp_target_state state;
 	int qp_in_error;
+
+	struct completion tsk_mgmt_done;
+	u8 tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head list;
 	u64 dma;
 	void *buf;
 	size_t size;
 	enum dma_data_direction direction;
-	enum srp_iu_type type;
 };
 
 #endif /* IB_SRP_H */
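The header reshuffle is about cache behaviour as much as new fields: the members written on every I/O (free_tx, free_reqs, lock, req_lim) are grouped at the top, and the read-mostly QP/CQ/key fields start on their own cache line via ____cacheline_aligned_in_smp. The annotation pattern, reduced to a sketch (hypothetical struct, not the driver's):

	struct hot_cold_sketch {
		/* written on every request/response; keep these together */
		struct list_head	free_tx;
		spinlock_t		lock;
		s32			req_lim;

		/* read-mostly after setup; start a new cache line so the
		 * writers above do not bounce it between CPUs */
		struct ib_qp	       *qp ____cacheline_aligned_in_smp;
		u32			lkey;
		u32			rkey;
	};
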
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 	} else {
 		int i;
 
+		buf->direct.buf = NULL;
 		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages = buf->nbufs;
 		buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
 				  buf->direct.map);
 	else {
-		if (BITS_PER_LONG == 64)
+		if (BITS_PER_LONG == 64 && buf->direct.buf)
 			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
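The two mlx4_buf hunks are a matched pair: with buf->direct.buf initialised to NULL in the paged case, the free path can use it to tell whether the pages were ever vmap()ed (the 64-bit case) before calling vunmap(), which appears to be aimed at safely freeing a partially set-up buffer, e.g. on an allocation error path. The guard, reduced to a sketch (simplified; the real function also releases the per-page buffers):

	/* Sketch of the guarded free: only vunmap() when a mapping exists. */
	static void buf_free_sketch(struct mlx4_buf *buf)
	{
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);
		/* ... individual pages freed as before ... */
	}
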
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
-			mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
-		}
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);