Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/cxgb3/cxio_hal.c       |   2
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_provider.h  |   2
-rw-r--r--	drivers/infiniband/hw/cxgb3/iwch_qp.c        |  56
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h       |   1
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c             |  32
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_driver.c   |   5
-rw-r--r--	drivers/infiniband/hw/mlx4/cq.c              |   9
-rw-r--r--	drivers/infiniband/hw/mlx4/mad.c             |   2
-rw-r--r--	drivers/infiniband/hw/mthca/mthca_mad.c      |   2
-rw-r--r--	drivers/infiniband/hw/nes/nes_nic.c          |   4
-rw-r--r--	drivers/infiniband/ulp/ipoib/Kconfig         |   1
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib.h         |  12
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_cm.c      |   1
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ethtool.c |  51
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_ib.c      |   8
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c    |  62
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c          | 392
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h          |  46
18 files changed, 228 insertions, 460 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b8740e..c3f5aca4ef00 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49bdcc9..c5406da3f4cd 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137181d7..1b4cd09f74dc 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@ out:
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cdb4337..cc600c2dd0b3 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb2505ea1..20800900ef3f 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc1da76..b33f0457a1ff 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2fdf16..e8df155bc3b0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd63b9e2..57ffa50f509e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *mad)
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659ff0b0..03a59534f59e 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev,
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c0e95a..5a4c36484722 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
 				  nesvnic->nic_index &&
 				  mc_index < max_pft_entries_avaiable) {
 				nes_debug(NES_DBG_NIC_RX,
-					"mc_index=%d skipping nic_index=%d,\
-					used for=%d \n", mc_index,
+					  "mc_index=%d skipping nic_index=%d, "
+					  "used for=%d \n", mc_index,
 					nesvnic->nic_index,
 					nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc51f18..55855eeabae7 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
 	depends on NETDEVICES && INET && (IPV6 || IPV6=n)
-	select INET_LRO
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983a5fdc..ab97f92fc257 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
 
 /* constants */
 
@@ -100,9 +100,6 @@ enum {
 	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
 
-	IPOIB_MAX_LRO_DESCRIPTORS = 8,
-	IPOIB_LRO_MAX_AGGR = 64,
-
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
 };
@@ -262,11 +259,6 @@
 	u16 max_coalesced_frames;
 };
 
-struct ipoib_lro {
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else.  lock nests inside
@@ -352,8 +344,6 @@ struct ipoib_dev_priv {
 	int hca_caps;
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
-
-	struct ipoib_lro lro;
 };
 
 struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb1004114dec..c1c49f2d35b5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+		priv->dev->features |= NETIF_F_GRO;
 		if (priv->hca_caps & IB_DEVICE_UD_TSO)
 			dev->features |= NETIF_F_TSO;
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c82edd..19f7f5206f78 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@ static int ipoib_set_coalesce(struct net_device *dev,
 	return 0;
 }
 
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
-	"LRO aggregated", "LRO flushed",
-	"LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(data, *ipoib_stats_keys, sizeof(ipoib_stats_keys));
-		break;
-	}
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(ipoib_stats_keys);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
-				    struct ethtool_stats *stats, uint64_t *data)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int index = 0;
-
-	/* Get LRO statistics */
-	data[index++] = priv->lro.lro_mgr.stats.aggregated;
-	data[index++] = priv->lro.lro_mgr.stats.flushed;
-	if (priv->lro.lro_mgr.stats.flushed)
-		data[index++] = priv->lro.lro_mgr.stats.aggregated /
-				priv->lro.lro_mgr.stats.flushed;
-	else
-		data[index++] = 0;
-	data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
 	.set_tso		= ipoib_set_tso,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ipoib_set_flags,
-	.get_strings		= ipoib_get_strings,
-	.get_sset_count		= ipoib_get_sset_count,
-	.get_ethtool_stats	= ipoib_get_ethtool_stats,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa71903d6e4..806d0292dc39 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (dev->features & NETIF_F_LRO)
-		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(&priv->napi, skb);
 
 repost:
 	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@ poll_more:
 	}
 
 	if (done < budget) {
-		if (dev->features & NETIF_F_LRO)
-			lro_flush_all(&priv->lro.lro_mgr);
-
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc73ed95..7a07a728fe0d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@ MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro, "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -976,54 +967,6 @@ static const struct header_ops ipoib_header_ops = {
 	.create	= ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev		 = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open		 = ipoib_open,
 	.ndo_stop		 = ipoib_stop,
@@ -1067,8 +1010,6 @@ static void ipoib_setup(struct net_device *dev)
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;
 
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347a7715..4b62105ed1e8 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@ static void srp_disconnect_target(struct srp_target_port *target)
 	wait_for_completion(&target->done);
 }
 
+static bool srp_change_state(struct srp_target_port *target,
+			    enum srp_target_state old,
+			    enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}
 
-	target->rx_head = 0;
-	target->tx_head = 0;
-	target->tx_tail = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	return ret;
 
@@ -620,17 +627,20 @@ err:
 
 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -758,7 +768,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 			buf->desc_list[i].va  =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 		buf->table_desc.va =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
@@ -812,9 +822,23 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head. Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 
 	srp_send_completion(target->send_cq, target);
 
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id      = (uintptr_t) iu;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next     = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu       = target->rx_ring[next];
+	struct ib_sge list;
 
 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next     = NULL;
+	wr.wr_id    = (uintptr_t) iu;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;
 
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
+	int err;
 
-	dev = target->srp_host->srp_dev->dev;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
 
@@ -1032,14 +1020,11 @@ static void srp_process_aer_req(struct srp_target_port *target,
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1080,7 +1065,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
 
-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-				void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				       list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;
 
@@ -1151,11 +1146,8 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1209,8 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1372,8 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1377 | break; | 1372 | break; |
1378 | 1373 | ||
1379 | for (i = 0; i < SRP_RQ_SIZE; i++) { | 1374 | for (i = 0; i < SRP_RQ_SIZE; i++) { |
1380 | target->status = srp_post_recv(target); | 1375 | struct srp_iu *iu = target->rx_ring[i]; |
1376 | target->status = srp_post_recv(target, iu); | ||
1381 | if (target->status) | 1377 | if (target->status) |
1382 | break; | 1378 | break; |
1383 | } | 1379 | } |
@@ -1442,25 +1438,24 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1442 | } | 1438 | } |
1443 | 1439 | ||
1444 | static int srp_send_tsk_mgmt(struct srp_target_port *target, | 1440 | static int srp_send_tsk_mgmt(struct srp_target_port *target, |
1445 | struct srp_request *req, u8 func) | 1441 | u64 req_tag, unsigned int lun, u8 func) |
1446 | { | 1442 | { |
1447 | struct ib_device *dev = target->srp_host->srp_dev->dev; | 1443 | struct ib_device *dev = target->srp_host->srp_dev->dev; |
1448 | struct srp_iu *iu; | 1444 | struct srp_iu *iu; |
1449 | struct srp_tsk_mgmt *tsk_mgmt; | 1445 | struct srp_tsk_mgmt *tsk_mgmt; |
1450 | 1446 | ||
1451 | spin_lock_irq(target->scsi_host->host_lock); | ||
1452 | |||
1453 | if (target->state == SRP_TARGET_DEAD || | 1447 | if (target->state == SRP_TARGET_DEAD || |
1454 | target->state == SRP_TARGET_REMOVED) { | 1448 | target->state == SRP_TARGET_REMOVED) |
1455 | req->scmnd->result = DID_BAD_TARGET << 16; | 1449 | return -1; |
1456 | goto out; | ||
1457 | } | ||
1458 | 1450 | ||
1459 | init_completion(&req->done); | 1451 | init_completion(&target->tsk_mgmt_done); |
1460 | 1452 | ||
1453 | spin_lock_irq(&target->lock); | ||
1461 | iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); | 1454 | iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); |
1455 | spin_unlock_irq(&target->lock); | ||
1456 | |||
1462 | if (!iu) | 1457 | if (!iu) |
1463 | goto out; | 1458 | return -1; |
1464 | 1459 | ||
1465 | ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, | 1460 | ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, |
1466 | DMA_TO_DEVICE); | 1461 | DMA_TO_DEVICE); |
@@ -1468,70 +1463,46 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, | |||
1468 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); | 1463 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); |
1469 | 1464 | ||
1470 | tsk_mgmt->opcode = SRP_TSK_MGMT; | 1465 | tsk_mgmt->opcode = SRP_TSK_MGMT; |
1471 | tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); | 1466 | tsk_mgmt->lun = cpu_to_be64((u64) lun << 48); |
1472 | tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; | 1467 | tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT; |
1473 | tsk_mgmt->tsk_mgmt_func = func; | 1468 | tsk_mgmt->tsk_mgmt_func = func; |
1474 | tsk_mgmt->task_tag = req->index; | 1469 | tsk_mgmt->task_tag = req_tag; |
1475 | 1470 | ||
1476 | ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, | 1471 | ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, |
1477 | DMA_TO_DEVICE); | 1472 | DMA_TO_DEVICE); |
1478 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) | 1473 | if (srp_post_send(target, iu, sizeof *tsk_mgmt)) { |
1479 | goto out; | 1474 | srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT); |
1480 | |||
1481 | req->tsk_mgmt = iu; | ||
1482 | |||
1483 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1484 | |||
1485 | if (!wait_for_completion_timeout(&req->done, | ||
1486 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) | ||
1487 | return -1; | 1475 | return -1; |
1476 | } | ||
1488 | 1477 | ||
1489 | return 0; | 1478 | if (!wait_for_completion_timeout(&target->tsk_mgmt_done, |
1490 | 1479 | msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) | |
1491 | out: | ||
1492 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1493 | return -1; | ||
1494 | } | ||
1495 | |||
1496 | static int srp_find_req(struct srp_target_port *target, | ||
1497 | struct scsi_cmnd *scmnd, | ||
1498 | struct srp_request **req) | ||
1499 | { | ||
1500 | if (scmnd->host_scribble == (void *) -1L) | ||
1501 | return -1; | 1480 | return -1; |
1502 | 1481 | ||
1503 | *req = &target->req_ring[(long) scmnd->host_scribble]; | ||
1504 | |||
1505 | return 0; | 1482 | return 0; |
1506 | } | 1483 | } |
1507 | 1484 | ||
1508 | static int srp_abort(struct scsi_cmnd *scmnd) | 1485 | static int srp_abort(struct scsi_cmnd *scmnd) |
1509 | { | 1486 | { |
1510 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 1487 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
1511 | struct srp_request *req; | 1488 | struct srp_request *req = (struct srp_request *) scmnd->host_scribble; |
1512 | int ret = SUCCESS; | 1489 | int ret = SUCCESS; |
1513 | 1490 | ||
1514 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); | 1491 | shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); |
1515 | 1492 | ||
1516 | if (target->qp_in_error) | 1493 | if (!req || target->qp_in_error) |
1517 | return FAILED; | 1494 | return FAILED; |
1518 | if (srp_find_req(target, scmnd, &req)) | 1495 | if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, |
1496 | SRP_TSK_ABORT_TASK)) | ||
1519 | return FAILED; | 1497 | return FAILED; |
1520 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) | ||
1521 | return FAILED; | ||
1522 | |||
1523 | spin_lock_irq(target->scsi_host->host_lock); | ||
1524 | 1498 | ||
1525 | if (req->cmd_done) { | 1499 | if (req->scmnd) { |
1526 | srp_remove_req(target, req); | 1500 | if (!target->tsk_mgmt_status) { |
1527 | scmnd->scsi_done(scmnd); | 1501 | srp_remove_req(target, req, 0); |
1528 | } else if (!req->tsk_status) { | 1502 | scmnd->result = DID_ABORT << 16; |
1529 | srp_remove_req(target, req); | 1503 | } else |
1530 | scmnd->result = DID_ABORT << 16; | 1504 | ret = FAILED; |
1531 | } else | 1505 | } |
1532 | ret = FAILED; | ||
1533 | |||
1534 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1535 | 1506 | ||
1536 | return ret; | 1507 | return ret; |
1537 | } | 1508 | } |
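The rewritten srp_send_tsk_mgmt() no longer pins the SCSI host lock for the whole operation: it takes target->lock only long enough to grab a TX IU, posts the task-management request, and then sleeps on the per-target tsk_mgmt_done completion that the response path signals. A rough userspace model of that post-then-wait-with-timeout shape follows, using a mutex/condition-variable pair in place of the kernel completion; tsk_mgmt_send(), tsk_mgmt_complete() and the five-second timeout are illustrative stand-ins.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t tsk_mgmt_done = PTHREAD_COND_INITIALIZER;
static bool done;
static int tsk_mgmt_status;

/* Called from the receive path once the response to the task-management
 * request arrives; models complete(&target->tsk_mgmt_done). */
void tsk_mgmt_complete(int status)
{
	pthread_mutex_lock(&lock);
	tsk_mgmt_status = status;
	done = true;
	pthread_cond_signal(&tsk_mgmt_done);
	pthread_mutex_unlock(&lock);
}

/* Post a task-management request, then wait a bounded time for the
 * response; returns -1 on timeout, else the reported status. */
int tsk_mgmt_send(void (*post_request)(void))
{
	struct timespec deadline;

	pthread_mutex_lock(&lock);
	done = false;
	pthread_mutex_unlock(&lock);

	post_request();			/* e.g. send the TSK_MGMT IU */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;		/* stand-in for SRP_ABORT_TIMEOUT_MS */

	pthread_mutex_lock(&lock);
	while (!done) {
		if (pthread_cond_timedwait(&tsk_mgmt_done, &lock, &deadline)) {
			pthread_mutex_unlock(&lock);
			return -1;	/* no response in time */
		}
	}
	pthread_mutex_unlock(&lock);
	return tsk_mgmt_status;
}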
@@ -1539,26 +1510,23 @@ static int srp_abort(struct scsi_cmnd *scmnd) | |||
1539 | static int srp_reset_device(struct scsi_cmnd *scmnd) | 1510 | static int srp_reset_device(struct scsi_cmnd *scmnd) |
1540 | { | 1511 | { |
1541 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 1512 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
1542 | struct srp_request *req, *tmp; | 1513 | int i; |
1543 | 1514 | ||
1544 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 1515 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
1545 | 1516 | ||
1546 | if (target->qp_in_error) | 1517 | if (target->qp_in_error) |
1547 | return FAILED; | 1518 | return FAILED; |
1548 | if (srp_find_req(target, scmnd, &req)) | 1519 | if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun, |
1520 | SRP_TSK_LUN_RESET)) | ||
1549 | return FAILED; | 1521 | return FAILED; |
1550 | if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) | 1522 | if (target->tsk_mgmt_status) |
1551 | return FAILED; | 1523 | return FAILED; |
1552 | if (req->tsk_status) | ||
1553 | return FAILED; | ||
1554 | |||
1555 | spin_lock_irq(target->scsi_host->host_lock); | ||
1556 | 1524 | ||
1557 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | 1525 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
1558 | if (req->scmnd->device == scmnd->device) | 1526 | struct srp_request *req = &target->req_ring[i]; |
1527 | if (req->scmnd && req->scmnd->device == scmnd->device) | ||
1559 | srp_reset_req(target, req); | 1528 | srp_reset_req(target, req); |
1560 | 1529 | } | |
1561 | spin_unlock_irq(target->scsi_host->host_lock); | ||
1562 | 1530 | ||
1563 | return SUCCESS; | 1531 | return SUCCESS; |
1564 | } | 1532 | } |
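With the per-target req_queue list gone, srp_reset_device() simply walks the fixed req_ring[] array and resets every slot whose command is still outstanding on the affected device. A compact sketch of that scan is below; struct request, reset_req() and the device pointer are illustrative stand-ins for their driver counterparts.

#include <stddef.h>

struct scsi_device;			/* opaque stand-in for the SCSI device */

struct request {
	struct scsi_device *sdev;	/* NULL when the slot is free */
	short index;
};

#define CMD_SQ_SIZE 62			/* assumed ring size */

static struct request req_ring[CMD_SQ_SIZE];

/* Fail the command back and return the slot to the free list. */
static void reset_req(struct request *req)
{
	req->sdev = NULL;
}

/* Reset every slot that still has a command outstanding on sdev. */
static void reset_device(struct scsi_device *sdev)
{
	int i;

	for (i = 0; i < CMD_SQ_SIZE; ++i) {
		struct request *req = &req_ring[i];

		if (req->sdev && req->sdev == sdev)
			reset_req(req);
	}
}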
@@ -1987,9 +1955,12 @@ static ssize_t srp_create_target(struct device *dev, | |||
1987 | target->io_class = SRP_REV16A_IB_IO_CLASS; | 1955 | target->io_class = SRP_REV16A_IB_IO_CLASS; |
1988 | target->scsi_host = target_host; | 1956 | target->scsi_host = target_host; |
1989 | target->srp_host = host; | 1957 | target->srp_host = host; |
1958 | target->lkey = host->srp_dev->mr->lkey; | ||
1959 | target->rkey = host->srp_dev->mr->rkey; | ||
1990 | 1960 | ||
1961 | spin_lock_init(&target->lock); | ||
1962 | INIT_LIST_HEAD(&target->free_tx); | ||
1991 | INIT_LIST_HEAD(&target->free_reqs); | 1963 | INIT_LIST_HEAD(&target->free_reqs); |
1992 | INIT_LIST_HEAD(&target->req_queue); | ||
1993 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { | 1964 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
1994 | target->req_ring[i].index = i; | 1965 | target->req_ring[i].index = i; |
1995 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | 1966 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); |
@@ -2217,9 +2188,9 @@ static void srp_remove_one(struct ib_device *device) | |||
2217 | */ | 2188 | */ |
2218 | spin_lock(&host->target_lock); | 2189 | spin_lock(&host->target_lock); |
2219 | list_for_each_entry(target, &host->target_list, list) { | 2190 | list_for_each_entry(target, &host->target_list, list) { |
2220 | spin_lock_irq(target->scsi_host->host_lock); | 2191 | spin_lock_irq(&target->lock); |
2221 | target->state = SRP_TARGET_REMOVED; | 2192 | target->state = SRP_TARGET_REMOVED; |
2222 | spin_unlock_irq(target->scsi_host->host_lock); | 2193 | spin_unlock_irq(&target->lock); |
2223 | } | 2194 | } |
2224 | spin_unlock(&host->target_lock); | 2195 | spin_unlock(&host->target_lock); |
2225 | 2196 | ||
@@ -2258,8 +2229,7 @@ static int __init srp_init_module(void) | |||
2258 | { | 2229 | { |
2259 | int ret; | 2230 | int ret; |
2260 | 2231 | ||
2261 | BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); | 2232 | BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *)); |
2262 | BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE); | ||
2263 | 2233 | ||
2264 | if (srp_sg_tablesize > 255) { | 2234 | if (srp_sg_tablesize > 255) { |
2265 | printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); | 2235 | printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); |
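The power-of-two checks on the ring sizes give way to a compile-time guarantee that a work completion's wr_id (a u64) is wide enough to hold a pointer, since the driver now stores srp_iu pointers directly in wr_id instead of ring indices. The same kind of check can be written in plain C11 as below; the struct and helper names are illustrative.

#include <assert.h>
#include <stdint.h>

struct wc {
	uint64_t wr_id;			/* opaque cookie echoed back with each completion */
};

struct iu {
	char buf[64];
};

/* Refuse to build if a pointer cannot round-trip through wr_id. */
static_assert(sizeof(((struct wc *)0)->wr_id) >= sizeof(void *),
	      "wr_id is too small to hold a pointer");

static uint64_t iu_to_wr_id(struct iu *iu)
{
	return (uint64_t)(uintptr_t)iu;		/* store the pointer as the cookie */
}

static struct iu *wr_id_to_iu(const struct wc *wc)
{
	return (struct iu *)(uintptr_t)wc->wr_id;	/* recover it on completion */
}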
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9e479f..9dc6fc3fd894 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@ enum { | |||
59 | 59 | ||
60 | SRP_RQ_SHIFT = 6, | 60 | SRP_RQ_SHIFT = 6, |
61 | SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, | 61 | SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, |
62 | SRP_RQ_MASK = SRP_RQ_SIZE - 1, | ||
63 | 62 | ||
64 | SRP_SQ_SIZE = SRP_RQ_SIZE, | 63 | SRP_SQ_SIZE = SRP_RQ_SIZE, |
65 | SRP_SQ_MASK = SRP_SQ_SIZE - 1, | ||
66 | SRP_RSP_SQ_SIZE = 1, | 64 | SRP_RSP_SQ_SIZE = 1, |
67 | SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, | 65 | SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, |
68 | SRP_TSK_MGMT_SQ_SIZE = 1, | 66 | SRP_TSK_MGMT_SQ_SIZE = 1, |
69 | SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, | 67 | SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, |
70 | 68 | ||
71 | SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), | 69 | SRP_TAG_NO_REQ = ~0U, |
70 | SRP_TAG_TSK_MGMT = 1U << 31, | ||
72 | 71 | ||
73 | SRP_FMR_SIZE = 256, | 72 | SRP_FMR_SIZE = 256, |
74 | SRP_FMR_POOL_SIZE = 1024, | 73 | SRP_FMR_POOL_SIZE = 1024, |
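Under the new constants an SRP tag is simply the request-ring index for normal commands, task-management requests set bit 31 (SRP_TAG_TSK_MGMT), and the all-ones SRP_TAG_NO_REQ marks a task-management request, such as a LUN reset, that has no originating command. A small sketch of that encoding, with illustrative helper names:

#include <stdbool.h>
#include <stdint.h>

#define TAG_NO_REQ	(~0U)		/* task mgmt with no originating command */
#define TAG_TSK_MGMT	(1U << 31)	/* high bit marks a task-management tag */

static uint32_t cmd_tag(unsigned int req_index)
{
	return req_index;			/* normal command: plain ring index */
}

static uint32_t tsk_mgmt_tag(uint32_t req_tag)
{
	return req_tag | TAG_TSK_MGMT;		/* same index, bit 31 set */
}

static bool is_tsk_mgmt_tag(uint32_t tag)
{
	return tag & TAG_TSK_MGMT;
}

static uint32_t tag_to_req_index(uint32_t tag)
{
	return tag & ~TAG_TSK_MGMT;
}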
@@ -113,15 +112,29 @@ struct srp_request { | |||
113 | struct list_head list; | 112 | struct list_head list; |
114 | struct scsi_cmnd *scmnd; | 113 | struct scsi_cmnd *scmnd; |
115 | struct srp_iu *cmd; | 114 | struct srp_iu *cmd; |
116 | struct srp_iu *tsk_mgmt; | ||
117 | struct ib_pool_fmr *fmr; | 115 | struct ib_pool_fmr *fmr; |
118 | struct completion done; | ||
119 | short index; | 116 | short index; |
120 | u8 cmd_done; | ||
121 | u8 tsk_status; | ||
122 | }; | 117 | }; |
123 | 118 | ||
124 | struct srp_target_port { | 119 | struct srp_target_port { |
120 | /* These are RW in the hot path, and commonly used together */ | ||
121 | struct list_head free_tx; | ||
122 | struct list_head free_reqs; | ||
123 | spinlock_t lock; | ||
124 | s32 req_lim; | ||
125 | |||
126 | /* These are read-only in the hot path */ | ||
127 | struct ib_cq *send_cq ____cacheline_aligned_in_smp; | ||
128 | struct ib_cq *recv_cq; | ||
129 | struct ib_qp *qp; | ||
130 | u32 lkey; | ||
131 | u32 rkey; | ||
132 | enum srp_target_state state; | ||
133 | |||
134 | /* Everything above this point is used in the hot path of | ||
135 | * command processing. Try to keep them packed into cachelines. | ||
136 | */ | ||
137 | |||
125 | __be64 id_ext; | 138 | __be64 id_ext; |
126 | __be64 ioc_guid; | 139 | __be64 ioc_guid; |
127 | __be64 service_id; | 140 | __be64 service_id; |
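The reordered struct groups the fields written on every command (the free lists, the lock, req_lim) into the leading cachelines and starts the hot read-only fields on a fresh line via ____cacheline_aligned_in_smp, so the writers never invalidate the line the readers depend on. Outside the kernel the same idea can be expressed with C11 alignas; the 64-byte line size and the field names below are assumptions.

#include <stdalign.h>
#include <stdint.h>

#define CACHELINE 64			/* assumed cache line size */

struct target {
	/* Read-write in the hot path; touched together on every command. */
	void *free_tx;
	void *free_reqs;
	int lock;
	int32_t req_lim;

	/* Read-only in the hot path: start on the next cache line so the
	 * writers above never invalidate the line these live on. */
	alignas(CACHELINE) const void *send_cq;
	const void *recv_cq;
	const void *qp;
	uint32_t lkey;
	uint32_t rkey;

	/* Cold configuration and housekeeping fields follow. */
	uint64_t id_ext;
};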
@@ -138,24 +151,13 @@ struct srp_target_port { | |||
138 | int path_query_id; | 151 | int path_query_id; |
139 | 152 | ||
140 | struct ib_cm_id *cm_id; | 153 | struct ib_cm_id *cm_id; |
141 | struct ib_cq *recv_cq; | ||
142 | struct ib_cq *send_cq; | ||
143 | struct ib_qp *qp; | ||
144 | 154 | ||
145 | int max_ti_iu_len; | 155 | int max_ti_iu_len; |
146 | s32 req_lim; | ||
147 | 156 | ||
148 | int zero_req_lim; | 157 | int zero_req_lim; |
149 | 158 | ||
150 | unsigned rx_head; | ||
151 | struct srp_iu *rx_ring[SRP_RQ_SIZE]; | ||
152 | |||
153 | unsigned tx_head; | ||
154 | unsigned tx_tail; | ||
155 | struct srp_iu *tx_ring[SRP_SQ_SIZE]; | 159 | struct srp_iu *tx_ring[SRP_SQ_SIZE]; |
156 | 160 | struct srp_iu *rx_ring[SRP_RQ_SIZE]; | |
157 | struct list_head free_reqs; | ||
158 | struct list_head req_queue; | ||
159 | struct srp_request req_ring[SRP_CMD_SQ_SIZE]; | 161 | struct srp_request req_ring[SRP_CMD_SQ_SIZE]; |
160 | 162 | ||
161 | struct work_struct work; | 163 | struct work_struct work; |
@@ -163,16 +165,18 @@ struct srp_target_port { | |||
163 | struct list_head list; | 165 | struct list_head list; |
164 | struct completion done; | 166 | struct completion done; |
165 | int status; | 167 | int status; |
166 | enum srp_target_state state; | ||
167 | int qp_in_error; | 168 | int qp_in_error; |
169 | |||
170 | struct completion tsk_mgmt_done; | ||
171 | u8 tsk_mgmt_status; | ||
168 | }; | 172 | }; |
169 | 173 | ||
170 | struct srp_iu { | 174 | struct srp_iu { |
175 | struct list_head list; | ||
171 | u64 dma; | 176 | u64 dma; |
172 | void *buf; | 177 | void *buf; |
173 | size_t size; | 178 | size_t size; |
174 | enum dma_data_direction direction; | 179 | enum dma_data_direction direction; |
175 | enum srp_iu_type type; | ||
176 | }; | 180 | }; |
177 | 181 | ||
178 | #endif /* IB_SRP_H */ | 182 | #endif /* IB_SRP_H */ |