Diffstat (limited to 'drivers/infiniband/hw')
27 files changed, 676 insertions, 255 deletions
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 09dda0b8740..c3f5aca4ef0 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
| @@ -189,6 +189,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) | |||
| 189 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | 189 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | #ifdef notyet | ||
| 192 | int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) | 193 | int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) |
| 193 | { | 194 | { |
| 194 | struct rdma_cq_setup setup; | 195 | struct rdma_cq_setup setup; |
| @@ -200,6 +201,7 @@ int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) | |||
| 200 | setup.ovfl_mode = 1; | 201 | setup.ovfl_mode = 1; |
| 201 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); | 202 | return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); |
| 202 | } | 203 | } |
| 204 | #endif | ||
| 203 | 205 | ||
| 204 | static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) | 206 | static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx) |
| 205 | { | 207 | { |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index a237d49bdcc..c5406da3f4c 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h | |||
| @@ -335,8 +335,6 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); | |||
| 335 | int iwch_post_zb_read(struct iwch_qp *qhp); | 335 | int iwch_post_zb_read(struct iwch_qp *qhp); |
| 336 | int iwch_register_device(struct iwch_dev *dev); | 336 | int iwch_register_device(struct iwch_dev *dev); |
| 337 | void iwch_unregister_device(struct iwch_dev *dev); | 337 | void iwch_unregister_device(struct iwch_dev *dev); |
| 338 | int iwch_quiesce_qps(struct iwch_cq *chp); | ||
| 339 | int iwch_resume_qps(struct iwch_cq *chp); | ||
| 340 | void stop_read_rep_timer(struct iwch_qp *qhp); | 338 | void stop_read_rep_timer(struct iwch_qp *qhp); |
| 341 | int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, | 339 | int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php, |
| 342 | struct iwch_mr *mhp, int shift); | 340 | struct iwch_mr *mhp, int shift); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 0993137181d..1b4cd09f74d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
| @@ -1149,59 +1149,3 @@ out: | |||
| 1149 | PDBG("%s exit state %d\n", __func__, qhp->attr.state); | 1149 | PDBG("%s exit state %d\n", __func__, qhp->attr.state); |
| 1150 | return ret; | 1150 | return ret; |
| 1151 | } | 1151 | } |
| 1152 | |||
| 1153 | static int quiesce_qp(struct iwch_qp *qhp) | ||
| 1154 | { | ||
| 1155 | spin_lock_irq(&qhp->lock); | ||
| 1156 | iwch_quiesce_tid(qhp->ep); | ||
| 1157 | qhp->flags |= QP_QUIESCED; | ||
| 1158 | spin_unlock_irq(&qhp->lock); | ||
| 1159 | return 0; | ||
| 1160 | } | ||
| 1161 | |||
| 1162 | static int resume_qp(struct iwch_qp *qhp) | ||
| 1163 | { | ||
| 1164 | spin_lock_irq(&qhp->lock); | ||
| 1165 | iwch_resume_tid(qhp->ep); | ||
| 1166 | qhp->flags &= ~QP_QUIESCED; | ||
| 1167 | spin_unlock_irq(&qhp->lock); | ||
| 1168 | return 0; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | int iwch_quiesce_qps(struct iwch_cq *chp) | ||
| 1172 | { | ||
| 1173 | int i; | ||
| 1174 | struct iwch_qp *qhp; | ||
| 1175 | |||
| 1176 | for (i=0; i < T3_MAX_NUM_QP; i++) { | ||
| 1177 | qhp = get_qhp(chp->rhp, i); | ||
| 1178 | if (!qhp) | ||
| 1179 | continue; | ||
| 1180 | if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) { | ||
| 1181 | quiesce_qp(qhp); | ||
| 1182 | continue; | ||
| 1183 | } | ||
| 1184 | if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp)) | ||
| 1185 | quiesce_qp(qhp); | ||
| 1186 | } | ||
| 1187 | return 0; | ||
| 1188 | } | ||
| 1189 | |||
| 1190 | int iwch_resume_qps(struct iwch_cq *chp) | ||
| 1191 | { | ||
| 1192 | int i; | ||
| 1193 | struct iwch_qp *qhp; | ||
| 1194 | |||
| 1195 | for (i=0; i < T3_MAX_NUM_QP; i++) { | ||
| 1196 | qhp = get_qhp(chp->rhp, i); | ||
| 1197 | if (!qhp) | ||
| 1198 | continue; | ||
| 1199 | if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) { | ||
| 1200 | resume_qp(qhp); | ||
| 1201 | continue; | ||
| 1202 | } | ||
| 1203 | if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp)) | ||
| 1204 | resume_qp(qhp); | ||
| 1205 | } | ||
| 1206 | return 0; | ||
| 1207 | } | ||
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 16032cdb433..cc600c2dd0b 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
| @@ -760,7 +760,6 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count); | |||
| 760 | int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count); | 760 | int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count); |
| 761 | int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); | 761 | int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid); |
| 762 | u16 c4iw_rqes_posted(struct c4iw_qp *qhp); | 762 | u16 c4iw_rqes_posted(struct c4iw_qp *qhp); |
| 763 | int c4iw_post_zb_read(struct c4iw_qp *qhp); | ||
| 764 | int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); | 763 | int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe); |
| 765 | u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); | 764 | u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx); |
| 766 | void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, | 765 | void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 057cb2505ea..20800900ef3 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -892,36 +892,6 @@ static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type, | |||
| 892 | } | 892 | } |
| 893 | } | 893 | } |
| 894 | 894 | ||
| 895 | int c4iw_post_zb_read(struct c4iw_qp *qhp) | ||
| 896 | { | ||
| 897 | union t4_wr *wqe; | ||
| 898 | struct sk_buff *skb; | ||
| 899 | u8 len16; | ||
| 900 | |||
| 901 | PDBG("%s enter\n", __func__); | ||
| 902 | skb = alloc_skb(40, GFP_KERNEL); | ||
| 903 | if (!skb) { | ||
| 904 | printk(KERN_ERR "%s cannot send zb_read!!\n", __func__); | ||
| 905 | return -ENOMEM; | ||
| 906 | } | ||
| 907 | set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); | ||
| 908 | |||
| 909 | wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read); | ||
| 910 | memset(wqe, 0, sizeof wqe->read); | ||
| 911 | wqe->read.r2 = cpu_to_be64(0); | ||
| 912 | wqe->read.stag_sink = cpu_to_be32(1); | ||
| 913 | wqe->read.to_sink_hi = cpu_to_be32(0); | ||
| 914 | wqe->read.to_sink_lo = cpu_to_be32(1); | ||
| 915 | wqe->read.stag_src = cpu_to_be32(1); | ||
| 916 | wqe->read.plen = cpu_to_be32(0); | ||
| 917 | wqe->read.to_src_hi = cpu_to_be32(0); | ||
| 918 | wqe->read.to_src_lo = cpu_to_be32(1); | ||
| 919 | len16 = DIV_ROUND_UP(sizeof wqe->read, 16); | ||
| 920 | init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16); | ||
| 921 | |||
| 922 | return c4iw_ofld_send(&qhp->rhp->rdev, skb); | ||
| 923 | } | ||
| 924 | |||
| 925 | static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, | 895 | static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, |
| 926 | gfp_t gfp) | 896 | gfp_t gfp) |
| 927 | { | 897 | { |
| @@ -1029,7 +999,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1029 | wqe->cookie = (unsigned long) &ep->com.wr_wait; | 999 | wqe->cookie = (unsigned long) &ep->com.wr_wait; |
| 1030 | 1000 | ||
| 1031 | wqe->u.fini.type = FW_RI_TYPE_FINI; | 1001 | wqe->u.fini.type = FW_RI_TYPE_FINI; |
| 1032 | c4iw_init_wr_wait(&ep->com.wr_wait); | ||
| 1033 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1002 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
| 1034 | if (ret) | 1003 | if (ret) |
| 1035 | goto out; | 1004 | goto out; |
| @@ -1125,7 +1094,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
| 1125 | if (qhp->attr.mpa_attr.initiator) | 1094 | if (qhp->attr.mpa_attr.initiator) |
| 1126 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); | 1095 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); |
| 1127 | 1096 | ||
| 1128 | c4iw_init_wr_wait(&qhp->ep->com.wr_wait); | ||
| 1129 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1097 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
| 1130 | if (ret) | 1098 | if (ret) |
| 1131 | goto out; | 1099 | goto out; |
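The rdma_fini()/rdma_init() hunks above drop c4iw_init_wr_wait() from the send path; both functions post a work request and then block until the firmware reply completes ep->com.wr_wait. A minimal userspace sketch of that post-then-wait pattern, using a pthread condition variable in place of the driver's wr_wait (all names here are illustrative, not the cxgb4 API):

#include <pthread.h>
#include <stdbool.h>

struct wr_wait {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            done;
	int             ret;
};

static void wr_wait_init(struct wr_wait *w)
{
	pthread_mutex_init(&w->lock, NULL);
	pthread_cond_init(&w->cond, NULL);
	w->done = false;
	w->ret = 0;
}

/* Reply side (the interrupt handler, in the driver). */
static void wr_wait_complete(struct wr_wait *w, int ret)
{
	pthread_mutex_lock(&w->lock);
	w->ret = ret;
	w->done = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
}

/* Request path: the wait object must be armed before the request that
 * completes it is posted, or a fast reply could be lost. */
static int post_and_wait(struct wr_wait *w, int (*post_request)(void))
{
	int err;

	wr_wait_init(w);
	err = post_request();
	if (err)
		return err;

	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	err = w->ret;
	pthread_mutex_unlock(&w->lock);
	return err;
}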
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 765f0fc1da7..b33f0457a1f 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
| @@ -530,9 +530,8 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
| 530 | for (j = 0; j < 6; j++) { | 530 | for (j = 0; j < 6; j++) { |
| 531 | if (!pdev->resource[j].start) | 531 | if (!pdev->resource[j].start) |
| 532 | continue; | 532 | continue; |
| 533 | ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n", | 533 | ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n", |
| 534 | j, (unsigned long long)pdev->resource[j].start, | 534 | j, &pdev->resource[j], |
| 535 | (unsigned long long)pdev->resource[j].end, | ||
| 536 | (unsigned long long)pci_resource_len(pdev, j)); | 535 | (unsigned long long)pci_resource_len(pdev, j)); |
| 537 | } | 536 | } |
| 538 | 537 | ||
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5a219a2fdf1..e8df155bc3b 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
| @@ -397,10 +397,14 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 397 | cq->resize_buf = NULL; | 397 | cq->resize_buf = NULL; |
| 398 | cq->resize_umem = NULL; | 398 | cq->resize_umem = NULL; |
| 399 | } else { | 399 | } else { |
| 400 | struct mlx4_ib_cq_buf tmp_buf; | ||
| 401 | int tmp_cqe = 0; | ||
| 402 | |||
| 400 | spin_lock_irq(&cq->lock); | 403 | spin_lock_irq(&cq->lock); |
| 401 | if (cq->resize_buf) { | 404 | if (cq->resize_buf) { |
| 402 | mlx4_ib_cq_resize_copy_cqes(cq); | 405 | mlx4_ib_cq_resize_copy_cqes(cq); |
| 403 | mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); | 406 | tmp_buf = cq->buf; |
| 407 | tmp_cqe = cq->ibcq.cqe; | ||
| 404 | cq->buf = cq->resize_buf->buf; | 408 | cq->buf = cq->resize_buf->buf; |
| 405 | cq->ibcq.cqe = cq->resize_buf->cqe; | 409 | cq->ibcq.cqe = cq->resize_buf->cqe; |
| 406 | 410 | ||
| @@ -408,6 +412,9 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 408 | cq->resize_buf = NULL; | 412 | cq->resize_buf = NULL; |
| 409 | } | 413 | } |
| 410 | spin_unlock_irq(&cq->lock); | 414 | spin_unlock_irq(&cq->lock); |
| 415 | |||
| 416 | if (tmp_cqe) | ||
| 417 | mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe); | ||
| 411 | } | 418 | } |
| 412 | 419 | ||
| 413 | goto out; | 420 | goto out; |
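The mlx4_ib_resize_cq() hunks above fix a sleep-under-spinlock bug: freeing the old CQ buffer can sleep, so it must not happen inside spin_lock_irq(). The old buffer and CQE count are now stashed while the lock is held and freed only after the unlock. The same stash-then-free-outside-the-lock pattern as a self-contained userspace sketch (the struct and function names are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct cq {
	pthread_mutex_t lock;	/* stands in for the driver's spinlock */
	void *buf;
	void *resize_buf;
};

/* Swap in the resized buffer under the lock, but defer freeing the
 * old one until the lock is dropped: the free routine may sleep, and
 * sleeping while holding a spinlock with interrupts off is a bug. */
static void cq_finish_resize(struct cq *cq)
{
	void *old = NULL;

	pthread_mutex_lock(&cq->lock);
	if (cq->resize_buf) {
		old = cq->buf;		/* stash; do not free yet */
		cq->buf = cq->resize_buf;
		cq->resize_buf = NULL;
	}
	pthread_mutex_unlock(&cq->lock);

	free(old);			/* safe: lock no longer held */
}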
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index c9a8dd63b9e..57ffa50f509 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -211,6 +211,8 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma | |||
| 211 | if (agent) { | 211 | if (agent) { |
| 212 | send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, | 212 | send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, |
| 213 | IB_MGMT_MAD_DATA, GFP_ATOMIC); | 213 | IB_MGMT_MAD_DATA, GFP_ATOMIC); |
| 214 | if (IS_ERR(send_buf)) | ||
| 215 | return; | ||
| 214 | /* | 216 | /* |
| 215 | * We rely here on the fact that MLX QPs don't use the | 217 | * We rely here on the fact that MLX QPs don't use the |
| 216 | * address handle after the send is posted (this is | 218 | * address handle after the send is posted (this is |
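This hunk (and the identical mthca fix below) guards against ib_create_send_mad() failing: it returns an error encoded in the pointer itself, not NULL, so without the IS_ERR() check the failure value would be dereferenced. A self-contained userspace re-creation of the error-pointer idiom (ERR_PTR/IS_ERR/PTR_ERR mirror the kernel helpers; create_send_buf is a made-up stand-in):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Errno values live in the top 4095 addresses, which no
	 * valid allocation can occupy. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *create_send_buf(int simulate_failure)
{
	if (simulate_failure)
		return ERR_PTR(-ENOMEM);	/* error in the pointer */
	return malloc(64);
}

int main(void)
{
	void *buf = create_send_buf(1);

	if (IS_ERR(buf)) {		/* check before dereferencing */
		fprintf(stderr, "create_send_buf failed: %ld\n",
			PTR_ERR(buf));
		return 1;
	}
	free(buf);
	return 0;
}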
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c index 5648659ff0b..03a59534f59 100644 --- a/drivers/infiniband/hw/mthca/mthca_mad.c +++ b/drivers/infiniband/hw/mthca/mthca_mad.c | |||
| @@ -171,6 +171,8 @@ static void forward_trap(struct mthca_dev *dev, | |||
| 171 | if (agent) { | 171 | if (agent) { |
| 172 | send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, | 172 | send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, |
| 173 | IB_MGMT_MAD_DATA, GFP_ATOMIC); | 173 | IB_MGMT_MAD_DATA, GFP_ATOMIC); |
| 174 | if (IS_ERR(send_buf)) | ||
| 175 | return; | ||
| 174 | /* | 176 | /* |
| 175 | * We rely here on the fact that MLX QPs don't use the | 177 | * We rely here on the fact that MLX QPs don't use the |
| 176 | * address handle after the send is posted (this is | 178 | * address handle after the send is posted (this is |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 3892e2c0e95..5a4c3648472 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
| @@ -908,8 +908,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev) | |||
| 908 | nesvnic->nic_index && | 908 | nesvnic->nic_index && |
| 909 | mc_index < max_pft_entries_avaiable) { | 909 | mc_index < max_pft_entries_avaiable) { |
| 910 | nes_debug(NES_DBG_NIC_RX, | 910 | nes_debug(NES_DBG_NIC_RX, |
| 911 | "mc_index=%d skipping nic_index=%d,\ | 911 | "mc_index=%d skipping nic_index=%d, " |
| 912 | used for=%d \n", mc_index, | 912 | "used for=%d \n", mc_index, |
| 913 | nesvnic->nic_index, | 913 | nesvnic->nic_index, |
| 914 | nesadapter->pft_mcast_map[mc_index]); | 914 | nesadapter->pft_mcast_map[mc_index]); |
| 915 | mc_index++; | 915 | mc_index++; |
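The nes_debug() change above swaps a backslash-continued string literal for adjacent-literal concatenation. The continuation form splices the next source line into the literal verbatim, indentation included, so the log message gained a run of stray whitespace. A two-printf demonstration:

#include <stdio.h>

int main(void)
{
	/* Backslash continuation keeps the next line's leading
	 * whitespace inside the string: */
	printf("mc_index=%d skipping nic_index=%d,\
			used for=%d\n", 1, 2, 3);

	/* Adjacent literals concatenate cleanly: */
	printf("mc_index=%d skipping nic_index=%d, "
	       "used for=%d\n", 1, 2, 3);
	return 0;
}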
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 64c9e7d02d4..73225eee3cc 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
| @@ -766,7 +766,7 @@ struct qib_devdata { | |||
| 766 | void (*f_sdma_hw_start_up)(struct qib_pportdata *); | 766 | void (*f_sdma_hw_start_up)(struct qib_pportdata *); |
| 767 | void (*f_sdma_init_early)(struct qib_pportdata *); | 767 | void (*f_sdma_init_early)(struct qib_pportdata *); |
| 768 | void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); | 768 | void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32); |
| 769 | void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32); | 769 | void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32); |
| 770 | u32 (*f_hdrqempty)(struct qib_ctxtdata *); | 770 | u32 (*f_hdrqempty)(struct qib_ctxtdata *); |
| 771 | u64 (*f_portcntr)(struct qib_pportdata *, u32); | 771 | u64 (*f_portcntr)(struct qib_pportdata *, u32); |
| 772 | u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, | 772 | u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **, |
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c index a86cbf880f9..5246aa486bb 100644 --- a/drivers/infiniband/hw/qib/qib_cq.c +++ b/drivers/infiniband/hw/qib/qib_cq.c | |||
| @@ -100,7 +100,8 @@ void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) | |||
| 100 | wc->head = next; | 100 | wc->head = next; |
| 101 | 101 | ||
| 102 | if (cq->notify == IB_CQ_NEXT_COMP || | 102 | if (cq->notify == IB_CQ_NEXT_COMP || |
| 103 | (cq->notify == IB_CQ_SOLICITED && solicited)) { | 103 | (cq->notify == IB_CQ_SOLICITED && |
| 104 | (solicited || entry->status != IB_WC_SUCCESS))) { | ||
| 104 | cq->notify = IB_CQ_NONE; | 105 | cq->notify = IB_CQ_NONE; |
| 105 | cq->triggered++; | 106 | cq->triggered++; |
| 106 | /* | 107 | /* |
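The qib_cq_enter() change arms the completion event for error CQEs even when the CQ was requested with IB_CQ_SOLICITED and the entry was not solicited; without it, a consumer sleeping on the CQ would never be woken to see a failed work request. The arming test, reduced to a pure function (names are illustrative, not the driver's):

enum cq_notify_state { CQ_NONE, CQ_NEXT_COMP, CQ_SOLICITED };

static int should_signal(enum cq_notify_state notify,
			 int solicited, int wc_success)
{
	return notify == CQ_NEXT_COMP ||
	       (notify == CQ_SOLICITED && (solicited || !wc_success));
}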
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 9cd193603fb..23e584f4c36 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
| @@ -71,6 +71,11 @@ MODULE_DESCRIPTION("QLogic IB driver"); | |||
| 71 | */ | 71 | */ |
| 72 | #define QIB_PIO_MAXIBHDR 128 | 72 | #define QIB_PIO_MAXIBHDR 128 |
| 73 | 73 | ||
| 74 | /* | ||
| 75 | * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt. | ||
| 76 | */ | ||
| 77 | #define QIB_MAX_PKT_RECV 64 | ||
| 78 | |||
| 74 | struct qlogic_ib_stats qib_stats; | 79 | struct qlogic_ib_stats qib_stats; |
| 75 | 80 | ||
| 76 | const char *qib_get_unit_name(int unit) | 81 | const char *qib_get_unit_name(int unit) |
| @@ -284,14 +289,147 @@ static inline void *qib_get_egrbuf(const struct qib_ctxtdata *rcd, u32 etail) | |||
| 284 | * Returns 1 if error was a CRC, else 0. | 289 | * Returns 1 if error was a CRC, else 0. |
| 285 | * Needed for some chip's synthesized error counters. | 290 | * Needed for some chip's synthesized error counters. |
| 286 | */ | 291 | */ |
| 287 | static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt, | 292 | static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, |
| 288 | u32 eflags, u32 l, u32 etail, __le32 *rhf_addr, | 293 | u32 ctxt, u32 eflags, u32 l, u32 etail, |
| 289 | struct qib_message_header *hdr) | 294 | __le32 *rhf_addr, struct qib_message_header *rhdr) |
| 290 | { | 295 | { |
| 291 | u32 ret = 0; | 296 | u32 ret = 0; |
| 292 | 297 | ||
| 293 | if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) | 298 | if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR)) |
| 294 | ret = 1; | 299 | ret = 1; |
| 300 | else if (eflags == QLOGIC_IB_RHF_H_TIDERR) { | ||
| 301 | /* For TIDERR and RC QPs, preemptively schedule a NAK */ | ||
| 302 | struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr; | ||
| 303 | struct qib_other_headers *ohdr = NULL; | ||
| 304 | struct qib_ibport *ibp = &ppd->ibport_data; | ||
| 305 | struct qib_qp *qp = NULL; | ||
| 306 | u32 tlen = qib_hdrget_length_in_bytes(rhf_addr); | ||
| 307 | u16 lid = be16_to_cpu(hdr->lrh[1]); | ||
| 308 | int lnh = be16_to_cpu(hdr->lrh[0]) & 3; | ||
| 309 | u32 qp_num; | ||
| 310 | u32 opcode; | ||
| 311 | u32 psn; | ||
| 312 | int diff; | ||
| 313 | unsigned long flags; | ||
| 314 | |||
| 315 | /* Sanity check packet */ | ||
| 316 | if (tlen < 24) | ||
| 317 | goto drop; | ||
| 318 | |||
| 319 | if (lid < QIB_MULTICAST_LID_BASE) { | ||
| 320 | lid &= ~((1 << ppd->lmc) - 1); | ||
| 321 | if (unlikely(lid != ppd->lid)) | ||
| 322 | goto drop; | ||
| 323 | } | ||
| 324 | |||
| 325 | /* Check for GRH */ | ||
| 326 | if (lnh == QIB_LRH_BTH) | ||
| 327 | ohdr = &hdr->u.oth; | ||
| 328 | else if (lnh == QIB_LRH_GRH) { | ||
| 329 | u32 vtf; | ||
| 330 | |||
| 331 | ohdr = &hdr->u.l.oth; | ||
| 332 | if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR) | ||
| 333 | goto drop; | ||
| 334 | vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow); | ||
| 335 | if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION) | ||
| 336 | goto drop; | ||
| 337 | } else | ||
| 338 | goto drop; | ||
| 339 | |||
| 340 | /* Get opcode and PSN from packet */ | ||
| 341 | opcode = be32_to_cpu(ohdr->bth[0]); | ||
| 342 | opcode >>= 24; | ||
| 343 | psn = be32_to_cpu(ohdr->bth[2]); | ||
| 344 | |||
| 345 | /* Get the destination QP number. */ | ||
| 346 | qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; | ||
| 347 | if (qp_num != QIB_MULTICAST_QPN) { | ||
| 348 | int ruc_res; | ||
| 349 | qp = qib_lookup_qpn(ibp, qp_num); | ||
| 350 | if (!qp) | ||
| 351 | goto drop; | ||
| 352 | |||
| 353 | /* | ||
| 354 | * Handle only RC QPs - for other QP types drop error | ||
| 355 | * packet. | ||
| 356 | */ | ||
| 357 | spin_lock(&qp->r_lock); | ||
| 358 | |||
| 359 | /* Check for valid receive state. */ | ||
| 360 | if (!(ib_qib_state_ops[qp->state] & | ||
| 361 | QIB_PROCESS_RECV_OK)) { | ||
| 362 | ibp->n_pkt_drops++; | ||
| 363 | goto unlock; | ||
| 364 | } | ||
| 365 | |||
| 366 | switch (qp->ibqp.qp_type) { | ||
| 367 | case IB_QPT_RC: | ||
| 368 | spin_lock_irqsave(&qp->s_lock, flags); | ||
| 369 | ruc_res = | ||
| 370 | qib_ruc_check_hdr( | ||
| 371 | ibp, hdr, | ||
| 372 | lnh == QIB_LRH_GRH, | ||
| 373 | qp, | ||
| 374 | be32_to_cpu(ohdr->bth[0])); | ||
| 375 | if (ruc_res) { | ||
| 376 | spin_unlock_irqrestore(&qp->s_lock, | ||
| 377 | flags); | ||
| 378 | goto unlock; | ||
| 379 | } | ||
| 380 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
| 381 | |||
| 382 | /* Only deal with RDMA Writes for now */ | ||
| 383 | if (opcode < | ||
| 384 | IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { | ||
| 385 | diff = qib_cmp24(psn, qp->r_psn); | ||
| 386 | if (!qp->r_nak_state && diff >= 0) { | ||
| 387 | ibp->n_rc_seqnak++; | ||
| 388 | qp->r_nak_state = | ||
| 389 | IB_NAK_PSN_ERROR; | ||
| 390 | /* Use the expected PSN. */ | ||
| 391 | qp->r_ack_psn = qp->r_psn; | ||
| 392 | /* | ||
| 393 | * Wait to send the sequence | ||
| 394 | * NAK until all packets | ||
| 395 | * in the receive queue have | ||
| 396 | * been processed. | ||
| 397 | * Otherwise, we end up | ||
| 398 | * propagating congestion. | ||
| 399 | */ | ||
| 400 | if (list_empty(&qp->rspwait)) { | ||
| 401 | qp->r_flags |= | ||
| 402 | QIB_R_RSP_NAK; | ||
| 403 | atomic_inc( | ||
| 404 | &qp->refcount); | ||
| 405 | list_add_tail( | ||
| 406 | &qp->rspwait, | ||
| 407 | &rcd->qp_wait_list); | ||
| 408 | } | ||
| 409 | } /* Out of sequence NAK */ | ||
| 410 | } /* QP Request NAKs */ | ||
| 411 | break; | ||
| 412 | case IB_QPT_SMI: | ||
| 413 | case IB_QPT_GSI: | ||
| 414 | case IB_QPT_UD: | ||
| 415 | case IB_QPT_UC: | ||
| 416 | default: | ||
| 417 | /* For now don't handle any other QP types */ | ||
| 418 | break; | ||
| 419 | } | ||
| 420 | |||
| 421 | unlock: | ||
| 422 | spin_unlock(&qp->r_lock); | ||
| 423 | /* | ||
| 424 | * Notify qib_destroy_qp() if it is waiting | ||
| 425 | * for us to finish. | ||
| 426 | */ | ||
| 427 | if (atomic_dec_and_test(&qp->refcount)) | ||
| 428 | wake_up(&qp->wait); | ||
| 429 | } /* Unicast QP */ | ||
| 430 | } /* Valid packet with TIDErr */ | ||
| 431 | |||
| 432 | drop: | ||
| 295 | return ret; | 433 | return ret; |
| 296 | } | 434 | } |
| 297 | 435 | ||
| @@ -335,7 +473,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) | |||
| 335 | smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ | 473 | smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ |
| 336 | } | 474 | } |
| 337 | 475 | ||
| 338 | for (last = 0, i = 1; !last && i <= 64; i += !last) { | 476 | for (last = 0, i = 1; !last; i += !last) { |
| 339 | hdr = dd->f_get_msgheader(dd, rhf_addr); | 477 | hdr = dd->f_get_msgheader(dd, rhf_addr); |
| 340 | eflags = qib_hdrget_err_flags(rhf_addr); | 478 | eflags = qib_hdrget_err_flags(rhf_addr); |
| 341 | etype = qib_hdrget_rcv_type(rhf_addr); | 479 | etype = qib_hdrget_rcv_type(rhf_addr); |
| @@ -371,7 +509,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) | |||
| 371 | * packets; only qibhdrerr should be set. | 509 | * packets; only qibhdrerr should be set. |
| 372 | */ | 510 | */ |
| 373 | if (unlikely(eflags)) | 511 | if (unlikely(eflags)) |
| 374 | crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l, | 512 | crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l, |
| 375 | etail, rhf_addr, hdr); | 513 | etail, rhf_addr, hdr); |
| 376 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { | 514 | else if (etype == RCVHQ_RCV_TYPE_NON_KD) { |
| 377 | qib_ib_rcv(rcd, hdr, ebuf, tlen); | 515 | qib_ib_rcv(rcd, hdr, ebuf, tlen); |
| @@ -384,6 +522,9 @@ move_along: | |||
| 384 | l += rsize; | 522 | l += rsize; |
| 385 | if (l >= maxcnt) | 523 | if (l >= maxcnt) |
| 386 | l = 0; | 524 | l = 0; |
| 525 | if (i == QIB_MAX_PKT_RECV) | ||
| 526 | last = 1; | ||
| 527 | |||
| 387 | rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; | 528 | rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; |
| 388 | if (dd->flags & QIB_NODMA_RTAIL) { | 529 | if (dd->flags & QIB_NODMA_RTAIL) { |
| 389 | u32 seq = qib_hdrget_seq(rhf_addr); | 530 | u32 seq = qib_hdrget_seq(rhf_addr); |
| @@ -402,7 +543,7 @@ move_along: | |||
| 402 | */ | 543 | */ |
| 403 | lval = l; | 544 | lval = l; |
| 404 | if (!last && !(i & 0xf)) { | 545 | if (!last && !(i & 0xf)) { |
| 405 | dd->f_update_usrhead(rcd, lval, updegr, etail); | 546 | dd->f_update_usrhead(rcd, lval, updegr, etail, i); |
| 406 | updegr = 0; | 547 | updegr = 0; |
| 407 | } | 548 | } |
| 408 | } | 549 | } |
| @@ -444,7 +585,7 @@ bail: | |||
| 444 | * if no packets were processed. | 585 | * if no packets were processed. |
| 445 | */ | 586 | */ |
| 446 | lval = (u64)rcd->head | dd->rhdrhead_intr_off; | 587 | lval = (u64)rcd->head | dd->rhdrhead_intr_off; |
| 447 | dd->f_update_usrhead(rcd, lval, updegr, etail); | 588 | dd->f_update_usrhead(rcd, lval, updegr, etail, i); |
| 448 | return crcs; | 589 | return crcs; |
| 449 | } | 590 | } |
| 450 | 591 | ||
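Two related changes run through qib_kreceive() above: the per-pass packet cap moves from a hard-coded 64 in the loop condition to the named QIB_MAX_PKT_RECV, and the packet count i is now passed to f_update_usrhead() so the 7322's receive-interrupt timeout can be adjusted from it. The budgeted-poll shape, as a userspace sketch (the ring and packet types are invented for illustration):

#include <stddef.h>

#define MAX_PKT_RECV 64			/* per-pass budget, as in the patch */

struct pkt;				/* opaque illustration type */
struct rx_ring {
	struct pkt *(*next_packet)(struct rx_ring *);
	void (*update_head)(struct rx_ring *, unsigned npkts);
};

static void handle(struct pkt *p) { (void)p; }

/* Process at most MAX_PKT_RECV packets per invocation so one busy
 * context cannot monopolize the CPU; report how many were handled so
 * the caller can tune its interrupt-mitigation timeout. */
static unsigned rx_poll(struct rx_ring *ring)
{
	unsigned i;

	for (i = 0; i < MAX_PKT_RECV; i++) {
		struct pkt *p = ring->next_packet(ring);
		if (!p)
			break;		/* ring drained */
		handle(p);
	}
	ring->update_head(ring, i);	/* mirrors f_update_usrhead(..., npkts) */
	return i;
}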
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 79d9971aff1..75bfad16c11 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
| @@ -1379,17 +1379,17 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, | |||
| 1379 | /* find device (with ACTIVE ports) with fewest ctxts in use */ | 1379 | /* find device (with ACTIVE ports) with fewest ctxts in use */ |
| 1380 | for (ndev = 0; ndev < devmax; ndev++) { | 1380 | for (ndev = 0; ndev < devmax; ndev++) { |
| 1381 | struct qib_devdata *dd = qib_lookup(ndev); | 1381 | struct qib_devdata *dd = qib_lookup(ndev); |
| 1382 | unsigned cused = 0, cfree = 0; | 1382 | unsigned cused = 0, cfree = 0, pusable = 0; |
| 1383 | if (!dd) | 1383 | if (!dd) |
| 1384 | continue; | 1384 | continue; |
| 1385 | if (port && port <= dd->num_pports && | 1385 | if (port && port <= dd->num_pports && |
| 1386 | usable(dd->pport + port - 1)) | 1386 | usable(dd->pport + port - 1)) |
| 1387 | dusable = 1; | 1387 | pusable = 1; |
| 1388 | else | 1388 | else |
| 1389 | for (i = 0; i < dd->num_pports; i++) | 1389 | for (i = 0; i < dd->num_pports; i++) |
| 1390 | if (usable(dd->pport + i)) | 1390 | if (usable(dd->pport + i)) |
| 1391 | dusable++; | 1391 | pusable++; |
| 1392 | if (!dusable) | 1392 | if (!pusable) |
| 1393 | continue; | 1393 | continue; |
| 1394 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; | 1394 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; |
| 1395 | ctxt++) | 1395 | ctxt++) |
| @@ -1397,7 +1397,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, | |||
| 1397 | cused++; | 1397 | cused++; |
| 1398 | else | 1398 | else |
| 1399 | cfree++; | 1399 | cfree++; |
| 1400 | if (cfree && cused < inuse) { | 1400 | if (pusable && cfree && cused < inuse) { |
| 1401 | udd = dd; | 1401 | udd = dd; |
| 1402 | inuse = cused; | 1402 | inuse = cused; |
| 1403 | } | 1403 | } |
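The get_a_ctxt() hunks above fix a scoping bug: the usable-port test accumulated into dusable, a variable living outside the device loop, so a device with no usable ports could ride on a previous iteration's count and still be selected. Renaming to a per-iteration pusable and gating the final test on it is the whole fix. Reduced to its essentials (types are illustrative):

#include <limits.h>
#include <stddef.h>

struct dev {
	int usable_ports;
	int free_ctxts;
	int used_ctxts;
};

/* Pick the device with the fewest contexts in use, considering only
 * devices with at least one usable port. The port counter must be
 * fresh for every device; a loop-wide accumulator (the old dusable)
 * never resets and lets portless devices pass the test. */
static struct dev *pick_device(struct dev *devs, int ndev)
{
	struct dev *best = NULL;
	int inuse = INT_MAX;

	for (int i = 0; i < ndev; i++) {
		int pusable = devs[i].usable_ports;

		if (pusable && devs[i].free_ctxts &&
		    devs[i].used_ctxts < inuse) {
			best = &devs[i];
			inuse = devs[i].used_ctxts;
		}
	}
	return best;
}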
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a5e29dbb953..774dea897e9 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
| @@ -2074,7 +2074,7 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd) | |||
| 2074 | } | 2074 | } |
| 2075 | 2075 | ||
| 2076 | static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, | 2076 | static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, |
| 2077 | u32 updegr, u32 egrhd) | 2077 | u32 updegr, u32 egrhd, u32 npkts) |
| 2078 | { | 2078 | { |
| 2079 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | 2079 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); |
| 2080 | if (updegr) | 2080 | if (updegr) |
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 6fd8d74e739..127a0d5069f 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
| @@ -2297,7 +2297,7 @@ static void qib_7220_config_ctxts(struct qib_devdata *dd) | |||
| 2297 | nchipctxts = qib_read_kreg32(dd, kr_portcnt); | 2297 | nchipctxts = qib_read_kreg32(dd, kr_portcnt); |
| 2298 | dd->cspec->numctxts = nchipctxts; | 2298 | dd->cspec->numctxts = nchipctxts; |
| 2299 | if (qib_n_krcv_queues > 1) { | 2299 | if (qib_n_krcv_queues > 1) { |
| 2300 | dd->qpn_mask = 0x3f; | 2300 | dd->qpn_mask = 0x3e; |
| 2301 | dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; | 2301 | dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports; |
| 2302 | if (dd->first_user_ctxt > nchipctxts) | 2302 | if (dd->first_user_ctxt > nchipctxts) |
| 2303 | dd->first_user_ctxt = nchipctxts; | 2303 | dd->first_user_ctxt = nchipctxts; |
| @@ -2703,7 +2703,7 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) | |||
| 2703 | } | 2703 | } |
| 2704 | 2704 | ||
| 2705 | static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, | 2705 | static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, |
| 2706 | u32 updegr, u32 egrhd) | 2706 | u32 updegr, u32 egrhd, u32 npkts) |
| 2707 | { | 2707 | { |
| 2708 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | 2708 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); |
| 2709 | if (updegr) | 2709 | if (updegr) |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 584d443b533..dbbb0e85afe 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -71,6 +71,9 @@ static void qib_7322_mini_pcs_reset(struct qib_pportdata *); | |||
| 71 | 71 | ||
| 72 | static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); | 72 | static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32); |
| 73 | static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); | 73 | static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned); |
| 74 | static void serdes_7322_los_enable(struct qib_pportdata *, int); | ||
| 75 | static int serdes_7322_init_old(struct qib_pportdata *); | ||
| 76 | static int serdes_7322_init_new(struct qib_pportdata *); | ||
| 74 | 77 | ||
| 75 | #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) | 78 | #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb)) |
| 76 | 79 | ||
| @@ -111,6 +114,21 @@ static ushort qib_singleport; | |||
| 111 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); | 114 | module_param_named(singleport, qib_singleport, ushort, S_IRUGO); |
| 112 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); | 115 | MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space"); |
| 113 | 116 | ||
| 117 | /* | ||
| 118 | * Receive header queue sizes | ||
| 119 | */ | ||
| 120 | static unsigned qib_rcvhdrcnt; | ||
| 121 | module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO); | ||
| 122 | MODULE_PARM_DESC(rcvhdrcnt, "receive header count"); | ||
| 123 | |||
| 124 | static unsigned qib_rcvhdrsize; | ||
| 125 | module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO); | ||
| 126 | MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words"); | ||
| 127 | |||
| 128 | static unsigned qib_rcvhdrentsize; | ||
| 129 | module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO); | ||
| 130 | MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words"); | ||
| 131 | |||
| 114 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ | 132 | #define MAX_ATTEN_LEN 64 /* plenty for any real system */ |
| 115 | /* for read back, default index is ~5m copper cable */ | 133 | /* for read back, default index is ~5m copper cable */ |
| 116 | static char txselect_list[MAX_ATTEN_LEN] = "10"; | 134 | static char txselect_list[MAX_ATTEN_LEN] = "10"; |
| @@ -544,6 +562,7 @@ static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *); | |||
| 544 | 562 | ||
| 545 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ | 563 | #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */ |
| 546 | #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ | 564 | #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */ |
| 565 | #define TXDDS_MFG_SZ 2 /* number of mfg tx settings entries */ | ||
| 547 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ | 566 | #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */ |
| 548 | 567 | ||
| 549 | #define H1_FORCE_VAL 8 | 568 | #define H1_FORCE_VAL 8 |
| @@ -604,6 +623,7 @@ struct qib_chippport_specific { | |||
| 604 | u8 ibmalfusesnap; | 623 | u8 ibmalfusesnap; |
| 605 | struct qib_qsfp_data qsfp_data; | 624 | struct qib_qsfp_data qsfp_data; |
| 606 | char epmsgbuf[192]; /* for port error interrupt msg buffer */ | 625 | char epmsgbuf[192]; /* for port error interrupt msg buffer */ |
| 626 | u8 bounced; | ||
| 607 | }; | 627 | }; |
| 608 | 628 | ||
| 609 | static struct { | 629 | static struct { |
| @@ -1677,6 +1697,8 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | |||
| 1677 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { | 1697 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) { |
| 1678 | force_h1(ppd); | 1698 | force_h1(ppd); |
| 1679 | ppd->cpspec->qdr_reforce = 1; | 1699 | ppd->cpspec->qdr_reforce = 1; |
| 1700 | if (!ppd->dd->cspec->r1) | ||
| 1701 | serdes_7322_los_enable(ppd, 0); | ||
| 1680 | } else if (ppd->cpspec->qdr_reforce && | 1702 | } else if (ppd->cpspec->qdr_reforce && |
| 1681 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && | 1703 | (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) && |
| 1682 | (ibclt == IB_7322_LT_STATE_CFGENH || | 1704 | (ibclt == IB_7322_LT_STATE_CFGENH || |
| @@ -1692,18 +1714,37 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst) | |||
| 1692 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) | 1714 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET))) |
| 1693 | adj_tx_serdes(ppd); | 1715 | adj_tx_serdes(ppd); |
| 1694 | 1716 | ||
| 1695 | if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP && | 1717 | if (ibclt != IB_7322_LT_STATE_LINKUP) { |
| 1696 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { | 1718 | u8 ltstate = qib_7322_phys_portstate(ibcst); |
| 1697 | ppd->cpspec->qdr_dfe_on = 1; | 1719 | u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, |
| 1698 | ppd->cpspec->qdr_dfe_time = 0; | 1720 | LinkTrainingState); |
| 1699 | /* On link down, reenable QDR adaptation */ | 1721 | if (!ppd->dd->cspec->r1 && |
| 1700 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | 1722 | pibclt == IB_7322_LT_STATE_LINKUP && |
| 1701 | ppd->dd->cspec->r1 ? | 1723 | ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER && |
| 1702 | QDR_STATIC_ADAPT_DOWN_R1 : | 1724 | ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN && |
| 1703 | QDR_STATIC_ADAPT_DOWN); | 1725 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && |
| 1726 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | ||
| 1727 | /* If the link went down (but not into recovery), | ||
| 1728 | * turn LOS back on */ | ||
| 1729 | serdes_7322_los_enable(ppd, 1); | ||
| 1730 | if (!ppd->cpspec->qdr_dfe_on && | ||
| 1731 | ibclt <= IB_7322_LT_STATE_SLEEPQUIET) { | ||
| 1732 | ppd->cpspec->qdr_dfe_on = 1; | ||
| 1733 | ppd->cpspec->qdr_dfe_time = 0; | ||
| 1734 | /* On link down, reenable QDR adaptation */ | ||
| 1735 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
| 1736 | ppd->dd->cspec->r1 ? | ||
| 1737 | QDR_STATIC_ADAPT_DOWN_R1 : | ||
| 1738 | QDR_STATIC_ADAPT_DOWN); | ||
| 1739 | printk(KERN_INFO QIB_DRV_NAME | ||
| 1740 | " IB%u:%u re-enabled QDR adaptation " | ||
| 1741 | "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt); | ||
| 1742 | } | ||
| 1704 | } | 1743 | } |
| 1705 | } | 1744 | } |
| 1706 | 1745 | ||
| 1746 | static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32); | ||
| 1747 | |||
| 1707 | /* | 1748 | /* |
| 1708 | * This is per-pport error handling. | 1749 | * This is per-pport error handling. |
| 1709 | * will likely get its own MSIx interrupt (one for each port, | 1750 | * will likely get its own MSIx interrupt (one for each port, |
| @@ -1840,7 +1881,23 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | |||
| 1840 | IB_PHYSPORTSTATE_DISABLED) | 1881 | IB_PHYSPORTSTATE_DISABLED) |
| 1841 | qib_set_ib_7322_lstate(ppd, 0, | 1882 | qib_set_ib_7322_lstate(ppd, 0, |
| 1842 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); | 1883 | QLOGIC_IB_IBCC_LINKINITCMD_DISABLE); |
| 1843 | else | 1884 | else { |
| 1885 | u32 lstate; | ||
| 1886 | /* | ||
| 1887 | * We need the current logical link state before | ||
| 1888 | * lflags are set in handle_e_ibstatuschanged. | ||
| 1889 | */ | ||
| 1890 | lstate = qib_7322_iblink_state(ibcs); | ||
| 1891 | |||
| 1892 | if (IS_QMH(dd) && !ppd->cpspec->bounced && | ||
| 1893 | ltstate == IB_PHYSPORTSTATE_LINKUP && | ||
| 1894 | (lstate >= IB_PORT_INIT && | ||
| 1895 | lstate <= IB_PORT_ACTIVE)) { | ||
| 1896 | ppd->cpspec->bounced = 1; | ||
| 1897 | qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE, | ||
| 1898 | IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL); | ||
| 1899 | } | ||
| 1900 | |||
| 1844 | /* | 1901 | /* |
| 1845 | * Since going into a recovery state causes the link | 1902 | * Since going into a recovery state causes the link |
| 1846 | * state to go down and since recovery is transitory, | 1903 | * state to go down and since recovery is transitory, |
| @@ -1854,6 +1911,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) | |||
| 1854 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && | 1911 | ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT && |
| 1855 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) | 1912 | ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE) |
| 1856 | qib_handle_e_ibstatuschanged(ppd, ibcs); | 1913 | qib_handle_e_ibstatuschanged(ppd, ibcs); |
| 1914 | } | ||
| 1857 | } | 1915 | } |
| 1858 | if (*msg && iserr) | 1916 | if (*msg && iserr) |
| 1859 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); | 1917 | qib_dev_porterr(dd, ppd->port, "%s error\n", msg); |
| @@ -2785,7 +2843,6 @@ static irqreturn_t qib_7322intr(int irq, void *data) | |||
| 2785 | ctxtrbits &= ~rmask; | 2843 | ctxtrbits &= ~rmask; |
| 2786 | if (dd->rcd[i]) { | 2844 | if (dd->rcd[i]) { |
| 2787 | qib_kreceive(dd->rcd[i], NULL, &npkts); | 2845 | qib_kreceive(dd->rcd[i], NULL, &npkts); |
| 2788 | adjust_rcv_timeout(dd->rcd[i], npkts); | ||
| 2789 | } | 2846 | } |
| 2790 | } | 2847 | } |
| 2791 | rmask <<= 1; | 2848 | rmask <<= 1; |
| @@ -2835,7 +2892,6 @@ static irqreturn_t qib_7322pintr(int irq, void *data) | |||
| 2835 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); | 2892 | (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt); |
| 2836 | 2893 | ||
| 2837 | qib_kreceive(rcd, NULL, &npkts); | 2894 | qib_kreceive(rcd, NULL, &npkts); |
| 2838 | adjust_rcv_timeout(rcd, npkts); | ||
| 2839 | 2895 | ||
| 2840 | return IRQ_HANDLED; | 2896 | return IRQ_HANDLED; |
| 2841 | } | 2897 | } |
| @@ -3157,6 +3213,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) | |||
| 3157 | case BOARD_QME7342: | 3213 | case BOARD_QME7342: |
| 3158 | n = "InfiniPath_QME7342"; | 3214 | n = "InfiniPath_QME7342"; |
| 3159 | break; | 3215 | break; |
| 3216 | case 8: | ||
| 3217 | n = "InfiniPath_QME7362"; | ||
| 3218 | dd->flags |= QIB_HAS_QSFP; | ||
| 3219 | break; | ||
| 3160 | case 15: | 3220 | case 15: |
| 3161 | n = "InfiniPath_QLE7342_TEST"; | 3221 | n = "InfiniPath_QLE7342_TEST"; |
| 3162 | dd->flags |= QIB_HAS_QSFP; | 3222 | dd->flags |= QIB_HAS_QSFP; |
| @@ -3475,11 +3535,6 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd) | |||
| 3475 | nchipctxts = qib_read_kreg32(dd, kr_contextcnt); | 3535 | nchipctxts = qib_read_kreg32(dd, kr_contextcnt); |
| 3476 | dd->cspec->numctxts = nchipctxts; | 3536 | dd->cspec->numctxts = nchipctxts; |
| 3477 | if (qib_n_krcv_queues > 1 && dd->num_pports) { | 3537 | if (qib_n_krcv_queues > 1 && dd->num_pports) { |
| 3478 | /* | ||
| 3479 | * Set the mask for which bits from the QPN are used | ||
| 3480 | * to select a context number. | ||
| 3481 | */ | ||
| 3482 | dd->qpn_mask = 0x3f; | ||
| 3483 | dd->first_user_ctxt = NUM_IB_PORTS + | 3538 | dd->first_user_ctxt = NUM_IB_PORTS + |
| 3484 | (qib_n_krcv_queues - 1) * dd->num_pports; | 3539 | (qib_n_krcv_queues - 1) * dd->num_pports; |
| 3485 | if (dd->first_user_ctxt > nchipctxts) | 3540 | if (dd->first_user_ctxt > nchipctxts) |
| @@ -3530,8 +3585,11 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd) | |||
| 3530 | 3585 | ||
| 3531 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ | 3586 | /* kr_rcvegrcnt changes based on the number of contexts enabled */ |
| 3532 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); | 3587 | dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt); |
| 3533 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, | 3588 | if (qib_rcvhdrcnt) |
| 3534 | dd->num_pports > 1 ? 1024U : 2048U); | 3589 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt); |
| 3590 | else | ||
| 3591 | dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, | ||
| 3592 | dd->num_pports > 1 ? 1024U : 2048U); | ||
| 3535 | } | 3593 | } |
| 3536 | 3594 | ||
| 3537 | static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) | 3595 | static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which) |
| @@ -4002,8 +4060,14 @@ static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t) | |||
| 4002 | } | 4060 | } |
| 4003 | 4061 | ||
| 4004 | static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, | 4062 | static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, |
| 4005 | u32 updegr, u32 egrhd) | 4063 | u32 updegr, u32 egrhd, u32 npkts) |
| 4006 | { | 4064 | { |
| 4065 | /* | ||
| 4066 | * Need to write timeout register before updating rcvhdrhead to ensure | ||
| 4067 | * that the timer is enabled on reception of a packet. | ||
| 4068 | */ | ||
| 4069 | if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) | ||
| 4070 | adjust_rcv_timeout(rcd, npkts); | ||
| 4007 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | 4071 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); |
| 4008 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); | 4072 | qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); |
| 4009 | if (updegr) | 4073 | if (updegr) |
| @@ -5522,7 +5586,7 @@ static void qsfp_7322_event(struct work_struct *work) | |||
| 5522 | u64 now = get_jiffies_64(); | 5586 | u64 now = get_jiffies_64(); |
| 5523 | if (time_after64(now, pwrup)) | 5587 | if (time_after64(now, pwrup)) |
| 5524 | break; | 5588 | break; |
| 5525 | msleep(1); | 5589 | msleep(20); |
| 5526 | } | 5590 | } |
| 5527 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); | 5591 | ret = qib_refresh_qsfp_cache(ppd, &qd->cache); |
| 5528 | /* | 5592 | /* |
| @@ -5579,6 +5643,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
| 5579 | u32 pidx, unit, port, deflt, h1; | 5643 | u32 pidx, unit, port, deflt, h1; |
| 5580 | unsigned long val; | 5644 | unsigned long val; |
| 5581 | int any = 0, seth1; | 5645 | int any = 0, seth1; |
| 5646 | int txdds_size; | ||
| 5582 | 5647 | ||
| 5583 | str = txselect_list; | 5648 | str = txselect_list; |
| 5584 | 5649 | ||
| @@ -5587,6 +5652,10 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
| 5587 | for (pidx = 0; pidx < dd->num_pports; ++pidx) | 5652 | for (pidx = 0; pidx < dd->num_pports; ++pidx) |
| 5588 | dd->pport[pidx].cpspec->no_eep = deflt; | 5653 | dd->pport[pidx].cpspec->no_eep = deflt; |
| 5589 | 5654 | ||
| 5655 | txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ; | ||
| 5656 | if (IS_QME(dd) || IS_QMH(dd)) | ||
| 5657 | txdds_size += TXDDS_MFG_SZ; | ||
| 5658 | |||
| 5590 | while (*nxt && nxt[1]) { | 5659 | while (*nxt && nxt[1]) { |
| 5591 | str = ++nxt; | 5660 | str = ++nxt; |
| 5592 | unit = simple_strtoul(str, &nxt, 0); | 5661 | unit = simple_strtoul(str, &nxt, 0); |
| @@ -5609,7 +5678,7 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change) | |||
| 5609 | ; | 5678 | ; |
| 5610 | continue; | 5679 | continue; |
| 5611 | } | 5680 | } |
| 5612 | if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ) | 5681 | if (val >= txdds_size) |
| 5613 | continue; | 5682 | continue; |
| 5614 | seth1 = 0; | 5683 | seth1 = 0; |
| 5615 | h1 = 0; /* gcc thinks it might be used uninitted */ | 5684 | h1 = 0; /* gcc thinks it might be used uninitted */ |
| @@ -5661,10 +5730,11 @@ static int setup_txselect(const char *str, struct kernel_param *kp) | |||
| 5661 | return -ENOSPC; | 5730 | return -ENOSPC; |
| 5662 | } | 5731 | } |
| 5663 | val = simple_strtoul(str, &n, 0); | 5732 | val = simple_strtoul(str, &n, 0); |
| 5664 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) { | 5733 | if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + |
| 5734 | TXDDS_MFG_SZ)) { | ||
| 5665 | printk(KERN_INFO QIB_DRV_NAME | 5735 | printk(KERN_INFO QIB_DRV_NAME |
| 5666 | "txselect_values must start with a number < %d\n", | 5736 | "txselect_values must start with a number < %d\n", |
| 5667 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | 5737 | TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); |
| 5668 | return -EINVAL; | 5738 | return -EINVAL; |
| 5669 | } | 5739 | } |
| 5670 | strcpy(txselect_list, str); | 5740 | strcpy(txselect_list, str); |
| @@ -5810,7 +5880,8 @@ static void write_7322_initregs(struct qib_devdata *dd) | |||
| 5810 | unsigned n, regno; | 5880 | unsigned n, regno; |
| 5811 | unsigned long flags; | 5881 | unsigned long flags; |
| 5812 | 5882 | ||
| 5813 | if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported) | 5883 | if (dd->n_krcv_queues < 2 || |
| 5884 | !dd->pport[pidx].link_speed_supported) | ||
| 5814 | continue; | 5885 | continue; |
| 5815 | 5886 | ||
| 5816 | ppd = &dd->pport[pidx]; | 5887 | ppd = &dd->pport[pidx]; |
| @@ -6097,8 +6168,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
| 6097 | ppd++; | 6168 | ppd++; |
| 6098 | } | 6169 | } |
| 6099 | 6170 | ||
| 6100 | dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE; | 6171 | dd->rcvhdrentsize = qib_rcvhdrentsize ? |
| 6101 | dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE; | 6172 | qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE; |
| 6173 | dd->rcvhdrsize = qib_rcvhdrsize ? | ||
| 6174 | qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE; | ||
| 6102 | dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); | 6175 | dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32); |
| 6103 | 6176 | ||
| 6104 | /* we always allocate at least 2048 bytes for eager buffers */ | 6177 | /* we always allocate at least 2048 bytes for eager buffers */ |
| @@ -6495,7 +6568,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, | |||
| 6495 | /* make sure we see an updated copy next time around */ | 6568 | /* make sure we see an updated copy next time around */ |
| 6496 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); | 6569 | sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); |
| 6497 | sleeps++; | 6570 | sleeps++; |
| 6498 | msleep(1); | 6571 | msleep(20); |
| 6499 | } | 6572 | } |
| 6500 | 6573 | ||
| 6501 | switch (which) { | 6574 | switch (which) { |
| @@ -6993,6 +7066,12 @@ static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = { | |||
| 6993 | { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ | 7066 | { 0, 1, 0, 12 }, /* QMH7342 backplane settings */ |
| 6994 | }; | 7067 | }; |
| 6995 | 7068 | ||
| 7069 | static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = { | ||
| 7070 | /* amp, pre, main, post */ | ||
| 7071 | { 0, 0, 0, 0 }, /* QME7342 mfg settings */ | ||
| 7072 | { 0, 0, 0, 6 }, /* QME7342 P2 mfg settings */ | ||
| 7073 | }; | ||
| 7074 | |||
| 6996 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, | 7075 | static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds, |
| 6997 | unsigned atten) | 7076 | unsigned atten) |
| 6998 | { | 7077 | { |
| @@ -7066,6 +7145,16 @@ static void find_best_ent(struct qib_pportdata *ppd, | |||
| 7066 | *sdr_dds = &txdds_extra_sdr[idx]; | 7145 | *sdr_dds = &txdds_extra_sdr[idx]; |
| 7067 | *ddr_dds = &txdds_extra_ddr[idx]; | 7146 | *ddr_dds = &txdds_extra_ddr[idx]; |
| 7068 | *qdr_dds = &txdds_extra_qdr[idx]; | 7147 | *qdr_dds = &txdds_extra_qdr[idx]; |
| 7148 | } else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) && | ||
| 7149 | ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + | ||
| 7150 | TXDDS_MFG_SZ)) { | ||
| 7151 | idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ); | ||
| 7152 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7153 | " IB%u:%u use idx %u into txdds_mfg\n", | ||
| 7154 | ppd->dd->unit, ppd->port, idx); | ||
| 7155 | *sdr_dds = &txdds_extra_mfg[idx]; | ||
| 7156 | *ddr_dds = &txdds_extra_mfg[idx]; | ||
| 7157 | *qdr_dds = &txdds_extra_mfg[idx]; | ||
| 7069 | } else { | 7158 | } else { |
| 7070 | /* this shouldn't happen, it's range checked */ | 7159 | /* this shouldn't happen, it's range checked */ |
| 7071 | *sdr_dds = txdds_sdr + qib_long_atten; | 7160 | *sdr_dds = txdds_sdr + qib_long_atten; |
| @@ -7210,9 +7299,30 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data, | |||
| 7210 | } | 7299 | } |
| 7211 | } | 7300 | } |
| 7212 | 7301 | ||
| 7302 | static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) | ||
| 7303 | { | ||
| 7304 | u64 data = qib_read_kreg_port(ppd, krp_serdesctrl); | ||
| 7305 | printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n", | ||
| 7306 | ppd->dd->unit, ppd->port, (enable ? "on" : "off")); | ||
| 7307 | if (enable) | ||
| 7308 | data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN); | ||
| 7309 | else | ||
| 7310 | data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN); | ||
| 7311 | qib_write_kreg_port(ppd, krp_serdesctrl, data); | ||
| 7312 | } | ||
| 7313 | |||
| 7213 | static int serdes_7322_init(struct qib_pportdata *ppd) | 7314 | static int serdes_7322_init(struct qib_pportdata *ppd) |
| 7214 | { | 7315 | { |
| 7215 | u64 data; | 7316 | int ret = 0; |
| 7317 | if (ppd->dd->cspec->r1) | ||
| 7318 | ret = serdes_7322_init_old(ppd); | ||
| 7319 | else | ||
| 7320 | ret = serdes_7322_init_new(ppd); | ||
| 7321 | return ret; | ||
| 7322 | } | ||
| 7323 | |||
| 7324 | static int serdes_7322_init_old(struct qib_pportdata *ppd) | ||
| 7325 | { | ||
| 7216 | u32 le_val; | 7326 | u32 le_val; |
| 7217 | 7327 | ||
| 7218 | /* | 7328 | /* |
| @@ -7270,11 +7380,7 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
| 7270 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | 7380 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ |
| 7271 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | 7381 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ |
| 7272 | 7382 | ||
| 7273 | data = qib_read_kreg_port(ppd, krp_serdesctrl); | 7383 | serdes_7322_los_enable(ppd, 1); |
| 7274 | /* Turn off IB latency mode */ | ||
| 7275 | data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); | ||
| 7276 | qib_write_kreg_port(ppd, krp_serdesctrl, data | | ||
| 7277 | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); | ||
| 7278 | 7384 | ||
| 7279 | /* rxbistena; set 0 to avoid effects of it switch later */ | 7385 | /* rxbistena; set 0 to avoid effects of it switch later */ |
| 7280 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); | 7386 | ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15); |
| @@ -7314,6 +7420,205 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
| 7314 | return 0; | 7420 | return 0; |
| 7315 | } | 7421 | } |
| 7316 | 7422 | ||
| 7423 | static int serdes_7322_init_new(struct qib_pportdata *ppd) | ||
| 7424 | { | ||
| 7425 | u64 tstart; | ||
| 7426 | u32 le_val, rxcaldone; | ||
| 7427 | int chan, chan_done = (1 << SERDES_CHANS) - 1; | ||
| 7428 | |||
| 7429 | /* | ||
| 7430 | * Initialize the Tx DDS tables. Also done every QSFP event, | ||
| 7431 | * for adapters with QSFP | ||
| 7432 | */ | ||
| 7433 | init_txdds_table(ppd, 0); | ||
| 7434 | |||
| 7435 | /* Clear cmode-override, may be set from older driver */ | ||
| 7436 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14); | ||
| 7437 | |||
| 7438 | /* ensure no tx overrides from earlier driver loads */ | ||
| 7439 | qib_write_kreg_port(ppd, krp_tx_deemph_override, | ||
| 7440 | SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, | ||
| 7441 | reset_tx_deemphasis_override)); | ||
| 7442 | |||
| 7443 | /* START OF LSI SUGGESTED SERDES BRINGUP */ | ||
| 7444 | /* Reset - Calibration Setup */ | ||
| 7445 | /* Stop DFE adaptation */ | ||
| 7446 | ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1)); | ||
| 7447 | /* Disable LE1 */ | ||
| 7448 | ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5)); | ||
| 7449 | /* Disable autoadapt for LE1 */ | ||
| 7450 | ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15)); | ||
| 7451 | /* Disable LE2 */ | ||
| 7452 | ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6)); | ||
| 7453 | /* Disable VGA */ | ||
| 7454 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | ||
| 7455 | /* Disable AFE Offset Cancel */ | ||
| 7456 | ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12)); | ||
| 7457 | /* Disable Timing Loop */ | ||
| 7458 | ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3)); | ||
| 7459 | /* Disable Frequency Loop */ | ||
| 7460 | ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4)); | ||
| 7461 | /* Disable Baseline Wander Correction */ | ||
| 7462 | ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13)); | ||
| 7463 | /* Disable RX Calibration */ | ||
| 7464 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | ||
| 7465 | /* Disable RX Offset Calibration */ | ||
| 7466 | ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4)); | ||
| 7467 | /* Select BB CDR */ | ||
| 7468 | ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15)); | ||
| 7469 | /* CDR Step Size */ | ||
| 7470 | ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8)); | ||
| 7471 | /* Enable phase Calibration */ | ||
| 7472 | ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5)); | ||
| 7473 | /* DFE Bandwidth [2:14-12] */ | ||
| 7474 | ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12)); | ||
| 7475 | /* DFE Config (4 taps only) */ | ||
| 7476 | ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0)); | ||
| 7477 | /* Gain Loop Bandwidth */ | ||
| 7478 | if (!ppd->dd->cspec->r1) { | ||
| 7479 | ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12)); | ||
| 7480 | ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8)); | ||
| 7481 | } else { | ||
| 7482 | ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11)); | ||
| 7483 | } | ||
| 7484 | /* Baseline Wander Correction Gain [13:4-0] (leave as default) */ | ||
| 7485 | /* Baseline Wander Correction Gain [3:7-5] (leave as default) */ | ||
| 7486 | /* Data Rate Select [5:7-6] (leave as default) */ | ||
| 7487 | /* RX Parallel Word Width [3:10-8] (leave as default) */ | ||
| 7488 | |||
| 7489 | /* RX RESET */ | ||
| 7490 | /* Single- or Multi-channel reset */ | ||
| 7491 | /* RX Analog reset */ | ||
| 7492 | /* RX Digital reset */ | ||
| 7493 | ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13)); | ||
| 7494 | msleep(20); | ||
| 7495 | /* RX Analog reset */ | ||
| 7496 | ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14)); | ||
| 7497 | msleep(20); | ||
| 7498 | /* RX Digital reset */ | ||
| 7499 | ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13)); | ||
| 7500 | msleep(20); | ||
| 7501 | |||
| 7502 | /* setup LoS params; these are subsystem, so chan == 5 */ | ||
| 7503 | /* LoS filter threshold_count on, ch 0-3, set to 8 */ | ||
| 7504 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11)); | ||
| 7505 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4)); | ||
| 7506 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11)); | ||
| 7507 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4)); | ||
| 7508 | |||
| 7509 | /* LoS filter threshold_count off, ch 0-3, set to 4 */ | ||
| 7510 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0)); | ||
| 7511 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8)); | ||
| 7512 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0)); | ||
| 7513 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8)); | ||
| 7514 | |||
| 7515 | /* LoS filter select enabled */ | ||
| 7516 | ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15); | ||
| 7517 | |||
| 7518 | /* LoS target data: SDR=4, DDR=2, QDR=1 */ | ||
| 7519 | ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */ | ||
| 7520 | ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */ | ||
| 7521 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | ||
| 7522 | |||
| 7523 | /* Turn on LOS on initial SERDES init */ | ||
| 7524 | serdes_7322_los_enable(ppd, 1); | ||
| 7525 | /* FLoop LOS gate: PPM filter enabled */ | ||
| 7526 | ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10); | ||
| 7527 | |||
| 7528 | /* RX LATCH CALIBRATION */ | ||
| 7529 | /* Enable Eyefinder Phase Calibration latch */ | ||
| 7530 | ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0)); | ||
| 7531 | /* Enable RX Offset Calibration latch */ | ||
| 7532 | ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4)); | ||
| 7533 | msleep(20); | ||
| 7534 | /* Start Calibration */ | ||
| 7535 | ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); | ||
| 7536 | tstart = get_jiffies_64(); | ||
| 7537 | while (chan_done && | ||
| 7538 | !time_after64(get_jiffies_64(), tstart + msecs_to_jiffies(500))) { | ||
| 7539 | msleep(20); | ||
| 7540 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | ||
| 7541 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | ||
| 7542 | (chan + (chan >> 1)), | ||
| 7543 | 25, 0, 0); | ||
| 7544 | if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 && | ||
| 7545 | (~chan_done & (1 << chan)) == 0) | ||
| 7546 | chan_done &= ~(1 << chan); | ||
| 7547 | } | ||
| 7548 | } | ||
| 7549 | if (chan_done) { | ||
| 7550 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7551 | " Serdes %d calibration not done after .5 sec: 0x%x\n", | ||
| 7552 | IBSD(ppd->hw_pidx), chan_done); | ||
| 7553 | } else { | ||
| 7554 | for (chan = 0; chan < SERDES_CHANS; ++chan) { | ||
| 7555 | rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), | ||
| 7556 | (chan + (chan >> 1)), | ||
| 7557 | 25, 0, 0); | ||
| 7558 | if ((~rxcaldone & (u32)BMASK(10, 10)) == 0) | ||
| 7559 | printk(KERN_INFO QIB_DRV_NAME | ||
| 7560 | " Serdes %d chan %d calibration " | ||
| 7561 | "failed\n", IBSD(ppd->hw_pidx), chan); | ||
| 7562 | } | ||
| 7563 | } | ||
| 7564 | |||
| 7565 | /* Turn off Calibration */ | ||
| 7566 | ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10)); | ||
| 7567 | msleep(20); | ||
| 7568 | |||
| 7569 | /* BRING RX UP */ | ||
| 7570 | /* Set LE2 value (May be overridden in qsfp_7322_event) */ | ||
| 7571 | le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT; | ||
| 7572 | ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7)); | ||
| 7573 | /* Set LE2 Loop bandwidth */ | ||
| 7574 | ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5)); | ||
| 7575 | /* Enable LE2 */ | ||
| 7576 | ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6)); | ||
| 7577 | msleep(20); | ||
| 7578 | /* Enable H0 only */ | ||
| 7579 | ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1)); | ||
| 7580 | /* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */ | ||
| 7581 | le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac; | ||
| 7582 | ibsd_wr_allchans(ppd, 21, le_val, 0xfffe); | ||
| 7583 | /* Enable VGA */ | ||
| 7584 | ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0)); | ||
| 7585 | msleep(20); | ||
| 7586 | /* Set Frequency Loop Bandwidth */ | ||
| 7587 | ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5)); | ||
| 7588 | /* Enable Frequency Loop */ | ||
| 7589 | ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4)); | ||
| 7590 | /* Set Timing Loop Bandwidth */ | ||
| 7591 | ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9)); | ||
| 7592 | /* Enable Timing Loop */ | ||
| 7593 | ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3)); | ||
| 7594 | msleep(50); | ||
| 7595 | /* Enable DFE | ||
| 7596 | * Set receive adaptation mode. SDR and DDR adaptation are | ||
| 7597 | * always on, and QDR is initially enabled; later disabled. | ||
| 7598 | */ | ||
| 7599 | qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL); | ||
| 7600 | qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL); | ||
| 7601 | qib_write_kreg_port(ppd, krp_static_adapt_dis(2), | ||
| 7602 | ppd->dd->cspec->r1 ? | ||
| 7603 | QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN); | ||
| 7604 | ppd->cpspec->qdr_dfe_on = 1; | ||
| 7605 | /* Disable LE1 */ | ||
| 7606 | ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5)); | ||
| 7607 | /* Disable auto adapt for LE1 */ | ||
| 7608 | ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15)); | ||
| 7609 | msleep(20); | ||
| 7610 | /* Enable AFE Offset Cancel */ | ||
| 7611 | ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12)); | ||
| 7612 | /* Enable Baseline Wander Correction */ | ||
| 7613 | ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13)); | ||
| 7614 | /* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */ | ||
| 7615 | ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11)); | ||
| 7616 | /* VGA output common mode */ | ||
| 7617 | ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2)); | ||
| 7618 | |||
| 7619 | return 0; | ||
| 7620 | } | ||
| 7621 | |||
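The serdes bring-up above is one long run of read-modify-write field updates: each ibsd_wr_allchans(ppd, reg, data, mask) call changes only the bits selected by mask in register reg, on every channel. Below is a minimal userspace sketch of that arithmetic; BMASK matches the macro's apparent definition in this driver, and reg_mod() is a stand-in for the hardware AHB access (ahb_mod/ibsd_wr_allchans), which cannot be reproduced here.

    /* Masked register-update arithmetic, as used throughout the serdes
     * init above. BMASK(msb, lsb) covers bits msb..lsb inclusive;
     * reg_mod() is a read-modify-write, so unselected fields keep
     * their old value. */
    #include <stdint.h>
    #include <stdio.h>

    #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))

    static uint32_t reg_mod(uint32_t oldval, uint32_t data, uint32_t mask)
    {
        return (oldval & ~mask) | (data & mask);
    }

    int main(void)
    {
        uint32_t reg = 0xffff;

        /* e.g. "DFE Bandwidth [2:14-12]": write 4 into bits 14..12 */
        reg = reg_mod(reg, 4 << 12, BMASK(14, 12));
        printf("reg = 0x%04x\n", (unsigned)reg); /* 0xcfff: bits 14..12 = 100b */
        return 0;
    }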
| 7317 | /* start adjust QMH serdes parameters */ | 7622 | /* start adjust QMH serdes parameters */ |
| 7318 | 7623 | ||
| 7319 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) | 7624 | static void set_man_code(struct qib_pportdata *ppd, int chan, int code) |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index f3b50393604..7896afbb9ce 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
| @@ -92,9 +92,11 @@ unsigned long *qib_cpulist; | |||
| 92 | /* set number of contexts we'll actually use */ | 92 | /* set number of contexts we'll actually use */ |
| 93 | void qib_set_ctxtcnt(struct qib_devdata *dd) | 93 | void qib_set_ctxtcnt(struct qib_devdata *dd) |
| 94 | { | 94 | { |
| 95 | if (!qib_cfgctxts) | 95 | if (!qib_cfgctxts) { |
| 96 | dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); | 96 | dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); |
| 97 | else if (qib_cfgctxts < dd->num_pports) | 97 | if (dd->cfgctxts > dd->ctxtcnt) |
| 98 | dd->cfgctxts = dd->ctxtcnt; | ||
| 99 | } else if (qib_cfgctxts < dd->num_pports) | ||
| 98 | dd->cfgctxts = dd->ctxtcnt; | 100 | dd->cfgctxts = dd->ctxtcnt; |
| 99 | else if (qib_cfgctxts <= dd->ctxtcnt) | 101 | else if (qib_cfgctxts <= dd->ctxtcnt) |
| 100 | dd->cfgctxts = qib_cfgctxts; | 102 | dd->cfgctxts = qib_cfgctxts; |
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 54a40828a10..a693c56ec8a 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c | |||
| @@ -131,7 +131,8 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs) | |||
| 131 | /* start a 75msec timer to clear symbol errors */ | 131 | /* start a 75msec timer to clear symbol errors */ |
| 132 | mod_timer(&ppd->symerr_clear_timer, | 132 | mod_timer(&ppd->symerr_clear_timer, |
| 133 | msecs_to_jiffies(75)); | 133 | msecs_to_jiffies(75)); |
| 134 | } else if (ltstate == IB_PHYSPORTSTATE_LINKUP) { | 134 | } else if (ltstate == IB_PHYSPORTSTATE_LINKUP && |
| 135 | !(ppd->lflags & QIBL_LINKACTIVE)) { | ||
| 135 | /* active, but not active deferred */ | 136 | /* active, but not active deferred */ |
| 136 | qib_hol_up(ppd); /* useful only for 6120 now */ | 137 | qib_hol_up(ppd); /* useful only for 6120 now */ |
| 137 | *ppd->statusp |= | 138 | *ppd->statusp |= |
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 4b80eb153d5..8fd19a47df0 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
| @@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
| 136 | struct qib_mregion *mr; | 136 | struct qib_mregion *mr; |
| 137 | unsigned n, m; | 137 | unsigned n, m; |
| 138 | size_t off; | 138 | size_t off; |
| 139 | int ret = 0; | ||
| 140 | unsigned long flags; | 139 | unsigned long flags; |
| 141 | 140 | ||
| 142 | /* | 141 | /* |
| @@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
| 152 | if (!dev->dma_mr) | 151 | if (!dev->dma_mr) |
| 153 | goto bail; | 152 | goto bail; |
| 154 | atomic_inc(&dev->dma_mr->refcount); | 153 | atomic_inc(&dev->dma_mr->refcount); |
| 154 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
| 155 | |||
| 155 | isge->mr = dev->dma_mr; | 156 | isge->mr = dev->dma_mr; |
| 156 | isge->vaddr = (void *) sge->addr; | 157 | isge->vaddr = (void *) sge->addr; |
| 157 | isge->length = sge->length; | 158 | isge->length = sge->length; |
| @@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
| 170 | off + sge->length > mr->length || | 171 | off + sge->length > mr->length || |
| 171 | (mr->access_flags & acc) != acc)) | 172 | (mr->access_flags & acc) != acc)) |
| 172 | goto bail; | 173 | goto bail; |
| 174 | atomic_inc(&mr->refcount); | ||
| 175 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
| 173 | 176 | ||
| 174 | off += mr->offset; | 177 | off += mr->offset; |
| 175 | m = 0; | 178 | if (mr->page_shift) { |
| 176 | n = 0; | 179 | /* |
| 177 | while (off >= mr->map[m]->segs[n].length) { | 180 | * Page sizes are a uniform power of 2, so no loop is necessary; |
| 178 | off -= mr->map[m]->segs[n].length; | 181 | * entries_spanned_by_off is the number of times the loop below |
| 179 | n++; | 182 | * would have executed. |
| 180 | if (n >= QIB_SEGSZ) { | 183 | */ |
| 181 | m++; | 184 | size_t entries_spanned_by_off; |
| 182 | n = 0; | 185 | |
| 186 | entries_spanned_by_off = off >> mr->page_shift; | ||
| 187 | off -= (entries_spanned_by_off << mr->page_shift); | ||
| 188 | m = entries_spanned_by_off / QIB_SEGSZ; | ||
| 189 | n = entries_spanned_by_off % QIB_SEGSZ; | ||
| 190 | } else { | ||
| 191 | m = 0; | ||
| 192 | n = 0; | ||
| 193 | while (off >= mr->map[m]->segs[n].length) { | ||
| 194 | off -= mr->map[m]->segs[n].length; | ||
| 195 | n++; | ||
| 196 | if (n >= QIB_SEGSZ) { | ||
| 197 | m++; | ||
| 198 | n = 0; | ||
| 199 | } | ||
| 183 | } | 200 | } |
| 184 | } | 201 | } |
| 185 | atomic_inc(&mr->refcount); | ||
| 186 | isge->mr = mr; | 202 | isge->mr = mr; |
| 187 | isge->vaddr = mr->map[m]->segs[n].vaddr + off; | 203 | isge->vaddr = mr->map[m]->segs[n].vaddr + off; |
| 188 | isge->length = mr->map[m]->segs[n].length - off; | 204 | isge->length = mr->map[m]->segs[n].length - off; |
| @@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
| 190 | isge->m = m; | 206 | isge->m = m; |
| 191 | isge->n = n; | 207 | isge->n = n; |
| 192 | ok: | 208 | ok: |
| 193 | ret = 1; | 209 | return 1; |
| 194 | bail: | 210 | bail: |
| 195 | spin_unlock_irqrestore(&rkt->lock, flags); | 211 | spin_unlock_irqrestore(&rkt->lock, flags); |
| 196 | return ret; | 212 | return 0; |
| 197 | } | 213 | } |
| 198 | 214 | ||
| 199 | /** | 215 | /** |
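The qib_lkey_ok() hunk above swaps the per-segment walk for direct arithmetic whenever the region has a uniform power-of-2 page size. The self-contained sketch below checks that the fast path lands on the same (m, n, off) triple as the loop it replaces; the QIB_SEGSZ value here is assumed for illustration only.

    /* Sketch verifying the shift/divide computation from the patch
     * against the segment walk it replaces, assuming every segment is
     * 1 << page_shift bytes long. QIB_SEGSZ is an illustrative value. */
    #include <assert.h>
    #include <stdio.h>

    #define QIB_SEGSZ 64 /* segments per map entry (assumed) */

    int main(void)
    {
        unsigned page_shift = 12;                  /* 4 KiB pages */
        size_t off = (size_t)5000 * 4096 + 123;    /* byte offset into MR */

        /* Fast path from the patch */
        size_t entries = off >> page_shift;
        size_t fast_off = off - (entries << page_shift);
        unsigned fast_m = entries / QIB_SEGSZ;
        unsigned fast_n = entries % QIB_SEGSZ;

        /* Old loop, with uniform segment lengths */
        size_t loop_off = off;
        unsigned m = 0, n = 0;
        while (loop_off >= ((size_t)1 << page_shift)) {
            loop_off -= (size_t)1 << page_shift;
            if (++n >= QIB_SEGSZ) {
                m++;
                n = 0;
            }
        }

        assert(m == fast_m && n == fast_n && loop_off == fast_off);
        printf("m=%u n=%u off=%zu\n", fast_m, fast_n, fast_off);
        return 0;
    }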
| @@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
| 214 | struct qib_mregion *mr; | 230 | struct qib_mregion *mr; |
| 215 | unsigned n, m; | 231 | unsigned n, m; |
| 216 | size_t off; | 232 | size_t off; |
| 217 | int ret = 0; | ||
| 218 | unsigned long flags; | 233 | unsigned long flags; |
| 219 | 234 | ||
| 220 | /* | 235 | /* |
| @@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
| 231 | if (!dev->dma_mr) | 246 | if (!dev->dma_mr) |
| 232 | goto bail; | 247 | goto bail; |
| 233 | atomic_inc(&dev->dma_mr->refcount); | 248 | atomic_inc(&dev->dma_mr->refcount); |
| 249 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
| 250 | |||
| 234 | sge->mr = dev->dma_mr; | 251 | sge->mr = dev->dma_mr; |
| 235 | sge->vaddr = (void *) vaddr; | 252 | sge->vaddr = (void *) vaddr; |
| 236 | sge->length = len; | 253 | sge->length = len; |
| @@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
| 248 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | 265 | if (unlikely(vaddr < mr->iova || off + len > mr->length || |
| 249 | (mr->access_flags & acc) == 0)) | 266 | (mr->access_flags & acc) == 0)) |
| 250 | goto bail; | 267 | goto bail; |
| 268 | atomic_inc(&mr->refcount); | ||
| 269 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
| 251 | 270 | ||
| 252 | off += mr->offset; | 271 | off += mr->offset; |
| 253 | m = 0; | 272 | if (mr->page_shift) { |
| 254 | n = 0; | 273 | /* |
| 255 | while (off >= mr->map[m]->segs[n].length) { | 274 | * Page sizes are a uniform power of 2, so no loop is necessary; |
| 256 | off -= mr->map[m]->segs[n].length; | 275 | * entries_spanned_by_off is the number of times the loop below |
| 257 | n++; | 276 | * would have executed. |
| 258 | if (n >= QIB_SEGSZ) { | 277 | */ |
| 259 | m++; | 278 | size_t entries_spanned_by_off; |
| 260 | n = 0; | 279 | |
| 280 | entries_spanned_by_off = off >> mr->page_shift; | ||
| 281 | off -= (entries_spanned_by_off << mr->page_shift); | ||
| 282 | m = entries_spanned_by_off / QIB_SEGSZ; | ||
| 283 | n = entries_spanned_by_off % QIB_SEGSZ; | ||
| 284 | } else { | ||
| 285 | m = 0; | ||
| 286 | n = 0; | ||
| 287 | while (off >= mr->map[m]->segs[n].length) { | ||
| 288 | off -= mr->map[m]->segs[n].length; | ||
| 289 | n++; | ||
| 290 | if (n >= QIB_SEGSZ) { | ||
| 291 | m++; | ||
| 292 | n = 0; | ||
| 293 | } | ||
| 261 | } | 294 | } |
| 262 | } | 295 | } |
| 263 | atomic_inc(&mr->refcount); | ||
| 264 | sge->mr = mr; | 296 | sge->mr = mr; |
| 265 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; | 297 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; |
| 266 | sge->length = mr->map[m]->segs[n].length - off; | 298 | sge->length = mr->map[m]->segs[n].length - off; |
| @@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
| 268 | sge->m = m; | 300 | sge->m = m; |
| 269 | sge->n = n; | 301 | sge->n = n; |
| 270 | ok: | 302 | ok: |
| 271 | ret = 1; | 303 | return 1; |
| 272 | bail: | 304 | bail: |
| 273 | spin_unlock_irqrestore(&rkt->lock, flags); | 305 | spin_unlock_irqrestore(&rkt->lock, flags); |
| 274 | return ret; | 306 | return 0; |
| 275 | } | 307 | } |
| 276 | 308 | ||
| 277 | /* | 309 | /* |
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 94b0d1f3a8f..5ad224e4a38 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c | |||
| @@ -668,8 +668,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 668 | lid = be16_to_cpu(pip->lid); | 668 | lid = be16_to_cpu(pip->lid); |
| 669 | /* Must be a valid unicast LID address. */ | 669 | /* Must be a valid unicast LID address. */ |
| 670 | if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) | 670 | if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) |
| 671 | goto err; | 671 | smp->status |= IB_SMP_INVALID_FIELD; |
| 672 | if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { | 672 | else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { |
| 673 | if (ppd->lid != lid) | 673 | if (ppd->lid != lid) |
| 674 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); | 674 | qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); |
| 675 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) | 675 | if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) |
| @@ -683,8 +683,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 683 | msl = pip->neighbormtu_mastersmsl & 0xF; | 683 | msl = pip->neighbormtu_mastersmsl & 0xF; |
| 684 | /* Must be a valid unicast LID address. */ | 684 | /* Must be a valid unicast LID address. */ |
| 685 | if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) | 685 | if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) |
| 686 | goto err; | 686 | smp->status |= IB_SMP_INVALID_FIELD; |
| 687 | if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { | 687 | else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { |
| 688 | spin_lock_irqsave(&ibp->lock, flags); | 688 | spin_lock_irqsave(&ibp->lock, flags); |
| 689 | if (ibp->sm_ah) { | 689 | if (ibp->sm_ah) { |
| 690 | if (smlid != ibp->sm_lid) | 690 | if (smlid != ibp->sm_lid) |
| @@ -707,8 +707,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 707 | if (lwe == 0xFF) | 707 | if (lwe == 0xFF) |
| 708 | lwe = ppd->link_width_supported; | 708 | lwe = ppd->link_width_supported; |
| 709 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) | 709 | else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) |
| 710 | goto err; | 710 | smp->status |= IB_SMP_INVALID_FIELD; |
| 711 | set_link_width_enabled(ppd, lwe); | 711 | else if (lwe != ppd->link_width_enabled) |
| 712 | set_link_width_enabled(ppd, lwe); | ||
| 712 | } | 713 | } |
| 713 | 714 | ||
| 714 | lse = pip->linkspeedactive_enabled & 0xF; | 715 | lse = pip->linkspeedactive_enabled & 0xF; |
| @@ -721,8 +722,9 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 721 | if (lse == 15) | 722 | if (lse == 15) |
| 722 | lse = ppd->link_speed_supported; | 723 | lse = ppd->link_speed_supported; |
| 723 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) | 724 | else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) |
| 724 | goto err; | 725 | smp->status |= IB_SMP_INVALID_FIELD; |
| 725 | set_link_speed_enabled(ppd, lse); | 726 | else if (lse != ppd->link_speed_enabled) |
| 727 | set_link_speed_enabled(ppd, lse); | ||
| 726 | } | 728 | } |
| 727 | 729 | ||
| 728 | /* Set link down default state. */ | 730 | /* Set link down default state. */ |
| @@ -738,7 +740,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 738 | IB_LINKINITCMD_POLL); | 740 | IB_LINKINITCMD_POLL); |
| 739 | break; | 741 | break; |
| 740 | default: | 742 | default: |
| 741 | goto err; | 743 | smp->status |= IB_SMP_INVALID_FIELD; |
| 742 | } | 744 | } |
| 743 | 745 | ||
| 744 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; | 746 | ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; |
| @@ -748,15 +750,17 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 748 | 750 | ||
| 749 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); | 751 | mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); |
| 750 | if (mtu == -1) | 752 | if (mtu == -1) |
| 751 | goto err; | 753 | smp->status |= IB_SMP_INVALID_FIELD; |
| 752 | qib_set_mtu(ppd, mtu); | 754 | else |
| 755 | qib_set_mtu(ppd, mtu); | ||
| 753 | 756 | ||
| 754 | /* Set operational VLs */ | 757 | /* Set operational VLs */ |
| 755 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; | 758 | vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; |
| 756 | if (vls) { | 759 | if (vls) { |
| 757 | if (vls > ppd->vls_supported) | 760 | if (vls > ppd->vls_supported) |
| 758 | goto err; | 761 | smp->status |= IB_SMP_INVALID_FIELD; |
| 759 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | 762 | else |
| 763 | (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); | ||
| 760 | } | 764 | } |
| 761 | 765 | ||
| 762 | if (pip->mkey_violations == 0) | 766 | if (pip->mkey_violations == 0) |
| @@ -770,10 +774,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 770 | 774 | ||
| 771 | ore = pip->localphyerrors_overrunerrors; | 775 | ore = pip->localphyerrors_overrunerrors; |
| 772 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) | 776 | if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) |
| 773 | goto err; | 777 | smp->status |= IB_SMP_INVALID_FIELD; |
| 774 | 778 | ||
| 775 | if (set_overrunthreshold(ppd, (ore & 0xF))) | 779 | if (set_overrunthreshold(ppd, (ore & 0xF))) |
| 776 | goto err; | 780 | smp->status |= IB_SMP_INVALID_FIELD; |
| 777 | 781 | ||
| 778 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; | 782 | ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; |
| 779 | 783 | ||
| @@ -792,7 +796,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 792 | state = pip->linkspeed_portstate & 0xF; | 796 | state = pip->linkspeed_portstate & 0xF; |
| 793 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; | 797 | lstate = (pip->portphysstate_linkdown >> 4) & 0xF; |
| 794 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) | 798 | if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) |
| 795 | goto err; | 799 | smp->status |= IB_SMP_INVALID_FIELD; |
| 796 | 800 | ||
| 797 | /* | 801 | /* |
| 798 | * Only state changes of DOWN, ARM, and ACTIVE are valid | 802 | * Only state changes of DOWN, ARM, and ACTIVE are valid |
| @@ -812,8 +816,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 812 | lstate = QIB_IB_LINKDOWN; | 816 | lstate = QIB_IB_LINKDOWN; |
| 813 | else if (lstate == 3) | 817 | else if (lstate == 3) |
| 814 | lstate = QIB_IB_LINKDOWN_DISABLE; | 818 | lstate = QIB_IB_LINKDOWN_DISABLE; |
| 815 | else | 819 | else { |
| 816 | goto err; | 820 | smp->status |= IB_SMP_INVALID_FIELD; |
| 821 | break; | ||
| 822 | } | ||
| 817 | spin_lock_irqsave(&ppd->lflags_lock, flags); | 823 | spin_lock_irqsave(&ppd->lflags_lock, flags); |
| 818 | ppd->lflags &= ~QIBL_LINKV; | 824 | ppd->lflags &= ~QIBL_LINKV; |
| 819 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); | 825 | spin_unlock_irqrestore(&ppd->lflags_lock, flags); |
| @@ -835,8 +841,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, | |||
| 835 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); | 841 | qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); |
| 836 | break; | 842 | break; |
| 837 | default: | 843 | default: |
| 838 | /* XXX We have already partially updated our state! */ | 844 | smp->status |= IB_SMP_INVALID_FIELD; |
| 839 | goto err; | ||
| 840 | } | 845 | } |
| 841 | 846 | ||
| 842 | ret = subn_get_portinfo(smp, ibdev, port); | 847 | ret = subn_get_portinfo(smp, ibdev, port); |
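Throughout subn_set_portinfo(), the hunks above replace hard failures (goto err) with smp->status |= IB_SMP_INVALID_FIELD, so one bad field is flagged in the MAD status while the remaining valid fields are still applied. Below is a compact sketch of that validate-and-continue pattern; the struct, parameter names, and status value are illustrative stand-ins, not the driver's.

    /* Validate-and-continue sketch: each invalid field sets a status
     * bit and is skipped, instead of aborting the whole set operation.
     * SMP_INVALID_FIELD stands in for IB_SMP_INVALID_FIELD. */
    #include <stdint.h>
    #include <stdio.h>

    #define SMP_INVALID_FIELD 0x001c

    struct port {
        unsigned mtu;
        unsigned vls;
    };

    static uint16_t set_port_fields(struct port *p, int new_mtu,
                                    unsigned new_vls, unsigned vls_supported)
    {
        uint16_t status = 0;

        if (new_mtu < 0)                 /* invalid: flag it, keep going */
            status |= SMP_INVALID_FIELD;
        else
            p->mtu = new_mtu;

        if (new_vls > vls_supported)
            status |= SMP_INVALID_FIELD;
        else
            p->vls = new_vls;

        return status;                   /* the reply carries the status */
    }

    int main(void)
    {
        struct port p = { 2048, 1 };

        /* bad MTU, good VL count: VLs still applied, status flagged */
        uint16_t st = set_port_fields(&p, -1, 4, 8);
        printf("status=0x%04x mtu=%u vls=%u\n", st, p.mtu, p.vls);
        return 0;
    }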
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index 5f95f0f6385..08944e2ee33 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c | |||
| @@ -39,7 +39,6 @@ | |||
| 39 | /* Fast memory region */ | 39 | /* Fast memory region */ |
| 40 | struct qib_fmr { | 40 | struct qib_fmr { |
| 41 | struct ib_fmr ibfmr; | 41 | struct ib_fmr ibfmr; |
| 42 | u8 page_shift; | ||
| 43 | struct qib_mregion mr; /* must be last */ | 42 | struct qib_mregion mr; /* must be last */ |
| 44 | }; | 43 | }; |
| 45 | 44 | ||
| @@ -107,6 +106,7 @@ static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) | |||
| 107 | goto bail; | 106 | goto bail; |
| 108 | } | 107 | } |
| 109 | mr->mr.mapsz = m; | 108 | mr->mr.mapsz = m; |
| 109 | mr->mr.page_shift = 0; | ||
| 110 | mr->mr.max_segs = count; | 110 | mr->mr.max_segs = count; |
| 111 | 111 | ||
| 112 | /* | 112 | /* |
| @@ -231,6 +231,8 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
| 231 | mr->mr.access_flags = mr_access_flags; | 231 | mr->mr.access_flags = mr_access_flags; |
| 232 | mr->umem = umem; | 232 | mr->umem = umem; |
| 233 | 233 | ||
| 234 | if (is_power_of_2(umem->page_size)) | ||
| 235 | mr->mr.page_shift = ilog2(umem->page_size); | ||
| 234 | m = 0; | 236 | m = 0; |
| 235 | n = 0; | 237 | n = 0; |
| 236 | list_for_each_entry(chunk, &umem->chunk_list, list) { | 238 | list_for_each_entry(chunk, &umem->chunk_list, list) { |
| @@ -390,7 +392,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
| 390 | fmr->mr.offset = 0; | 392 | fmr->mr.offset = 0; |
| 391 | fmr->mr.access_flags = mr_access_flags; | 393 | fmr->mr.access_flags = mr_access_flags; |
| 392 | fmr->mr.max_segs = fmr_attr->max_pages; | 394 | fmr->mr.max_segs = fmr_attr->max_pages; |
| 393 | fmr->page_shift = fmr_attr->page_shift; | 395 | fmr->mr.page_shift = fmr_attr->page_shift; |
| 394 | 396 | ||
| 395 | atomic_set(&fmr->mr.refcount, 0); | 397 | atomic_set(&fmr->mr.refcount, 0); |
| 396 | ret = &fmr->ibfmr; | 398 | ret = &fmr->ibfmr; |
| @@ -437,7 +439,7 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |||
| 437 | spin_lock_irqsave(&rkt->lock, flags); | 439 | spin_lock_irqsave(&rkt->lock, flags); |
| 438 | fmr->mr.user_base = iova; | 440 | fmr->mr.user_base = iova; |
| 439 | fmr->mr.iova = iova; | 441 | fmr->mr.iova = iova; |
| 440 | ps = 1 << fmr->page_shift; | 442 | ps = 1 << fmr->mr.page_shift; |
| 441 | fmr->mr.length = list_len * ps; | 443 | fmr->mr.length = list_len * ps; |
| 442 | m = 0; | 444 | m = 0; |
| 443 | n = 0; | 445 | n = 0; |
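qib_reg_user_mr() now records a page shift only when the umem page size is a power of 2, which is what later lets qib_lkey_ok()/qib_rkey_ok() take the arithmetic fast path. A userspace sketch of the two helpers involved, mirroring the kernel's is_power_of_2()/ilog2() behavior for nonzero values:

    /* Power-of-2 test and log2, as used when recording mr->mr.page_shift;
     * page_shift stays 0 when sizes are not a uniform power of 2. */
    #include <stdio.h>

    static int is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    static unsigned ilog2(unsigned long n)
    {
        unsigned shift = 0;

        while (n >>= 1)
            shift++;
        return shift;
    }

    int main(void)
    {
        unsigned long page_size = 4096;
        unsigned page_shift = 0; /* 0 = no uniform power-of-2 size */

        if (is_power_of_2(page_size))
            page_shift = ilog2(page_size);
        printf("page_size=%lu -> page_shift=%u\n", page_size, page_shift);
        return 0;
    }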
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6c39851d2de..e16751f8639 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
| @@ -48,13 +48,12 @@ static inline unsigned mk_qpn(struct qib_qpn_table *qpt, | |||
| 48 | 48 | ||
| 49 | static inline unsigned find_next_offset(struct qib_qpn_table *qpt, | 49 | static inline unsigned find_next_offset(struct qib_qpn_table *qpt, |
| 50 | struct qpn_map *map, unsigned off, | 50 | struct qpn_map *map, unsigned off, |
| 51 | unsigned r) | 51 | unsigned n) |
| 52 | { | 52 | { |
| 53 | if (qpt->mask) { | 53 | if (qpt->mask) { |
| 54 | off++; | 54 | off++; |
| 55 | if ((off & qpt->mask) >> 1 != r) | 55 | if (((off & qpt->mask) >> 1) >= n) |
| 56 | off = ((off & qpt->mask) ? | 56 | off = (off | qpt->mask) + 2; |
| 57 | (off | qpt->mask) + 1 : off) | (r << 1); | ||
| 58 | } else | 57 | } else |
| 59 | off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); | 58 | off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); |
| 60 | return off; | 59 | return off; |
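In the reworked find_next_offset(), (off & qpt->mask) >> 1 is the receive-context index encoded in a QPN, and (off | qpt->mask) + 2 rounds a rejected offset up to the first even QPN of the next block, whose context index is 0. A small demonstration follows; the mask width and queue count are assumed values for illustration, not read from hardware.

    /* QPN skip arithmetic from the reworked find_next_offset():
     * offsets whose context index is out of range are bumped to the
     * start of the next block. Mask and queue count are assumed. */
    #include <stdio.h>

    int main(void)
    {
        unsigned mask = 0x3e;       /* context bits 5..1 (assumed) */
        unsigned n_krcv_queues = 4; /* kernel receive queues (assumed) */
        unsigned off = 0x49;        /* candidate offset */

        off++;
        if (((off & mask) >> 1) >= n_krcv_queues)
            off = (off | mask) + 2; /* next block, context 0 */
        printf("next usable offset = 0x%x (context %u)\n",
               off, (off & mask) >> 1);
        return 0;
    }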
| @@ -123,7 +122,6 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
| 123 | u32 i, offset, max_scan, qpn; | 122 | u32 i, offset, max_scan, qpn; |
| 124 | struct qpn_map *map; | 123 | struct qpn_map *map; |
| 125 | u32 ret; | 124 | u32 ret; |
| 126 | int r; | ||
| 127 | 125 | ||
| 128 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { | 126 | if (type == IB_QPT_SMI || type == IB_QPT_GSI) { |
| 129 | unsigned n; | 127 | unsigned n; |
| @@ -139,15 +137,11 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
| 139 | goto bail; | 137 | goto bail; |
| 140 | } | 138 | } |
| 141 | 139 | ||
| 142 | r = smp_processor_id(); | 140 | qpn = qpt->last + 2; |
| 143 | if (r >= dd->n_krcv_queues) | ||
| 144 | r %= dd->n_krcv_queues; | ||
| 145 | qpn = qpt->last + 1; | ||
| 146 | if (qpn >= QPN_MAX) | 141 | if (qpn >= QPN_MAX) |
| 147 | qpn = 2; | 142 | qpn = 2; |
| 148 | if (qpt->mask && ((qpn & qpt->mask) >> 1) != r) | 143 | if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) |
| 149 | qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) | | 144 | qpn = (qpn | qpt->mask) + 2; |
| 150 | (r << 1); | ||
| 151 | offset = qpn & BITS_PER_PAGE_MASK; | 145 | offset = qpn & BITS_PER_PAGE_MASK; |
| 152 | map = &qpt->map[qpn / BITS_PER_PAGE]; | 146 | map = &qpt->map[qpn / BITS_PER_PAGE]; |
| 153 | max_scan = qpt->nmaps - !offset; | 147 | max_scan = qpt->nmaps - !offset; |
| @@ -163,7 +157,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
| 163 | ret = qpn; | 157 | ret = qpn; |
| 164 | goto bail; | 158 | goto bail; |
| 165 | } | 159 | } |
| 166 | offset = find_next_offset(qpt, map, offset, r); | 160 | offset = find_next_offset(qpt, map, offset, |
| 161 | dd->n_krcv_queues); | ||
| 167 | qpn = mk_qpn(qpt, map, offset); | 162 | qpn = mk_qpn(qpt, map, offset); |
| 168 | /* | 163 | /* |
| 169 | * This test differs from alloc_pidmap(). | 164 | * This test differs from alloc_pidmap(). |
| @@ -183,13 +178,13 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, | |||
| 183 | if (qpt->nmaps == QPNMAP_ENTRIES) | 178 | if (qpt->nmaps == QPNMAP_ENTRIES) |
| 184 | break; | 179 | break; |
| 185 | map = &qpt->map[qpt->nmaps++]; | 180 | map = &qpt->map[qpt->nmaps++]; |
| 186 | offset = qpt->mask ? (r << 1) : 0; | 181 | offset = 0; |
| 187 | } else if (map < &qpt->map[qpt->nmaps]) { | 182 | } else if (map < &qpt->map[qpt->nmaps]) { |
| 188 | ++map; | 183 | ++map; |
| 189 | offset = qpt->mask ? (r << 1) : 0; | 184 | offset = 0; |
| 190 | } else { | 185 | } else { |
| 191 | map = &qpt->map[0]; | 186 | map = &qpt->map[0]; |
| 192 | offset = qpt->mask ? (r << 1) : 2; | 187 | offset = 2; |
| 193 | } | 188 | } |
| 194 | qpn = mk_qpn(qpt, map, offset); | 189 | qpn = mk_qpn(qpt, map, offset); |
| 195 | } | 190 | } |
| @@ -468,6 +463,10 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | |||
| 468 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); | 463 | qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); |
| 469 | del_timer(&qp->s_timer); | 464 | del_timer(&qp->s_timer); |
| 470 | } | 465 | } |
| 466 | |||
| 467 | if (qp->s_flags & QIB_S_ANY_WAIT_SEND) | ||
| 468 | qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; | ||
| 469 | |||
| 471 | spin_lock(&dev->pending_lock); | 470 | spin_lock(&dev->pending_lock); |
| 472 | if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { | 471 | if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { |
| 473 | qp->s_flags &= ~QIB_S_ANY_WAIT_IO; | 472 | qp->s_flags &= ~QIB_S_ANY_WAIT_IO; |
| @@ -1061,7 +1060,6 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd, | |||
| 1061 | } | 1060 | } |
| 1062 | qp->ibqp.qp_num = err; | 1061 | qp->ibqp.qp_num = err; |
| 1063 | qp->port_num = init_attr->port_num; | 1062 | qp->port_num = init_attr->port_num; |
| 1064 | qp->processor_id = smp_processor_id(); | ||
| 1065 | qib_reset_qp(qp, init_attr->qp_type); | 1063 | qib_reset_qp(qp, init_attr->qp_type); |
| 1066 | break; | 1064 | break; |
| 1067 | 1065 | ||
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 955fb715779..8245237b67c 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
| @@ -1407,6 +1407,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
| 1407 | struct qib_ctxtdata *rcd) | 1407 | struct qib_ctxtdata *rcd) |
| 1408 | { | 1408 | { |
| 1409 | struct qib_swqe *wqe; | 1409 | struct qib_swqe *wqe; |
| 1410 | struct qib_pportdata *ppd = ppd_from_ibp(ibp); | ||
| 1410 | enum ib_wc_status status; | 1411 | enum ib_wc_status status; |
| 1411 | unsigned long flags; | 1412 | unsigned long flags; |
| 1412 | int diff; | 1413 | int diff; |
| @@ -1414,6 +1415,29 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
| 1414 | u32 aeth; | 1415 | u32 aeth; |
| 1415 | u64 val; | 1416 | u64 val; |
| 1416 | 1417 | ||
| 1418 | if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) { | ||
| 1419 | /* | ||
| 1420 | * If the ACK'd PSN is on the SDMA busy list, try to make | ||
| 1421 | * progress to reclaim SDMA credits. | ||
| 1422 | */ | ||
| 1423 | if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && | ||
| 1424 | (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) { | ||
| 1425 | |||
| 1426 | /* | ||
| 1427 | * If the send tasklet is not running, attempt to make | ||
| 1428 | * progress on the SDMA queue. | ||
| 1429 | */ | ||
| 1430 | if (!(qp->s_flags & QIB_S_BUSY)) { | ||
| 1431 | /* Acquire SDMA Lock */ | ||
| 1432 | spin_lock_irqsave(&ppd->sdma_lock, flags); | ||
| 1433 | /* Invoke sdma make progress */ | ||
| 1434 | qib_sdma_make_progress(ppd); | ||
| 1435 | /* Release SDMA Lock */ | ||
| 1436 | spin_unlock_irqrestore(&ppd->sdma_lock, flags); | ||
| 1437 | } | ||
| 1438 | } | ||
| 1439 | } | ||
| 1440 | |||
| 1417 | spin_lock_irqsave(&qp->s_lock, flags); | 1441 | spin_lock_irqsave(&qp->s_lock, flags); |
| 1418 | 1442 | ||
| 1419 | /* Ignore invalid responses. */ | 1443 | /* Ignore invalid responses. */ |
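The early-ACK check added above compares PSNs with qib_cmp24(), i.e. in a 24-bit circular sequence space where values wrap. Below is a sketch of such a comparison; it shows what a 24-bit sequence compare must do, under the usual assumption (shared by kernel code) that signed right shift is arithmetic.

    /* 24-bit circular sequence comparison: returns <0, 0, >0 like
     * memcmp, treating a and b as points in a 24-bit PSN space. */
    #include <stdint.h>
    #include <stdio.h>

    static int cmp24(uint32_t a, uint32_t b)
    {
        /* sign-extend the 24-bit difference into a signed int */
        return ((int)((a - b) << 8)) >> 8;
    }

    int main(void)
    {
        /* 0x000001 is "after" 0xfffffe despite the numeric wrap */
        printf("%d\n", cmp24(0x000001, 0xfffffe)); /* positive */
        printf("%d\n", cmp24(0x000001, 0x000005)); /* negative */
        return 0;
    }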
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index e1b3da2a1f8..4a51fd1e9cb 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
| @@ -445,13 +445,14 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 445 | qkey = be32_to_cpu(ohdr->u.ud.deth[0]); | 445 | qkey = be32_to_cpu(ohdr->u.ud.deth[0]); |
| 446 | src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; | 446 | src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; |
| 447 | 447 | ||
| 448 | /* Get the number of bytes the message was padded by. */ | 448 | /* |
| 449 | * Get the number of bytes the message was padded by | ||
| 450 | * and drop incomplete packets. | ||
| 451 | */ | ||
| 449 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; | 452 | pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; |
| 450 | if (unlikely(tlen < (hdrsize + pad + 4))) { | 453 | if (unlikely(tlen < (hdrsize + pad + 4))) |
| 451 | /* Drop incomplete packets. */ | 454 | goto drop; |
| 452 | ibp->n_pkt_drops++; | 455 | |
| 453 | goto bail; | ||
| 454 | } | ||
| 455 | tlen -= hdrsize + pad + 4; | 456 | tlen -= hdrsize + pad + 4; |
| 456 | 457 | ||
| 457 | /* | 458 | /* |
| @@ -460,10 +461,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 460 | */ | 461 | */ |
| 461 | if (qp->ibqp.qp_num) { | 462 | if (qp->ibqp.qp_num) { |
| 462 | if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || | 463 | if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE || |
| 463 | hdr->lrh[3] == IB_LID_PERMISSIVE)) { | 464 | hdr->lrh[3] == IB_LID_PERMISSIVE)) |
| 464 | ibp->n_pkt_drops++; | 465 | goto drop; |
| 465 | goto bail; | ||
| 466 | } | ||
| 467 | if (qp->ibqp.qp_num > 1) { | 466 | if (qp->ibqp.qp_num > 1) { |
| 468 | u16 pkey1, pkey2; | 467 | u16 pkey1, pkey2; |
| 469 | 468 | ||
| @@ -476,7 +475,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 476 | 0xF, | 475 | 0xF, |
| 477 | src_qp, qp->ibqp.qp_num, | 476 | src_qp, qp->ibqp.qp_num, |
| 478 | hdr->lrh[3], hdr->lrh[1]); | 477 | hdr->lrh[3], hdr->lrh[1]); |
| 479 | goto bail; | 478 | return; |
| 480 | } | 479 | } |
| 481 | } | 480 | } |
| 482 | if (unlikely(qkey != qp->qkey)) { | 481 | if (unlikely(qkey != qp->qkey)) { |
| @@ -484,30 +483,24 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 484 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, | 483 | (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF, |
| 485 | src_qp, qp->ibqp.qp_num, | 484 | src_qp, qp->ibqp.qp_num, |
| 486 | hdr->lrh[3], hdr->lrh[1]); | 485 | hdr->lrh[3], hdr->lrh[1]); |
| 487 | goto bail; | 486 | return; |
| 488 | } | 487 | } |
| 489 | /* Drop invalid MAD packets (see 13.5.3.1). */ | 488 | /* Drop invalid MAD packets (see 13.5.3.1). */ |
| 490 | if (unlikely(qp->ibqp.qp_num == 1 && | 489 | if (unlikely(qp->ibqp.qp_num == 1 && |
| 491 | (tlen != 256 || | 490 | (tlen != 256 || |
| 492 | (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) { | 491 | (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) |
| 493 | ibp->n_pkt_drops++; | 492 | goto drop; |
| 494 | goto bail; | ||
| 495 | } | ||
| 496 | } else { | 493 | } else { |
| 497 | struct ib_smp *smp; | 494 | struct ib_smp *smp; |
| 498 | 495 | ||
| 499 | /* Drop invalid MAD packets (see 13.5.3.1). */ | 496 | /* Drop invalid MAD packets (see 13.5.3.1). */ |
| 500 | if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) { | 497 | if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) |
| 501 | ibp->n_pkt_drops++; | 498 | goto drop; |
| 502 | goto bail; | ||
| 503 | } | ||
| 504 | smp = (struct ib_smp *) data; | 499 | smp = (struct ib_smp *) data; |
| 505 | if ((hdr->lrh[1] == IB_LID_PERMISSIVE || | 500 | if ((hdr->lrh[1] == IB_LID_PERMISSIVE || |
| 506 | hdr->lrh[3] == IB_LID_PERMISSIVE) && | 501 | hdr->lrh[3] == IB_LID_PERMISSIVE) && |
| 507 | smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { | 502 | smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) |
| 508 | ibp->n_pkt_drops++; | 503 | goto drop; |
| 509 | goto bail; | ||
| 510 | } | ||
| 511 | } | 504 | } |
| 512 | 505 | ||
| 513 | /* | 506 | /* |
| @@ -519,14 +512,12 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 519 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { | 512 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { |
| 520 | wc.ex.imm_data = ohdr->u.ud.imm_data; | 513 | wc.ex.imm_data = ohdr->u.ud.imm_data; |
| 521 | wc.wc_flags = IB_WC_WITH_IMM; | 514 | wc.wc_flags = IB_WC_WITH_IMM; |
| 522 | hdrsize += sizeof(u32); | 515 | tlen -= sizeof(u32); |
| 523 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { | 516 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { |
| 524 | wc.ex.imm_data = 0; | 517 | wc.ex.imm_data = 0; |
| 525 | wc.wc_flags = 0; | 518 | wc.wc_flags = 0; |
| 526 | } else { | 519 | } else |
| 527 | ibp->n_pkt_drops++; | 520 | goto drop; |
| 528 | goto bail; | ||
| 529 | } | ||
| 530 | 521 | ||
| 531 | /* | 522 | /* |
| 532 | * A GRH is expected to precede the data even if not | 523 | * A GRH is expected to precede the data even if not |
| @@ -556,8 +547,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 556 | /* Silently drop packets which are too big. */ | 547 | /* Silently drop packets which are too big. */ |
| 557 | if (unlikely(wc.byte_len > qp->r_len)) { | 548 | if (unlikely(wc.byte_len > qp->r_len)) { |
| 558 | qp->r_flags |= QIB_R_REUSE_SGE; | 549 | qp->r_flags |= QIB_R_REUSE_SGE; |
| 559 | ibp->n_pkt_drops++; | 550 | goto drop; |
| 560 | return; | ||
| 561 | } | 551 | } |
| 562 | if (has_grh) { | 552 | if (has_grh) { |
| 563 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, | 553 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, |
| @@ -594,5 +584,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
| 594 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 584 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
| 595 | (ohdr->bth[0] & | 585 | (ohdr->bth[0] & |
| 596 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | 586 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); |
| 597 | bail:; | 587 | return; |
| 588 | |||
| 589 | drop: | ||
| 590 | ibp->n_pkt_drops++; | ||
| 598 | } | 591 | } |
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index 4c19e06b5e8..66208bcd7c1 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c | |||
| @@ -382,6 +382,7 @@ static void qib_user_sdma_free_pkt_list(struct device *dev, | |||
| 382 | 382 | ||
| 383 | kmem_cache_free(pq->pkt_slab, pkt); | 383 | kmem_cache_free(pq->pkt_slab, pkt); |
| 384 | } | 384 | } |
| 385 | INIT_LIST_HEAD(list); | ||
| 385 | } | 386 | } |
| 386 | 387 | ||
| 387 | /* | 388 | /* |
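The one-line fix above reinitializes the list head after qib_user_sdma_free_pkt_list() frees its entries, so a caller that reuses the same head cannot walk freed packets. A userspace analogue with a plain singly linked list (deliberately not the kernel list API):

    /* Why the free routine must leave the head in an empty, reusable
     * state: without the reset, a second pass over the same head would
     * touch freed memory. */
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt {
        int id;
        struct pkt *next;
    };

    static void free_pkt_list(struct pkt **head)
    {
        struct pkt *p = *head, *next;

        while (p) {
            next = p->next;
            free(p);
            p = next;
        }
        *head = NULL; /* plays the role of INIT_LIST_HEAD(list) */
    }

    int main(void)
    {
        struct pkt *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct pkt *p = malloc(sizeof(*p));
            p->id = i;
            p->next = head;
            head = p;
        }
        free_pkt_list(&head);
        free_pkt_list(&head); /* safe to call again: head is empty */
        printf("head=%p\n", (void *)head);
        return 0;
    }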
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index bd57c127322..63b22a9a7fe 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
| @@ -301,6 +301,7 @@ struct qib_mregion { | |||
| 301 | int access_flags; | 301 | int access_flags; |
| 302 | u32 max_segs; /* number of qib_segs in all the arrays */ | 302 | u32 max_segs; /* number of qib_segs in all the arrays */ |
| 303 | u32 mapsz; /* size of the map array */ | 303 | u32 mapsz; /* size of the map array */ |
| 304 | u8 page_shift; /* 0 - non-uniform or non-power-of-2 page sizes */ | ||
| 304 | atomic_t refcount; | 305 | atomic_t refcount; |
| 305 | struct qib_segarray *map[0]; /* the segments */ | 306 | struct qib_segarray *map[0]; /* the segments */ |
| 306 | }; | 307 | }; |
| @@ -435,7 +436,6 @@ struct qib_qp { | |||
| 435 | spinlock_t r_lock; /* used for APM */ | 436 | spinlock_t r_lock; /* used for APM */ |
| 436 | spinlock_t s_lock; | 437 | spinlock_t s_lock; |
| 437 | atomic_t s_dma_busy; | 438 | atomic_t s_dma_busy; |
| 438 | unsigned processor_id; /* Processor ID QP is bound to */ | ||
| 439 | u32 s_flags; | 439 | u32 s_flags; |
| 440 | u32 s_cur_size; /* size of send packet in bytes */ | 440 | u32 s_cur_size; /* size of send packet in bytes */ |
| 441 | u32 s_len; /* total length of s_sge */ | 441 | u32 s_len; /* total length of s_sge */ |
| @@ -813,13 +813,8 @@ extern struct workqueue_struct *qib_cq_wq; | |||
| 813 | */ | 813 | */ |
| 814 | static inline void qib_schedule_send(struct qib_qp *qp) | 814 | static inline void qib_schedule_send(struct qib_qp *qp) |
| 815 | { | 815 | { |
| 816 | if (qib_send_ok(qp)) { | 816 | if (qib_send_ok(qp)) |
| 817 | if (qp->processor_id == smp_processor_id()) | 817 | queue_work(qib_wq, &qp->s_work); |
| 818 | queue_work(qib_wq, &qp->s_work); | ||
| 819 | else | ||
| 820 | queue_work_on(qp->processor_id, | ||
| 821 | qib_wq, &qp->s_work); | ||
| 822 | } | ||
| 823 | } | 818 | } |
| 824 | 819 | ||
| 825 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) | 820 | static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) |
