Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb3/Makefile           |    3
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c         |    4
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h          |    5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c          |    6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_mem.c         |    7
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c    |    7
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c          |   29
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c           |    2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h      |   24
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c           |    2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c          |   40
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h       |    7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c         |   30
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c           |  180
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c         |  116
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c          |   97
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_common.h     |   35
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c         |    2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_debug.h      |    4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c     |  197
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_eeprom.c     |   23
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c   |   94
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_fs.c         |   14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6110.c    |  395
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba6120.c    |  439
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_init_chip.c  |   67
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c       |   81
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_kernel.h     |  204
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_keys.c       |    5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c        |  123
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_qp.c         |    6
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c         |   18
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_registers.h  |   33
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c        |   13
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c        |    4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_stats.c      |   24
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c      |  390
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c         |   47
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c      |   55
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.h      |   12
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c                |    9
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c              |   10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c        |   11
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        |   13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c         |    6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c       |   45
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c         |    8
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c   |   22
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c         |   13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c        |   47
-rw-r--r--  drivers/infiniband/hw/nes/Kconfig              |   16
-rw-r--r--  drivers/infiniband/hw/nes/Makefile             |    3
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                | 1152
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                |  560
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c             | 3088
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.h             |  433
-rw-r--r--  drivers/infiniband/hw/nes/nes_context.h        |  193
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c             | 3080
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h             | 1206
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c            | 1703
-rw-r--r--  drivers/infiniband/hw/nes/nes_user.h           |  112
-rw-r--r--  drivers/infiniband/hw/nes/nes_utils.c          |  917
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c          | 3917
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.h          |  169
64 files changed, 18742 insertions, 835 deletions
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 36b98989b15e..7e7b5a66f042 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -1,5 +1,4 @@
-EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/cxgb3 \
-	        -I$(TOPDIR)/drivers/infiniband/hw/cxgb3/core
+EXTRA_CFLAGS += -Idrivers/net/cxgb3
 
 obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o
 
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index eec6a30840ca..03c5ff62889a 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -179,7 +179,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 	setup.size = 1UL << cq->size_log2;
 	setup.credits = 65535;
 	setup.credit_thres = 1;
-	if (rdev_p->t3cdev_p->type == T3B)
+	if (rdev_p->t3cdev_p->type != T3A)
 		setup.ovfl_mode = 0;
 	else
 		setup.ovfl_mode = 1;
@@ -584,7 +584,7 @@ static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
 {
 	u32 i, nr_wqe, copy_len;
 	u8 *copy_data;
-	u8 wr_len, utx_len;	/* lenght in 8 byte flit */
+	u8 wr_len, utx_len;	/* length in 8 byte flit */
 	enum t3_wr_flags flag;
 	__be64 *wqe;
 	u64 utx_cmd;
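A side note on the `== T3B` to `!= T3A` flips that recur through the cxgb3 hunks: testing for the single legacy revision, rather than for one specific newer one, lets any later silicon take the non-workaround path automatically. A minimal sketch of the idea follows; the enum values are illustrative stand-ins for the adapter-type enum assumed to live in the cxgb3 t3cdev header.

/* illustrative only -- mirrors the adapter-type enum this patch relies on */
enum chip_type { T3A = 0, T3B, T3C };

/* old predicate: every future revision must be added by hand */
static int modern_path_old(enum chip_type t) { return t == T3B; }

/* new predicate: only the legacy part keeps the quirk */
static int modern_path_new(enum chip_type t) { return t != T3A; }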
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index c84d4ac49355..969d4d928455 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -315,7 +315,7 @@ struct t3_rdma_init_wr {
 	__be32 ird;
 	__be64 qp_dma_addr;	/* 7 */
 	__be32 qp_dma_size;	/* 8 */
-	u32 irs;
+	__be32 irs;
 };
 
 struct t3_genbit {
@@ -324,7 +324,8 @@ struct t3_genbit {
 };
 
 enum rdma_init_wr_flags {
-	RECVS_POSTED = 1,
+	RECVS_POSTED = (1<<0),
+	PRIV_QP = (1<<1),
 };
 
 union t3_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 20ba372dd182..e9a08fa3dffe 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -332,7 +332,7 @@ static struct rtable *find_route(struct t3cdev *dev, __be32 local_ip,
 		}
 	};
 
-	if (ip_route_output_flow(&rt, &fl, NULL, 0))
+	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
 		return NULL;
 	return rt;
 }
@@ -1118,7 +1118,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	     status2errno(rpl->status));
 	connect_reply_upcall(ep, status2errno(rpl->status));
 	state_set(&ep->com, DEAD);
-	if (ep->com.tdev->type == T3B && act_open_has_tid(rpl->status))
+	if (ep->com.tdev->type != T3A && act_open_has_tid(rpl->status))
 		release_tid(ep->com.tdev, GET_TID(rpl), NULL);
 	cxgb3_free_atid(ep->com.tdev, ep->atid);
 	dst_release(ep->dst);
@@ -1249,7 +1249,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip,
 	skb_trim(skb, sizeof(struct cpl_tid_release));
 	skb_get(skb);
 
-	if (tdev->type == T3B)
+	if (tdev->type != T3A)
 		release_tid(tdev, hwtid, skb);
 	else {
 		struct cpl_pass_accept_rpl *rpl;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_mem.c b/drivers/infiniband/hw/cxgb3/iwch_mem.c
index a6c2c4ba29e6..73bfd1656f86 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_mem.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_mem.c
@@ -122,6 +122,13 @@ int build_phys_page_list(struct ib_phys_buf *buffer_list,
 		*total_size += buffer_list[i].size;
 		if (i > 0)
 			mask |= buffer_list[i].addr;
+		else
+			mask |= buffer_list[i].addr & PAGE_MASK;
+		if (i != num_phys_buf - 1)
+			mask |= buffer_list[i].addr + buffer_list[i].size;
+		else
+			mask |= (buffer_list[i].addr + buffer_list[i].size +
+				PAGE_SIZE - 1) & PAGE_MASK;
 	}
 
 	if (*total_size > 0xFFFFFFFFULL)
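The mask arithmetic added to build_phys_page_list() is compact; the sketch below restates the trick standalone, with hypothetical names rather than driver code. ORing together the page-aligned start of the first buffer, every interior boundary exactly, and the page-rounded end of the last buffer means the lowest set bit of the result bounds the largest page size that can cover all buffers without leaving a hole.

#include <stdint.h>

#define PG_SHIFT 12
#define PG_SIZE  (1ULL << PG_SHIFT)
#define PG_MASK  (~(PG_SIZE - 1))

struct phys_buf { uint64_t addr, size; };

static unsigned best_page_shift(const struct phys_buf *buf, int n)
{
	uint64_t mask = 0;
	unsigned shift;
	int i;

	for (i = 0; i < n; i++) {
		/* first buffer: only its page-aligned start constrains us */
		mask |= i ? buf[i].addr : (buf[i].addr & PG_MASK);
		/* last buffer: round its end up to the next page boundary */
		mask |= (i == n - 1) ?
			((buf[i].addr + buf[i].size + PG_SIZE - 1) & PG_MASK) :
			(buf[i].addr + buf[i].size);
	}
	/* lowest set bit bounds the largest usable page size (capped) */
	for (shift = PG_SHIFT; shift < 27; shift++)
		if (mask & (1ULL << shift))
			break;
	return shift;
}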
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b5436ca92e68..df1838f8f94d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -39,6 +39,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -645,7 +646,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (err)
 		goto err;
 
-	if (udata && t3b_device(rhp)) {
+	if (udata && !t3a_device(rhp)) {
 		uresp.pbl_addr = (mhp->attr.pbl_addr -
 				  rhp->rdev.rnic_info.pbl_base) >> 3;
 		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
@@ -1053,7 +1054,9 @@ static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }
 
@@ -1065,7 +1068,9 @@ static ssize_t show_hca(struct class_device *cdev, char *buf)
 	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
+	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
+	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index dd89b6b91f9c..ea2cdd73dd85 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -208,36 +208,19 @@ static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
 static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
 				struct ib_recv_wr *wr)
 {
-	int i, err = 0;
-	u32 pbl_addr[4];
-	u8 page_size[4];
+	int i;
 	if (wr->num_sge > T3_MAX_SGE)
 		return -EINVAL;
-	err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
-			       page_size);
-	if (err)
-		return err;
-	wqe->recv.pagesz[0] = page_size[0];
-	wqe->recv.pagesz[1] = page_size[1];
-	wqe->recv.pagesz[2] = page_size[2];
-	wqe->recv.pagesz[3] = page_size[3];
 	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
 	for (i = 0; i < wr->num_sge; i++) {
 		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
 		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
-
-		/* to in the WQE == the offset into the page */
-		wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
-				(1UL << (12 + page_size[i])));
-
-		/* pbl_addr is the adapters address in the PBL */
-		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
+		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
 	}
 	for (; i < T3_MAX_SGE; i++) {
 		wqe->recv.sgl[i].stag = 0;
 		wqe->recv.sgl[i].len = 0;
 		wqe->recv.sgl[i].to = 0;
-		wqe->recv.pbl_addr[i] = 0;
 	}
 	return 0;
 }
@@ -659,6 +642,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	cxio_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, *flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 
 	/* locking heirarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, *flag);
@@ -668,6 +652,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	cxio_flush_sq(&qhp->wq, &schp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, *flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
 
 	/* deref */
 	if (atomic_dec_and_test(&qhp->refcnt))
@@ -678,7 +663,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 
 static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
-	if (t3b_device(qhp->rhp))
+	if (qhp->ibqp.uobject)
 		cxio_set_wq_in_error(&qhp->wq);
 	else
 		__flush_qp(qhp, flag);
@@ -732,6 +717,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.qp_dma_addr = qhp->wq.dma_addr;
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
+	init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
 	init_attr.irs = qhp->ep->rcv_seq;
 	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
 	     "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
@@ -847,10 +833,11 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			disconnect = 1;
 			ep = qhp->ep;
 		}
+		flush_qp(qhp, &flag);
 		break;
 	case IWCH_QP_STATE_TERMINATE:
 		qhp->attr.state = IWCH_QP_STATE_TERMINATE;
-		if (t3b_device(qhp->rhp))
+		if (qhp->ibqp.uobject)
 			cxio_set_wq_in_error(&qhp->wq);
 		if (!internal)
 			terminate = 1;
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index f7782c882ab4..194c1c30cf63 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -1,7 +1,7 @@
 /*
  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
  *
- *  adress vector functions
+ *  address vector functions
  *
  *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  *           Khadija Souissi <souissik@de.ibm.com>
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 74d2b72a11d8..92cce8aacbb7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -94,9 +94,14 @@ struct ehca_sma_attr {
 
 struct ehca_sport {
 	struct ib_cq *ibcq_aqp1;
-	struct ib_qp *ibqp_aqp1;
+	struct ib_qp *ibqp_sqp[2];
+	/* lock to serialize modify_qp() calls for sqp in normal
+	 * and irq path (when event PORT_ACTIVE is received first time)
+	 */
+	spinlock_t mod_sqp_lock;
 	enum ib_port_state port_state;
 	struct ehca_sma_attr saved_attr;
+	u32 pma_qp_nr;
 };
 
 #define HCA_CAP_MR_PGSIZE_4K 0x80000000
@@ -141,6 +146,14 @@ enum ehca_ext_qp_type {
 	EQPT_SRQ = 3,
 };
 
+/* struct to cache modify_qp()'s parms for GSI/SMI qp */
+struct ehca_mod_qp_parm {
+	int mask;
+	struct ib_qp_attr attr;
+};
+
+#define EHCA_MOD_QP_PARM_MAX 4
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
@@ -164,10 +177,18 @@ struct ehca_qp {
 	struct ehca_cq *recv_cq;
 	unsigned int sqerr_purgeflag;
 	struct hlist_node list_entries;
+	/* array to cache modify_qp()'s parms for GSI/SMI qp */
+	struct ehca_mod_qp_parm *mod_qp_parm;
+	int mod_qp_parm_idx;
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_squeue;
 	u32 mm_count_rqueue;
 	u32 mm_count_galpa;
+	/* unsolicited ack circumvention */
+	int unsol_ack_circ;
+	int mtu_shift;
+	u32 message_count;
+	u32 packet_count;
 };
 
 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -323,6 +344,7 @@ extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
 extern int ehca_lock_hcalls;
+extern int ehca_nr_ports;
 
 struct ipzu_queue_resp {
 	u32 qe_size;	/* queue entry size */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 79c25f51c21e..0467c158d4a9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -246,7 +246,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	} else {
 		if (h_ret != H_PAGE_REGISTERED) {
 			ehca_err(device, "Registration of page failed "
-				 "ehca_cq=%p cq_num=%x h_ret=%li"
+				 "ehca_cq=%p cq_num=%x h_ret=%li "
 				 "counter=%i act_pages=%i",
 				 my_cq, my_cq->cq_number,
 				 h_ret, counter, param.act_pages);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3f617b27b954..b5ca94c6b8d9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -62,6 +62,7 @@
 #define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
 #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
 #define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
+#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
 
 #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
 #define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0, 7)
@@ -354,17 +355,34 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
 	u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
+	u8 spec_event;
+	struct ehca_sport *sport = &shca->sport[port - 1];
+	unsigned long flags;
 
 	switch (ec) {
 	case 0x30: /* port availability change */
 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+			int suppress_event;
+			/* replay modify_qp for sqps */
+			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
+			if (sport->ibqp_sqp[IB_QPT_SMI])
+				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+			if (!suppress_event)
+				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+			/* AQP1 was destroyed, ignore this event */
+			if (suppress_event)
+				break;
+
+			sport->port_state = IB_PORT_ACTIVE;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
 					    "is active");
 			ehca_query_sma_attr(shca, port,
-					    &shca->sport[port - 1].saved_attr);
+					    &sport->saved_attr);
 		} else {
-			shca->sport[port - 1].port_state = IB_PORT_DOWN;
+			sport->port_state = IB_PORT_DOWN;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
 					    "is inactive");
 		}
@@ -378,13 +396,15 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 			ehca_warn(&shca->ib_device, "disruptive port "
 				  "%d configuration change", port);
 
-			shca->sport[port - 1].port_state = IB_PORT_DOWN;
+			sport->port_state = IB_PORT_DOWN;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
 					    "is inactive");
 
-			shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+			sport->port_state = IB_PORT_ACTIVE;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
 					    "is active");
+			ehca_query_sma_attr(shca, port,
+					    &sport->saved_attr);
 		} else
 			notify_port_conf_change(shca, port);
 		break;
@@ -394,6 +414,16 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 	case 0x33:  /* trace stopped */
 		ehca_err(&shca->ib_device, "Traced stopped.");
 		break;
+	case 0x34: /* util async event */
+		spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
+		if (spec_event == 0x80) /* client reregister required */
+			dispatch_port_event(shca, port,
+					    IB_EVENT_CLIENT_REREGISTER,
+					    "client reregister req.");
+		else
+			ehca_warn(&shca->ib_device, "Unknown util async "
+				  "event %x on port %x", spec_event, port);
+		break;
 	default:
 		ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
 			 ec, shca->ib_device.name);
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 5485799cdc8d..a8a2ea585d2f 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -187,6 +187,11 @@ int ehca_dealloc_ucontext(struct ib_ucontext *context);
 
 int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
+int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+		     struct ib_wc *in_wc, struct ib_grh *in_grh,
+		     struct ib_mad *in_mad,
+		     struct ib_mad *out_mad);
+
 void ehca_poll_eqs(unsigned long data);
 
 int ehca_calc_ipd(struct ehca_shca *shca, int port,
@@ -200,4 +205,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
+void ehca_recover_sqp(struct ib_qp *sqp);
+
 #endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 6a56d86a2951..a86ebcc79a95 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
 		 "hardware level"
 		 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
 MODULE_PARM_DESC(nr_ports,
-		 "number of connected ports (default: 2)");
+		 "number of connected ports (-1: autodetect, 1: port one only, "
+		 "2: two ports (default)");
 MODULE_PARM_DESC(use_hp_mr,
 		 "high performance MRs (0: no (default), 1: yes)");
 MODULE_PARM_DESC(port_act_time,
@@ -471,7 +472,7 @@ int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
 	shca->ib_device.attach_mcast = ehca_attach_mcast;
 	shca->ib_device.detach_mcast = ehca_detach_mcast;
-	/* shca->ib_device.process_mad = ehca_process_mad; */
+	shca->ib_device.process_mad = ehca_process_mad;
 	shca->ib_device.mmap = ehca_mmap;
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
@@ -511,7 +512,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
 	}
 	sport->ibcq_aqp1 = ibcq;
 
-	if (sport->ibqp_aqp1) {
+	if (sport->ibqp_sqp[IB_QPT_GSI]) {
 		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
 		ret = -EPERM;
 		goto create_aqp1;
@@ -537,7 +538,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
 		ret = PTR_ERR(ibqp);
 		goto create_aqp1;
 	}
-	sport->ibqp_aqp1 = ibqp;
+	sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
 
 	return 0;
 
@@ -550,7 +551,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
 {
 	int ret;
 
-	ret = ib_destroy_qp(sport->ibqp_aqp1);
+	ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
 	if (ret) {
 		ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
 		return ret;
@@ -590,6 +591,11 @@ static struct attribute_group ehca_drv_attr_grp = {
 	.attrs = ehca_drv_attrs
 };
 
+static struct attribute_group *ehca_drv_attr_groups[] = {
+	&ehca_drv_attr_grp,
+	NULL,
+};
+
 #define EHCA_RESOURCE_ATTR(name) \
 static ssize_t ehca_show_##name(struct device *dev, \
 				struct device_attribute *attr, \
@@ -688,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
 	struct ehca_shca *shca;
 	const u64 *handle;
 	struct ib_pd *ibpd;
-	int ret;
+	int ret, i;
 
 	handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
 	if (!handle) {
@@ -709,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
 		return -ENOMEM;
 	}
 	mutex_init(&shca->modify_mutex);
+	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
+		spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
 	shca->ofdev = dev;
 	shca->ipz_hca_handle.handle = *handle;
@@ -899,6 +907,9 @@ static struct of_platform_driver ehca_driver = {
 	.match_table = ehca_device_table,
 	.probe       = ehca_probe,
 	.remove      = ehca_remove,
+	.driver = {
+		.groups = ehca_drv_attr_groups,
+	},
 };
 
 void ehca_poll_eqs(unsigned long data)
@@ -926,7 +937,7 @@ void ehca_poll_eqs(unsigned long data)
 				ehca_process_eq(shca, 0);
 		}
 	}
-	mod_timer(&poll_eqs_timer, jiffies + HZ);
+	mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
 	spin_unlock(&shca_list_lock);
 }
 
@@ -957,10 +968,6 @@ int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
-	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
-	if (ret) /* only complain; we can live without attributes */
-		ehca_gen_err("Cannot create driver attributes ret=%d", ret);
-
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
 		ehca_gen_err("It is possible to lose interrupts.");
@@ -986,7 +993,6 @@ void __exit ehca_module_exit(void)
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
 
-	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
 	ibmebus_unregister_driver(&ehca_driver);
 
 	ehca_destroy_slab_caches();
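For context on the ehca_main.c sysfs changes: declaring the attribute group in the driver's `.groups` array lets the driver core create and remove the files around registration, which is why the manual sysfs_create_group()/sysfs_remove_group() calls vanish from module init/exit. A hedged sketch of the pattern, with invented names:

static struct attribute *my_drv_attrs[] = {
	/* &driver_attr_foo.attr, ... */
	NULL,
};

static struct attribute_group my_drv_attr_grp = {
	.attrs = my_drv_attrs,
};

/* NULL-terminated list, exactly as in the hunk above */
static struct attribute_group *my_drv_attr_groups[] = {
	&my_drv_attr_grp,
	NULL,
};

/* the bus driver only points its embedded device_driver at the list;
 * the core then handles create/remove, avoiding init/exit races */
static struct of_platform_driver my_driver = {
	.driver = {
		.groups = my_drv_attr_groups,
	},
};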
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index eff5fb55604b..1012f15a7140 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -592,10 +592,8 @@ static struct ehca_qp *internal_create_qp(
 		goto create_qp_exit1;
 	}
 
-	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-		parms.sigtype = HCALL_SIGT_EVERY;
-	else
-		parms.sigtype = HCALL_SIGT_BY_WQE;
+	/* Always signal by WQE so we can hide circ. WQEs */
+	parms.sigtype = HCALL_SIGT_BY_WQE;
 
 	/* UD_AV CIRCUMVENTION */
 	max_send_sge = init_attr->cap.max_send_sge;
@@ -618,6 +616,10 @@ static struct ehca_qp *internal_create_qp(
 	parms.squeue.max_sge = max_send_sge;
 	parms.rqueue.max_sge = max_recv_sge;
 
+	/* RC QPs need one more SWQE for unsolicited ack circumvention */
+	if (qp_type == IB_QPT_RC)
+		parms.squeue.max_wr++;
+
 	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
 		if (HAS_SQ(my_qp))
 			ehca_determine_small_queue(
@@ -650,6 +652,8 @@ static struct ehca_qp *internal_create_qp(
 			parms.squeue.act_nr_sges = 1;
 			parms.rqueue.act_nr_sges = 1;
 		}
+		/* hide the extra WQE */
+		parms.squeue.act_nr_wqes--;
 		break;
 	case IB_QPT_UD:
 	case IB_QPT_GSI:
@@ -729,12 +733,31 @@ static struct ehca_qp *internal_create_qp(
 	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
 	my_qp->init_attr = *init_attr;
 
+	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+			&my_qp->ib_qp;
+		if (ehca_nr_ports < 0) {
+			/* alloc array to cache subsequent modify qp parms
+			 * for autodetect mode
+			 */
+			my_qp->mod_qp_parm =
+				kzalloc(EHCA_MOD_QP_PARM_MAX *
+					sizeof(*my_qp->mod_qp_parm),
+					GFP_KERNEL);
+			if (!my_qp->mod_qp_parm) {
+				ehca_err(pd->device,
+					 "Could not alloc mod_qp_parm");
+				goto create_qp_exit4;
+			}
+		}
+	}
+
 	/* NOTE: define_apq0() not supported yet */
 	if (qp_type == IB_QPT_GSI) {
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
 			ret = ehca2ib_return_code(h_ret);
-			goto create_qp_exit4;
+			goto create_qp_exit5;
 		}
 	}
 
@@ -743,7 +766,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device,
 				 "Couldn't assign qp to send_cq ret=%i", ret);
-			goto create_qp_exit4;
+			goto create_qp_exit5;
 		}
 	}
 
@@ -769,12 +792,18 @@ static struct ehca_qp *internal_create_qp(
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit4;
+			goto create_qp_exit6;
 		}
 	}
 
 	return my_qp;
 
+create_qp_exit6:
+	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
+
+create_qp_exit5:
+	kfree(my_qp->mod_qp_parm);
+
 create_qp_exit4:
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
@@ -858,7 +887,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				update_mask,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not modify SRQ to INIT"
+		ehca_err(pd->device, "Could not modify SRQ to INIT "
 			 "ehca_qp=%p qp_num=%x h_ret=%li",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
@@ -872,7 +901,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				update_mask,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not enable SRQ"
+		ehca_err(pd->device, "Could not enable SRQ "
 			 "ehca_qp=%p qp_num=%x h_ret=%li",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
@@ -886,7 +915,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
 				update_mask,
 				mqpcb, my_qp->galpas.kernel);
 	if (hret != H_SUCCESS) {
-		ehca_err(pd->device, "Could not modify SRQ to RTR"
+		ehca_err(pd->device, "Could not modify SRQ to RTR "
 			 "ehca_qp=%p qp_num=%x h_ret=%li",
 			 my_qp, my_qp->real_qp_num, hret);
 		goto create_srq2;
@@ -992,7 +1021,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	unsigned long flags = 0;
 
 	/* do query_qp to obtain current attr values */
-	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
 	if (!mqpcb) {
 		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
 			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1180,6 +1209,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
 	}
 	if (attr_mask & IB_QP_PORT) {
+		struct ehca_sport *sport;
+		struct ehca_qp *aqp1;
 		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
 			ret = -EINVAL;
 			ehca_err(ibqp->device, "Invalid port=%x. "
@@ -1188,6 +1219,29 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 				 shca->num_ports);
 			goto modify_qp_exit2;
 		}
+		sport = &shca->sport[attr->port_num - 1];
+		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
+			/* should not occur */
+			ret = -EFAULT;
+			ehca_err(ibqp->device, "AQP1 was not created for "
+				 "port=%x", attr->port_num);
+			goto modify_qp_exit2;
+		}
+		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
+				    struct ehca_qp, ib_qp);
+		if (ibqp->qp_type != IB_QPT_GSI &&
+		    ibqp->qp_type != IB_QPT_SMI &&
+		    aqp1->mod_qp_parm) {
+			/*
+			 * firmware will reject this modify_qp() because
+			 * port is not activated/initialized fully
+			 */
+			ret = -EFAULT;
+			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
+				  "either port is being activated (try again) "
+				  "or cabling issue", attr->port_num);
+			goto modify_qp_exit2;
+		}
 		mqpcb->prim_phys_port = attr->port_num;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
 	}
@@ -1244,6 +1298,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (attr_mask & IB_QP_PATH_MTU) {
+		/* store ld(MTU) */
+		my_qp->mtu_shift = attr->path_mtu + 7;
 		mqpcb->path_mtu = attr->path_mtu;
 		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
 	}
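The `my_qp->mtu_shift = attr->path_mtu + 7` line above leans on the IB verbs MTU encoding (codes 1..5 for 256..4096 bytes), so the shift is log2 of the MTU in bytes; ehca_write_swqe() later uses `(dma_length >> qp->mtu_shift) + 1` to estimate packets per message. A standalone sketch of that arithmetic, illustrative rather than driver code:

#include <stdint.h>

/* same encoding as the ib_mtu enum in <rdma/ib_verbs.h> */
enum mtu_code { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

static uint32_t estimate_packets(uint64_t dma_length, enum mtu_code mtu)
{
	unsigned mtu_shift = (unsigned)mtu + 7;	/* 2^(code+7) bytes/packet */

	/* +1 accounts for the final partial packet */
	return (uint32_t)(dma_length >> mtu_shift) + 1;
}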
@@ -1467,6 +1523,8 @@ modify_qp_exit1:
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		   struct ib_udata *udata)
 {
+	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+					      ib_device);
 	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
 					     ib_pd);
@@ -1479,9 +1537,100 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 		return -EINVAL;
 	}
 
+	/* The if-block below caches qp_attr to be modified for GSI and SMI
+	 * qps during the initialization by ib_mad. When the respective port
+	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
+	 * cached modify calls sequence, see ehca_recover_sqp() below.
+	 * Why that is required:
+	 * 1) If one port is connected, older code requires port one to
+	 *    be connected and module option nr_ports=1 to be given by
+	 *    the user, which is very inconvenient for the end user.
+	 * 2) Firmware accepts modify_qp() only if the respective port has
+	 *    become active. Older code had a wait loop of 30sec in
+	 *    create_qp()/define_aqp1(), which is not appropriate in
+	 *    practice. This code removes that wait loop, see define_aqp1(),
+	 *    and always reports all ports to ib_mad resp. users. Only
+	 *    activated ports will then be usable for the users.
+	 */
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+		int port = my_qp->init_attr.port_num;
+		struct ehca_sport *sport = &shca->sport[port - 1];
+		unsigned long flags;
+		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+		/* cache qp_attr only during init */
+		if (my_qp->mod_qp_parm) {
+			struct ehca_mod_qp_parm *p;
+			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
+				ehca_err(&shca->ib_device,
+					 "mod_qp_parm overflow state=%x port=%x"
+					 " type=%x", attr->qp_state,
+					 my_qp->init_attr.port_num,
+					 ibqp->qp_type);
+				spin_unlock_irqrestore(&sport->mod_sqp_lock,
+						       flags);
+				return -EINVAL;
+			}
+			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
+			p->mask = attr_mask;
+			p->attr = *attr;
+			my_qp->mod_qp_parm_idx++;
+			ehca_dbg(&shca->ib_device,
+				 "Saved qp_attr for state=%x port=%x type=%x",
+				 attr->qp_state, my_qp->init_attr.port_num,
+				 ibqp->qp_type);
+			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+			return 0;
+		}
+		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+	}
+
 	return internal_modify_qp(ibqp, attr, attr_mask, 0);
 }
 
+void ehca_recover_sqp(struct ib_qp *sqp)
+{
+	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
+	int port = my_sqp->init_attr.port_num;
+	struct ib_qp_attr attr;
+	struct ehca_mod_qp_parm *qp_parm;
+	int i, qp_parm_idx, ret;
+	unsigned long flags, wr_cnt;
+
+	if (!my_sqp->mod_qp_parm)
+		return;
+	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
+
+	qp_parm = my_sqp->mod_qp_parm;
+	qp_parm_idx = my_sqp->mod_qp_parm_idx;
+	for (i = 0; i < qp_parm_idx; i++) {
+		attr = qp_parm[i].attr;
+		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
+		if (ret) {
+			ehca_err(sqp->device, "Could not modify SQP port=%x "
+				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
+			goto free_qp_parm;
+		}
+		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
+			 port, sqp->qp_num, attr.qp_state);
+	}
+
+	/* re-trigger posted recv wrs */
+	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
+		my_sqp->ipz_rqueue.qe_size;
+	if (wr_cnt) {
+		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
+		hipz_update_rqa(my_sqp, wr_cnt);
+		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
+		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
+			 port, sqp->qp_num, wr_cnt);
+	}
+
+free_qp_parm:
+	kfree(qp_parm);
+	/* this prevents subsequent calls to modify_qp() from caching qp_attr */
+	my_sqp->mod_qp_parm = NULL;
+}
+
 int ehca_query_qp(struct ib_qp *qp,
 		  struct ib_qp_attr *qp_attr,
 		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
@@ -1769,6 +1918,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
 					     ib_pd);
+	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
 	u32 cur_pid = current->tgid;
 	u32 qp_num = my_qp->real_qp_num;
 	int ret;
@@ -1815,6 +1965,14 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	port_num = my_qp->init_attr.port_num;
 	qp_type = my_qp->init_attr.qp_type;
 
+	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+		kfree(my_qp->mod_qp_parm);
+		my_qp->mod_qp_parm = NULL;
+		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
+		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+	}
+
 	/* no support for IB_QPT_SMI yet */
 	if (qp_type == IB_QPT_GSI) {
 		struct ib_event event;
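Taken together, the ehca_classes.h, ehca_irq.c and ehca_qp.c hunks implement a cache-and-replay scheme: while a special QP's port is still coming up, modify_qp() requests are only recorded; the PORT_ACTIVE handler replays them in order and then frees the cache, so later calls go straight to firmware. A condensed, kernel-free sketch of that state machine, with all names illustrative:

#include <stdlib.h>

#define PARM_MAX 4

struct mod_req { int mask; int state; };	/* stand-in for ib_qp_attr */

struct sqp {
	struct mod_req *cache;	/* non-NULL only until the port goes active */
	int idx;
};

/* modify path: record instead of executing while the cache exists */
static int sqp_modify(struct sqp *s, struct mod_req r,
		      int (*apply)(struct mod_req))
{
	if (!s->cache)
		return apply(r);	/* port active: straight through */
	if (s->idx >= PARM_MAX)
		return -1;		/* overflow, as in the diff */
	s->cache[s->idx++] = r;
	return 0;
}

/* PORT_ACTIVE path: replay in order, then disable caching for good */
static void sqp_recover(struct sqp *s, int (*apply)(struct mod_req))
{
	int i;

	if (!s->cache)
		return;
	for (i = 0; i < s->idx; i++)
		if (apply(s->cache[i]))
			break;
	free(s->cache);
	s->cache = NULL;
}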
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index ea91360835d3..2ce8cffb8664 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -50,6 +50,9 @@
 #include "hcp_if.h"
 #include "hipz_fns.h"
 
+/* in RC traffic, insert an empty RDMA READ every this many packets */
+#define ACK_CIRC_THRESHOLD 2000000
+
 static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
 				  struct ehca_wqe *wqe_p,
 				  struct ib_recv_wr *recv_wr)
@@ -81,7 +84,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
 	if (ehca_debug_level) {
 		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
 			     ipz_rqueue);
-		ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
 	}
 
 	return 0;
@@ -135,7 +138,8 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
 
 static inline int ehca_write_swqe(struct ehca_qp *qp,
 				  struct ehca_wqe *wqe_p,
-				  const struct ib_send_wr *send_wr)
+				  const struct ib_send_wr *send_wr,
+				  int hidden)
 {
 	u32 idx;
 	u64 dma_length;
@@ -176,7 +180,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	wqe_p->wr_flag = 0;
 
-	if (send_wr->send_flags & IB_SEND_SIGNALED)
+	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
+	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
+	    && !hidden)
 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
 
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
@@ -199,10 +205,14 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
 		wqe_p->local_ee_context_qkey = remote_qkey;
-		if (!send_wr->wr.ud.ah) {
+		if (unlikely(!send_wr->wr.ud.ah)) {
 			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
 			return -EINVAL;
 		}
+		if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
+			ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
+			return -EINVAL;
+		}
 		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
 		wqe_p->u.ud_av.ud_av = my_av->av;
 
@@ -255,6 +265,15 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 		} /* eof idx */
 		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
 
+		/* unsolicited ack circumvention */
+		if (send_wr->opcode == IB_WR_RDMA_READ) {
+			/* on RDMA read, switch on and reset counters */
+			qp->message_count = qp->packet_count = 0;
+			qp->unsol_ack_circ = 1;
+		} else
+			/* else estimate #packets */
+			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
+
 		break;
 
 	default:
@@ -355,13 +374,49 @@ static inline void map_ib_wc_status(u32 cqe_status,
 	*wc_status = IB_WC_SUCCESS;
 }
 
+static inline int post_one_send(struct ehca_qp *my_qp,
+			 struct ib_send_wr *cur_send_wr,
+			 struct ib_send_wr **bad_send_wr,
+			 int hidden)
+{
+	struct ehca_wqe *wqe_p;
+	int ret;
+	u64 start_offset = my_qp->ipz_squeue.current_q_offset;
+
+	/* get pointer next to free WQE */
+	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
+	if (unlikely(!wqe_p)) {
+		/* too many posted work requests: queue overflow */
+		if (bad_send_wr)
+			*bad_send_wr = cur_send_wr;
+		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
+			 "qp_num=%x", my_qp->ib_qp.qp_num);
+		return -ENOMEM;
+	}
+	/* write a SEND WQE into the QUEUE */
+	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+	/*
+	 * if something failed,
+	 * reset the free entry pointer to the start value
+	 */
+	if (unlikely(ret)) {
+		my_qp->ipz_squeue.current_q_offset = start_offset;
+		if (bad_send_wr)
+			*bad_send_wr = cur_send_wr;
+		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
+			 "qp_num=%x", my_qp->ib_qp.qp_num);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int ehca_post_send(struct ib_qp *qp,
 		   struct ib_send_wr *send_wr,
 		   struct ib_send_wr **bad_send_wr)
 {
 	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
 	struct ib_send_wr *cur_send_wr;
-	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
 	unsigned long flags;
@@ -369,37 +424,33 @@ int ehca_post_send(struct ib_qp *qp,
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_s, flags);
 
+	/* Send an empty extra RDMA read if:
+	 *  1) there has been an RDMA read on this connection before
+	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
+	 *  3) we can be sure that any previous extra RDMA read has been
+	 *     processed so we don't overflow the SQ
+	 */
+	if (unlikely(my_qp->unsol_ack_circ &&
+		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
+		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
+		/* insert an empty RDMA READ to fix up the remote QP state */
+		struct ib_send_wr circ_wr;
+		memset(&circ_wr, 0, sizeof(circ_wr));
+		circ_wr.opcode = IB_WR_RDMA_READ;
+		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
+		wqe_cnt++;
+		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
+		my_qp->message_count = my_qp->packet_count = 0;
+	}
+
 	/* loop processes list of send reqs */
 	for (cur_send_wr = send_wr; cur_send_wr != NULL;
 	     cur_send_wr = cur_send_wr->next) {
-		u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-		/* get pointer next to free WQE */
-		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-		if (unlikely(!wqe_p)) {
-			/* too many posted work requests: queue overflow */
-			if (bad_send_wr)
-				*bad_send_wr = cur_send_wr;
-			if (wqe_cnt == 0) {
-				ret = -ENOMEM;
-				ehca_err(qp->device, "Too many posted WQEs "
-					 "qp_num=%x", qp->qp_num);
-			}
-			goto post_send_exit0;
-		}
-		/* write a SEND WQE into the QUEUE */
-		ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
-		/*
-		 * if something failed,
-		 * reset the free entry pointer to the start value
-		 */
+		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
 		if (unlikely(ret)) {
-			my_qp->ipz_squeue.current_q_offset = start_offset;
-			*bad_send_wr = cur_send_wr;
-			if (wqe_cnt == 0) {
-				ret = -EINVAL;
-				ehca_err(qp->device, "Could not write WQE "
-					 "qp_num=%x", qp->qp_num);
-			}
+			/* if one or more WQEs were successful, don't fail */
+			if (wqe_cnt)
+				ret = 0;
 			goto post_send_exit0;
 		}
 		wqe_cnt++;
@@ -410,6 +461,7 @@ int ehca_post_send(struct ib_qp *qp,
 post_send_exit0:
 	iosync(); /* serialize GAL register access */
 	hipz_update_sqa(my_qp, wqe_cnt);
+	my_qp->message_count += wqe_cnt;
 	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
 	return ret;
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index f0792e5fbd02..706d97ad5555 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -39,15 +39,18 @@
  *  POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <rdma/ib_mad.h>
 
-#include <linux/module.h>
-#include <linux/err.h>
 #include "ehca_classes.h"
 #include "ehca_tools.h"
-#include "ehca_qes.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
+#define IB_MAD_STATUS_REDIRECT		__constant_htons(0x0002)
+#define IB_MAD_STATUS_UNSUP_VERSION	__constant_htons(0x0004)
+#define IB_MAD_STATUS_UNSUP_METHOD	__constant_htons(0x0008)
+
+#define IB_PMA_CLASS_PORT_INFO		__constant_htons(0x0001)
 
 /**
  * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue
@@ -86,6 +89,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
 				 port, ret);
 			return ret;
 		}
+		shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
+		ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
+			 port, pma_qp_nr);
 		break;
 	default:
 		ehca_err(&shca->ib_device, "invalid qp_type=%x",
@@ -93,6 +99,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
 		return H_PARAMETER;
 	}
 
+	if (ehca_nr_ports < 0) /* autodetect mode */
+		return H_SUCCESS;
+
 	for (counter = 0;
 	     shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
 		     counter < ehca_port_act_time;
@@ -109,3 +118,85 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
109 118
110 return H_SUCCESS; 119 return H_SUCCESS;
111} 120}
121
122struct ib_perf {
123 struct ib_mad_hdr mad_hdr;
124 u8 reserved[40];
125 u8 data[192];
126} __attribute__ ((packed));
127
128
129static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
130 struct ib_mad *in_mad, struct ib_mad *out_mad)
131{
132 struct ib_perf *in_perf = (struct ib_perf *)in_mad;
133 struct ib_perf *out_perf = (struct ib_perf *)out_mad;
134 struct ib_class_port_info *poi =
135 (struct ib_class_port_info *)out_perf->data;
136 struct ehca_shca *shca =
137 container_of(ibdev, struct ehca_shca, ib_device);
138 struct ehca_sport *sport = &shca->sport[port_num - 1];
139
140 ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
141
142 *out_mad = *in_mad;
143
144 if (in_perf->mad_hdr.class_version != 1) {
145 ehca_warn(ibdev, "Unsupported class_version=%x",
146 in_perf->mad_hdr.class_version);
147 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
148 goto perf_reply;
149 }
150
151 switch (in_perf->mad_hdr.method) {
152 case IB_MGMT_METHOD_GET:
153 case IB_MGMT_METHOD_SET:
154 /* set class port info for redirection */
155 out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
156 out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
157 memset(poi, 0, sizeof(*poi));
158 poi->base_version = 1;
159 poi->class_version = 1;
160 poi->resp_time_value = 18;
161 poi->redirect_lid = sport->saved_attr.lid;
162 poi->redirect_qp = sport->pma_qp_nr;
163 poi->redirect_qkey = IB_QP1_QKEY;
164 poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
165
166 ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
167 sport->saved_attr.lid, sport->pma_qp_nr);
168 break;
169
170 case IB_MGMT_METHOD_GET_RESP:
171 return IB_MAD_RESULT_FAILURE;
172
173 default:
174 out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
175 break;
176 }
177
178perf_reply:
179 out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
180
181 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
182}
183
184int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
185 struct ib_wc *in_wc, struct ib_grh *in_grh,
186 struct ib_mad *in_mad,
187 struct ib_mad *out_mad)
188{
189 int ret;
190
191 if (!port_num || port_num > ibdev->phys_port_cnt)
192 return IB_MAD_RESULT_FAILURE;
193
194 /* accept only pma request */
195 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
196 return IB_MAD_RESULT_SUCCESS;
197
198 ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
199 ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
200
201 return ret;
202}
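
Taken together, ehca_process_mad() and ehca_process_perf() answer any PMA Get/Set with a ClassPortInfo reply whose status carries the redirect bit, steering the querier to the firmware-owned PMA QP recorded in pma_qp_nr. As a rough sketch (not code from this patch), a querier could follow that redirect as below, reusing the struct ib_perf overlay defined above and assuming the redirect fields hold big-endian wire values as ib_class_port_info declares them:

	#include <rdma/ib_mad.h>

	static int follow_pma_redirect(struct ib_mad *reply,
				       u16 *dlid, u32 *qpn, u32 *qkey)
	{
		struct ib_class_port_info *cpi = (struct ib_class_port_info *)
			((struct ib_perf *)reply)->data;

		/* 0x0002 is the redirect bit used by IB_MAD_STATUS_REDIRECT */
		if (!(reply->mad_hdr.status & __constant_htons(0x0002)))
			return 0;	/* answered directly, nothing to do */

		*dlid = be16_to_cpu(cpi->redirect_lid);
		*qpn = be32_to_cpu(cpi->redirect_qp) & 0xffffff; /* 24-bit QPN */
		*qkey = be32_to_cpu(cpi->redirect_qkey);
		return 1;		/* resend the query to dlid/qpn */
	}
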
diff --git a/drivers/infiniband/hw/ipath/ipath_common.h b/drivers/infiniband/hw/ipath/ipath_common.h
index 851df8a75e79..414621095540 100644
--- a/drivers/infiniband/hw/ipath/ipath_common.h
+++ b/drivers/infiniband/hw/ipath/ipath_common.h
@@ -82,6 +82,16 @@
82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */ 82#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
83 83
84/* 84/*
85 * These 3 values (SDR and DDR may be ORed for auto-speed
 86 * negotiation) are used for the 3rd argument to ipath_f_set_ib_cfg
 87 * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs. They
 88 * are also the possible values for ipath_link_speed_enabled and active.
89 * The values were chosen to match values used within the IB spec.
90 */
91#define IPATH_IB_SDR 1
92#define IPATH_IB_DDR 2
93
94/*
85 * stats maintained by the driver. For now, at least, this is global 95 * stats maintained by the driver. For now, at least, this is global
86 * to all minor devices. 96 * to all minor devices.
87 */ 97 */
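
Since SDR and DDR occupy separate bits, ORing them requests auto-speed negotiation. A hypothetical call through the per-chip hook named in the comment above would look like:

	/* hypothetical call site: ask the chip to autonegotiate SDR/DDR */
	dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB,
			       IPATH_IB_SDR | IPATH_IB_DDR);	/* == 3 */
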
@@ -433,8 +443,9 @@ struct ipath_user_info {
433#define IPATH_CMD_UNUSED_2 26 443#define IPATH_CMD_UNUSED_2 26
434#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */ 444#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
435#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */ 445#define IPATH_CMD_POLL_TYPE 28 /* set the kind of polling we want */
446#define IPATH_CMD_ARMLAUNCH_CTRL 29 /* armlaunch detection control */
436 447
437#define IPATH_CMD_MAX 28 448#define IPATH_CMD_MAX 29
438 449
439/* 450/*
440 * Poll types 451 * Poll types
@@ -477,6 +488,8 @@ struct ipath_cmd {
477 __u64 port_info; 488 __u64 port_info;
478 /* enable/disable receipt of packets */ 489 /* enable/disable receipt of packets */
479 __u32 recv_ctrl; 490 __u32 recv_ctrl;
491 /* enable/disable armlaunch errors (non-zero to enable) */
492 __u32 armlaunch_ctrl;
480 /* partition key to set */ 493 /* partition key to set */
481 __u16 part_key; 494 __u16 part_key;
482 /* user address of __u32 bitmask of active slaves */ 495 /* user address of __u32 bitmask of active slaves */
@@ -579,7 +592,7 @@ struct ipath_flash {
579struct infinipath_counters { 592struct infinipath_counters {
580 __u64 LBIntCnt; 593 __u64 LBIntCnt;
581 __u64 LBFlowStallCnt; 594 __u64 LBFlowStallCnt;
582 __u64 Reserved1; 595 __u64 TxSDmaDescCnt; /* was Reserved1 */
583 __u64 TxUnsupVLErrCnt; 596 __u64 TxUnsupVLErrCnt;
584 __u64 TxDataPktCnt; 597 __u64 TxDataPktCnt;
585 __u64 TxFlowPktCnt; 598 __u64 TxFlowPktCnt;
@@ -615,12 +628,26 @@ struct infinipath_counters {
615 __u64 RxP6HdrEgrOvflCnt; 628 __u64 RxP6HdrEgrOvflCnt;
616 __u64 RxP7HdrEgrOvflCnt; 629 __u64 RxP7HdrEgrOvflCnt;
617 __u64 RxP8HdrEgrOvflCnt; 630 __u64 RxP8HdrEgrOvflCnt;
618 __u64 Reserved6; 631 __u64 RxP9HdrEgrOvflCnt; /* was Reserved6 */
619 __u64 Reserved7; 632 __u64 RxP10HdrEgrOvflCnt; /* was Reserved7 */
633 __u64 RxP11HdrEgrOvflCnt; /* new for IBA7220 */
634 __u64 RxP12HdrEgrOvflCnt; /* new for IBA7220 */
635 __u64 RxP13HdrEgrOvflCnt; /* new for IBA7220 */
636 __u64 RxP14HdrEgrOvflCnt; /* new for IBA7220 */
637 __u64 RxP15HdrEgrOvflCnt; /* new for IBA7220 */
638 __u64 RxP16HdrEgrOvflCnt; /* new for IBA7220 */
620 __u64 IBStatusChangeCnt; 639 __u64 IBStatusChangeCnt;
621 __u64 IBLinkErrRecoveryCnt; 640 __u64 IBLinkErrRecoveryCnt;
622 __u64 IBLinkDownedCnt; 641 __u64 IBLinkDownedCnt;
623 __u64 IBSymbolErrCnt; 642 __u64 IBSymbolErrCnt;
643 /* The following are new for IBA7220 */
644 __u64 RxVL15DroppedPktCnt;
645 __u64 RxOtherLocalPhyErrCnt;
646 __u64 PcieRetryBufDiagQwordCnt;
647 __u64 ExcessBufferOvflCnt;
648 __u64 LocalLinkIntegrityErrCnt;
649 __u64 RxVlErrCnt;
650 __u64 RxDlidFltrCnt;
624}; 651};
625 652
626/* 653/*
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index d1380c7a1703..a03bd28d9b48 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -421,7 +421,7 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
421 else 421 else
422 n = head - tail; 422 n = head - tail;
423 if (unlikely((u32)cqe < n)) { 423 if (unlikely((u32)cqe < n)) {
424 ret = -EOVERFLOW; 424 ret = -EINVAL;
425 goto bail_unlock; 425 goto bail_unlock;
426 } 426 }
427 for (n = 0; tail != head; n++) { 427 for (n = 0; tail != head; n++) {
diff --git a/drivers/infiniband/hw/ipath/ipath_debug.h b/drivers/infiniband/hw/ipath/ipath_debug.h
index 19c56e6491eb..d6f69532d83f 100644
--- a/drivers/infiniband/hw/ipath/ipath_debug.h
+++ b/drivers/infiniband/hw/ipath/ipath_debug.h
@@ -55,7 +55,7 @@
55#define __IPATH_PKTDBG 0x80 /* print packet data */ 55#define __IPATH_PKTDBG 0x80 /* print packet data */
56/* print process startup (init)/exit messages */ 56/* print process startup (init)/exit messages */
57#define __IPATH_PROCDBG 0x100 57#define __IPATH_PROCDBG 0x100
58/* print mmap/nopage stuff, not using VDBG any more */ 58/* print mmap/fault stuff, not using VDBG any more */
59#define __IPATH_MMDBG 0x200 59#define __IPATH_MMDBG 0x200
60#define __IPATH_ERRPKTDBG 0x400 60#define __IPATH_ERRPKTDBG 0x400
61#define __IPATH_USER_SEND 0x1000 /* use user mode send */ 61#define __IPATH_USER_SEND 0x1000 /* use user mode send */
@@ -81,7 +81,7 @@
81#define __IPATH_VERBDBG 0x0 /* very verbose debug */ 81#define __IPATH_VERBDBG 0x0 /* very verbose debug */
82#define __IPATH_PKTDBG 0x0 /* print packet data */ 82#define __IPATH_PKTDBG 0x0 /* print packet data */
83#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */ 83#define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
84/* print mmap/nopage stuff, not using VDBG any more */ 84/* print mmap/fault stuff, not using VDBG any more */
85#define __IPATH_MMDBG 0x0 85#define __IPATH_MMDBG 0x0
86#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ 86#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
87#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ 87#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 1f152ded1e3c..d5ff6ca2db30 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -121,6 +121,9 @@ static struct pci_driver ipath_driver = {
121 .probe = ipath_init_one, 121 .probe = ipath_init_one,
122 .remove = __devexit_p(ipath_remove_one), 122 .remove = __devexit_p(ipath_remove_one),
123 .id_table = ipath_pci_tbl, 123 .id_table = ipath_pci_tbl,
124 .driver = {
125 .groups = ipath_driver_attr_groups,
126 },
124}; 127};
125 128
126static void ipath_check_status(struct work_struct *work) 129static void ipath_check_status(struct work_struct *work)
@@ -331,6 +334,8 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
331 udelay(1); 334 udelay(1);
332 } 335 }
333 336
337 ipath_disable_armlaunch(dd);
338
334 writeq(0, piobuf); /* length 0, no dwords actually sent */ 339 writeq(0, piobuf); /* length 0, no dwords actually sent */
335 ipath_flush_wc(); 340 ipath_flush_wc();
336 341
@@ -362,6 +367,7 @@ static void ipath_verify_pioperf(struct ipath_devdata *dd)
362done: 367done:
363 /* disarm piobuf, so it's available again */ 368 /* disarm piobuf, so it's available again */
364 ipath_disarm_piobufs(dd, pbnum, 1); 369 ipath_disarm_piobufs(dd, pbnum, 1);
370 ipath_enable_armlaunch(dd);
365} 371}
366 372
367static int __devinit ipath_init_one(struct pci_dev *pdev, 373static int __devinit ipath_init_one(struct pci_dev *pdev,
@@ -800,31 +806,37 @@ void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
800 unsigned cnt) 806 unsigned cnt)
801{ 807{
802 unsigned i, last = first + cnt; 808 unsigned i, last = first + cnt;
803 u64 sendctrl, sendorig; 809 unsigned long flags;
804 810
805 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first); 811 ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
806 sendorig = dd->ipath_sendctrl;
807 for (i = first; i < last; i++) { 812 for (i = first; i < last; i++) {
808 sendctrl = sendorig | INFINIPATH_S_DISARM | 813 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
809 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT); 814 /*
815 * The disarm-related bits are write-only, so it
816 * is ok to OR them in with our copy of sendctrl
817 * while we hold the lock.
818 */
810 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 819 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
811 sendctrl); 820 dd->ipath_sendctrl | INFINIPATH_S_DISARM |
821 (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
822 /* can't disarm bufs back-to-back per iba7220 spec */
823 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
824 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
812 } 825 }
813 826
814 /* 827 /*
815 * Write it again with current value, in case ipath_sendctrl changed 828 * Disable PIOAVAILUPD, then re-enable, reading scratch in
816 * while we were looping; no critical bits that would require
817 * locking.
818 *
819 * disable PIOAVAILUPD, then re-enable, reading scratch in
820 * between. This seems to avoid a chip timing race that causes 829 * between. This seems to avoid a chip timing race that causes
821 * pioavail updates to memory to stop. 830 * pioavail updates to memory to stop. We xor as we don't
831 * know the state of the bit when we're called.
822 */ 832 */
833 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
823 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 834 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
824 sendorig & ~INFINIPATH_S_PIOBUFAVAILUPD); 835 dd->ipath_sendctrl ^ INFINIPATH_S_PIOBUFAVAILUPD);
825 sendorig = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 836 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
826 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 837 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
827 dd->ipath_sendctrl); 838 dd->ipath_sendctrl);
839 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
828} 840}
829 841
830/** 842/**
@@ -1000,12 +1012,10 @@ static void get_rhf_errstring(u32 err, char *msg, size_t len)
1000 * ipath_get_egrbuf - get an eager buffer 1012 * ipath_get_egrbuf - get an eager buffer
1001 * @dd: the infinipath device 1013 * @dd: the infinipath device
1002 * @bufnum: the eager buffer to get 1014 * @bufnum: the eager buffer to get
1003 * @err: unused
1004 * 1015 *
1005 * must only be called if ipath_pd[port] is known to be allocated 1016 * must only be called if ipath_pd[port] is known to be allocated
1006 */ 1017 */
1007static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum, 1018static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
1008 int err)
1009{ 1019{
1010 return dd->ipath_port0_skbinfo ? 1020 return dd->ipath_port0_skbinfo ?
1011 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL; 1021 (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
@@ -1097,13 +1107,14 @@ static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
1097 1107
1098/* 1108/*
1099 * ipath_kreceive - receive a packet 1109 * ipath_kreceive - receive a packet
1100 * @dd: the infinipath device 1110 * @pd: the infinipath port
1101 * 1111 *
1102 * called from interrupt handler for errors or receive interrupt 1112 * called from interrupt handler for errors or receive interrupt
1103 */ 1113 */
1104void ipath_kreceive(struct ipath_devdata *dd) 1114void ipath_kreceive(struct ipath_portdata *pd)
1105{ 1115{
1106 u64 *rc; 1116 u64 *rc;
1117 struct ipath_devdata *dd = pd->port_dd;
1107 void *ebuf; 1118 void *ebuf;
1108 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */ 1119 const u32 rsize = dd->ipath_rcvhdrentsize; /* words */
1109 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */ 1120 const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
@@ -1118,8 +1129,8 @@ void ipath_kreceive(struct ipath_devdata *dd)
1118 goto bail; 1129 goto bail;
1119 } 1130 }
1120 1131
1121 l = dd->ipath_port0head; 1132 l = pd->port_head;
1122 hdrqtail = (u32) le64_to_cpu(*dd->ipath_hdrqtailptr); 1133 hdrqtail = ipath_get_rcvhdrtail(pd);
1123 if (l == hdrqtail) 1134 if (l == hdrqtail)
1124 goto bail; 1135 goto bail;
1125 1136
@@ -1128,7 +1139,7 @@ reloop:
1128 u32 qp; 1139 u32 qp;
1129 u8 *bthbytes; 1140 u8 *bthbytes;
1130 1141
1131 rc = (u64 *) (dd->ipath_pd[0]->port_rcvhdrq + (l << 2)); 1142 rc = (u64 *) (pd->port_rcvhdrq + (l << 2));
1132 hdr = (struct ipath_message_header *)&rc[1]; 1143 hdr = (struct ipath_message_header *)&rc[1];
1133 /* 1144 /*
1134 * could make a network order version of IPATH_KD_QP, and 1145 * could make a network order version of IPATH_KD_QP, and
@@ -1153,7 +1164,7 @@ reloop:
1153 etail = ipath_hdrget_index((__le32 *) rc); 1164 etail = ipath_hdrget_index((__le32 *) rc);
1154 if (tlen > sizeof(*hdr) || 1165 if (tlen > sizeof(*hdr) ||
1155 etype == RCVHQ_RCV_TYPE_NON_KD) 1166 etype == RCVHQ_RCV_TYPE_NON_KD)
1156 ebuf = ipath_get_egrbuf(dd, etail, 0); 1167 ebuf = ipath_get_egrbuf(dd, etail);
1157 } 1168 }
1158 1169
1159 /* 1170 /*
@@ -1188,7 +1199,7 @@ reloop:
1188 be32_to_cpu(hdr->bth[0]) & 0xff); 1199 be32_to_cpu(hdr->bth[0]) & 0xff);
1189 else { 1200 else {
1190 /* 1201 /*
1191 * error packet, type of error unknown. 1202 * error packet, type of error unknown.
1192 * Probably type 3, but we don't know, so don't 1203 * Probably type 3, but we don't know, so don't
1193 * even try to print the opcode, etc. 1204 * even try to print the opcode, etc.
1194 */ 1205 */
@@ -1238,7 +1249,7 @@ reloop:
1238 * earlier packets, we "almost" guarantee we have covered 1249 * earlier packets, we "almost" guarantee we have covered
1239 * that case. 1250 * that case.
1240 */ 1251 */
1241 u32 hqtail = (u32)le64_to_cpu(*dd->ipath_hdrqtailptr); 1252 u32 hqtail = ipath_get_rcvhdrtail(pd);
1242 if (hqtail != hdrqtail) { 1253 if (hqtail != hdrqtail) {
1243 hdrqtail = hqtail; 1254 hdrqtail = hqtail;
1244 reloop = 1; /* loop 1 extra time at most */ 1255 reloop = 1; /* loop 1 extra time at most */
@@ -1248,7 +1259,7 @@ reloop:
1248 1259
1249 pkttot += i; 1260 pkttot += i;
1250 1261
1251 dd->ipath_port0head = l; 1262 pd->port_head = l;
1252 1263
1253 if (pkttot > ipath_stats.sps_maxpkts_call) 1264 if (pkttot > ipath_stats.sps_maxpkts_call)
1254 ipath_stats.sps_maxpkts_call = pkttot; 1265 ipath_stats.sps_maxpkts_call = pkttot;
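
After this conversion ipath_kreceive() works entirely through the port: the head index and header queue live in ipath_portdata, and the tail comes from ipath_get_rcvhdrtail(). Stripped of error handling, the consumption loop has roughly this shape, with process_entry() a hypothetical stand-in for the per-packet work and rsize/maxcnt as defined at the top of the function:

	static void kreceive_sketch(struct ipath_portdata *pd,
				    u32 rsize, u32 maxcnt)
	{
		u32 l = pd->port_head;
		u32 hdrqtail = ipath_get_rcvhdrtail(pd);

		while (l != hdrqtail) {
			/* each entry is rsize words; l is a word index */
			u64 *rc = (u64 *) (pd->port_rcvhdrq + (l << 2));

			process_entry(pd, rc);	/* hypothetical */
			l += rsize;
			if (l == maxcnt)
				l = 0;		/* wrap to queue start */
		}
		pd->port_head = l;		/* publish the new head */
	}
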
@@ -1332,14 +1343,9 @@ static void ipath_update_pio_bufs(struct ipath_devdata *dd)
1332 /* 1343 /*
1333 * Chip Errata: bug 6641; even and odd qwords>3 are swapped 1344 * Chip Errata: bug 6641; even and odd qwords>3 are swapped
1334 */ 1345 */
1335 if (i > 3) { 1346 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
1336 if (i & 1) 1347 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
1337 piov = le64_to_cpu( 1348 else
1338 dd->ipath_pioavailregs_dma[i - 1]);
1339 else
1340 piov = le64_to_cpu(
1341 dd->ipath_pioavailregs_dma[i + 1]);
1342 } else
1343 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]); 1349 piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
1344 pchg = _IPATH_ALL_CHECKBITS & 1350 pchg = _IPATH_ALL_CHECKBITS &
1345 ~(dd->ipath_pioavailshadow[i] ^ piov); 1351 ~(dd->ipath_pioavailshadow[i] ^ piov);
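
The rewrite also collapses the old even/odd branches into one flag-gated expression: XOR-ing the index with 1 maps each even qword to the following odd one and back, which is exactly the pairing the errata requires, and the new IPATH_SWAP_PIOBUFS flag confines the swap to the chips that need it. A trivial demonstration of the index trick:

	#include <stdio.h>

	int main(void)
	{
		int i;

		for (i = 4; i < 8; i++)
			printf("%d <-> %d\n", i, i ^ 1);  /* 4<->5, 6<->7 */
		return 0;
	}
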
@@ -1598,7 +1604,8 @@ int ipath_create_rcvhdrq(struct ipath_devdata *dd,
1598 1604
1599 /* clear for security and sanity on each use */ 1605 /* clear for security and sanity on each use */
1600 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size); 1606 memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
1601 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE); 1607 if (pd->port_rcvhdrtail_kvaddr)
1608 memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
1602 1609
1603 /* 1610 /*
1604 * tell chip each time we init it, even if we are re-using previous 1611 * tell chip each time we init it, even if we are re-using previous
@@ -1614,77 +1621,6 @@ bail:
1614 return ret; 1621 return ret;
1615} 1622}
1616 1623
1617int ipath_waitfor_complete(struct ipath_devdata *dd, ipath_kreg reg_id,
1618 u64 bits_to_wait_for, u64 * valp)
1619{
1620 unsigned long timeout;
1621 u64 lastval, val;
1622 int ret;
1623
1624 lastval = ipath_read_kreg64(dd, reg_id);
1625 /* wait a ridiculously long time */
1626 timeout = jiffies + msecs_to_jiffies(5);
1627 do {
1628 val = ipath_read_kreg64(dd, reg_id);
1629 /* set so they have something, even on failures. */
1630 *valp = val;
1631 if ((val & bits_to_wait_for) == bits_to_wait_for) {
1632 ret = 0;
1633 break;
1634 }
1635 if (val != lastval)
1636 ipath_cdbg(VERBOSE, "Changed from %llx to %llx, "
1637 "waiting for %llx bits\n",
1638 (unsigned long long) lastval,
1639 (unsigned long long) val,
1640 (unsigned long long) bits_to_wait_for);
1641 cond_resched();
1642 if (time_after(jiffies, timeout)) {
1643 ipath_dbg("Didn't get bits %llx in register 0x%x, "
1644 "got %llx\n",
1645 (unsigned long long) bits_to_wait_for,
1646 reg_id, (unsigned long long) *valp);
1647 ret = -ENODEV;
1648 break;
1649 }
1650 } while (1);
1651
1652 return ret;
1653}
1654
1655/**
1656 * ipath_waitfor_mdio_cmdready - wait for last command to complete
1657 * @dd: the infinipath device
1658 *
1659 * Like ipath_waitfor_complete(), but we wait for the CMDVALID bit to go
1660 * away indicating the last command has completed. It doesn't return data
1661 */
1662int ipath_waitfor_mdio_cmdready(struct ipath_devdata *dd)
1663{
1664 unsigned long timeout;
1665 u64 val;
1666 int ret;
1667
1668 /* wait a ridiculously long time */
1669 timeout = jiffies + msecs_to_jiffies(5);
1670 do {
1671 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_mdio);
1672 if (!(val & IPATH_MDIO_CMDVALID)) {
1673 ret = 0;
1674 break;
1675 }
1676 cond_resched();
1677 if (time_after(jiffies, timeout)) {
1678 ipath_dbg("CMDVALID stuck in mdio reg? (%llx)\n",
1679 (unsigned long long) val);
1680 ret = -ENODEV;
1681 break;
1682 }
1683 } while (1);
1684
1685 return ret;
1686}
1687
1688 1624
1689/* 1625/*
1690 * Flush all sends that might be in the ready to send state, as well as any 1626 * Flush all sends that might be in the ready to send state, as well as any
@@ -2053,6 +1989,8 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
2053 */ 1989 */
2054void ipath_shutdown_device(struct ipath_devdata *dd) 1990void ipath_shutdown_device(struct ipath_devdata *dd)
2055{ 1991{
1992 unsigned long flags;
1993
2056 ipath_dbg("Shutting down the device\n"); 1994 ipath_dbg("Shutting down the device\n");
2057 1995
2058 dd->ipath_flags |= IPATH_LINKUNK; 1996 dd->ipath_flags |= IPATH_LINKUNK;
@@ -2073,9 +2011,13 @@ void ipath_shutdown_device(struct ipath_devdata *dd)
2073 * gracefully stop all sends allowing any in progress to trickle out 2011 * gracefully stop all sends allowing any in progress to trickle out
2074 * first. 2012 * first.
2075 */ 2013 */
2076 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); 2014 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
2015 dd->ipath_sendctrl = 0;
2016 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2077 /* flush it */ 2017 /* flush it */
2078 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 2018 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2019 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
2020
2079 /* 2021 /*
2080 * enough for anything that's going to trickle out to have actually 2022 * enough for anything that's going to trickle out to have actually
2081 * done so. 2023 * done so.
@@ -2217,25 +2159,15 @@ static int __init infinipath_init(void)
2217 goto bail_unit; 2159 goto bail_unit;
2218 } 2160 }
2219 2161
2220 ret = ipath_driver_create_group(&ipath_driver.driver);
2221 if (ret < 0) {
2222 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create driver "
2223 "sysfs entries: error %d\n", -ret);
2224 goto bail_pci;
2225 }
2226
2227 ret = ipath_init_ipathfs(); 2162 ret = ipath_init_ipathfs();
2228 if (ret < 0) { 2163 if (ret < 0) {
2229 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create " 2164 printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
2230 "ipathfs: error %d\n", -ret); 2165 "ipathfs: error %d\n", -ret);
2231 goto bail_group; 2166 goto bail_pci;
2232 } 2167 }
2233 2168
2234 goto bail; 2169 goto bail;
2235 2170
2236bail_group:
2237 ipath_driver_remove_group(&ipath_driver.driver);
2238
2239bail_pci: 2171bail_pci:
2240 pci_unregister_driver(&ipath_driver); 2172 pci_unregister_driver(&ipath_driver);
2241 2173
@@ -2250,8 +2182,6 @@ static void __exit infinipath_cleanup(void)
2250{ 2182{
2251 ipath_exit_ipathfs(); 2183 ipath_exit_ipathfs();
2252 2184
2253 ipath_driver_remove_group(&ipath_driver.driver);
2254
2255 ipath_cdbg(VERBOSE, "Unregistering pci driver\n"); 2185 ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
2256 pci_unregister_driver(&ipath_driver); 2186 pci_unregister_driver(&ipath_driver);
2257 2187
@@ -2344,5 +2274,34 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
2344 } 2274 }
2345 return 0; 2275 return 0;
2346} 2276}
2277
2278/*
2279 * Disable and enable the armlaunch error. Used for PIO bandwidth testing on
2280 * the 7220, which is count-based, rather than trigger-based. Safe for the
2281 * driver check, since it's at init. Not completely safe when used for
2282 * user-mode checking, since some error checking can be lost, but not
2283 * particularly risky, and only has problematic side-effects in the face of
2284 * very buggy user code. There is no reference counting, but that's also
2285 * fine, given the intended use.
2286 */
2287void ipath_enable_armlaunch(struct ipath_devdata *dd)
2288{
2289 dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
2290 ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
2291 INFINIPATH_E_SPIOARMLAUNCH);
2292 dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
2293 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2294 dd->ipath_errormask);
2295}
2296
2297void ipath_disable_armlaunch(struct ipath_devdata *dd)
2298{
2299 /* so don't re-enable if already set */
2300 dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
2301 dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
2302 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
2303 dd->ipath_errormask);
2304}
2305
2347module_init(infinipath_init); 2306module_init(infinipath_init);
2348module_exit(infinipath_cleanup); 2307module_exit(infinipath_cleanup);
diff --git a/drivers/infiniband/hw/ipath/ipath_eeprom.c b/drivers/infiniband/hw/ipath/ipath_eeprom.c
index e7c25dbbcdc9..e28a42f53769 100644
--- a/drivers/infiniband/hw/ipath/ipath_eeprom.c
+++ b/drivers/infiniband/hw/ipath/ipath_eeprom.c
@@ -510,10 +510,10 @@ int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
510{ 510{
511 int ret; 511 int ret;
512 512
513 ret = down_interruptible(&dd->ipath_eep_sem); 513 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
514 if (!ret) { 514 if (!ret) {
515 ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len); 515 ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
516 up(&dd->ipath_eep_sem); 516 mutex_unlock(&dd->ipath_eep_lock);
517 } 517 }
518 518
519 return ret; 519 return ret;
@@ -524,10 +524,10 @@ int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
524{ 524{
525 int ret; 525 int ret;
526 526
527 ret = down_interruptible(&dd->ipath_eep_sem); 527 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
528 if (!ret) { 528 if (!ret) {
529 ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len); 529 ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
530 up(&dd->ipath_eep_sem); 530 mutex_unlock(&dd->ipath_eep_lock);
531 } 531 }
532 532
533 return ret; 533 return ret;
@@ -574,7 +574,7 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
574 struct ipath_devdata *dd0 = ipath_lookup(0); 574 struct ipath_devdata *dd0 = ipath_lookup(0);
575 575
576 if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) { 576 if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
577 u8 *bguid, oguid; 577 u8 oguid;
578 dd->ipath_guid = dd0->ipath_guid; 578 dd->ipath_guid = dd0->ipath_guid;
579 bguid = (u8 *) & dd->ipath_guid; 579 bguid = (u8 *) & dd->ipath_guid;
580 580
@@ -616,9 +616,9 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
616 goto bail; 616 goto bail;
617 } 617 }
618 618
619 down(&dd->ipath_eep_sem); 619 mutex_lock(&dd->ipath_eep_lock);
620 eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len); 620 eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
621 up(&dd->ipath_eep_sem); 621 mutex_unlock(&dd->ipath_eep_lock);
622 622
623 if (eep_stat) { 623 if (eep_stat) {
624 ipath_dev_err(dd, "Failed reading GUID from eeprom\n"); 624 ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
@@ -674,7 +674,6 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
674 * elsewhere for backward-compatibility. 674 * elsewhere for backward-compatibility.
675 */ 675 */
676 char *snp = dd->ipath_serial; 676 char *snp = dd->ipath_serial;
677 int len;
678 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); 677 memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
679 snp[sizeof ifp->if_sprefix] = '\0'; 678 snp[sizeof ifp->if_sprefix] = '\0';
680 len = strlen(snp); 679 len = strlen(snp);
@@ -764,14 +763,14 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
764 /* Grab semaphore and read current EEPROM. If we get an 763 /* Grab semaphore and read current EEPROM. If we get an
765 * error, let go, but if not, keep it until we finish write. 764 * error, let go, but if not, keep it until we finish write.
766 */ 765 */
767 ret = down_interruptible(&dd->ipath_eep_sem); 766 ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
768 if (ret) { 767 if (ret) {
769 ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n"); 768 ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
770 goto free_bail; 769 goto free_bail;
771 } 770 }
772 ret = ipath_eeprom_internal_read(dd, 0, buf, len); 771 ret = ipath_eeprom_internal_read(dd, 0, buf, len);
773 if (ret) { 772 if (ret) {
774 up(&dd->ipath_eep_sem); 773 mutex_unlock(&dd->ipath_eep_lock);
 775 ipath_dev_err(dd, "Unable to read EEPROM for logging\n"); 774
776 goto free_bail; 775 goto free_bail;
777 } 776 }
@@ -779,7 +778,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
779 778
780 csum = flash_csum(ifp, 0); 779 csum = flash_csum(ifp, 0);
781 if (csum != ifp->if_csum) { 780 if (csum != ifp->if_csum) {
782 up(&dd->ipath_eep_sem); 781 mutex_unlock(&dd->ipath_eep_lock);
783 ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", 782 ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
784 csum, ifp->if_csum); 783 csum, ifp->if_csum);
785 ret = 1; 784 ret = 1;
@@ -849,7 +848,7 @@ int ipath_update_eeprom_log(struct ipath_devdata *dd)
849 csum = flash_csum(ifp, 1); 848 csum = flash_csum(ifp, 1);
850 ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1); 849 ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
851 } 850 }
852 up(&dd->ipath_eep_sem); 851 mutex_unlock(&dd->ipath_eep_lock);
853 if (ret) 852 if (ret)
854 ipath_dev_err(dd, "Failed updating EEPROM\n"); 853 ipath_dev_err(dd, "Failed updating EEPROM\n");
855 854
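
Every EEPROM path in this file now follows the same conversion idiom: the semaphore becomes a mutex (which participates in lockdep and may sleep), and the interruptible acquire preserves the old down_interruptible() signal semantics. The shape of each converted section, with do_eeprom_io() as a hypothetical accessor:

	#include <linux/mutex.h>

	extern int do_eeprom_io(struct ipath_devdata *dd);	/* hypothetical */

	static int eeprom_section(struct ipath_devdata *dd)
	{
		int ret;

		ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
		if (ret)
			return ret;	/* interrupted by a signal */
		ret = do_eeprom_io(dd);
		mutex_unlock(&dd->ipath_eep_lock);
		return ret;
	}
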
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 5de3243a47c3..7e025c8e01b6 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -169,7 +169,7 @@ static int ipath_get_base_info(struct file *fp,
169 kinfo->spi_piocnt = dd->ipath_pbufsport; 169 kinfo->spi_piocnt = dd->ipath_pbufsport;
170 kinfo->spi_piobufbase = (u64) pd->port_piobufs; 170 kinfo->spi_piobufbase = (u64) pd->port_piobufs;
171 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase + 171 kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
172 dd->ipath_palign * pd->port_port; 172 dd->ipath_ureg_align * pd->port_port;
173 } else if (master) { 173 } else if (master) {
174 kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) + 174 kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
175 (dd->ipath_pbufsport % subport_cnt); 175 (dd->ipath_pbufsport % subport_cnt);
@@ -186,7 +186,7 @@ static int ipath_get_base_info(struct file *fp,
186 } 186 }
187 if (shared) { 187 if (shared) {
188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase + 188 kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
189 dd->ipath_palign * pd->port_port; 189 dd->ipath_ureg_align * pd->port_port;
190 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs; 190 kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
191 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base; 191 kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
192 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr; 192 kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
@@ -742,11 +742,12 @@ static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
742 * updated and correct itself, even in the face of software 742 * updated and correct itself, even in the face of software
743 * bugs. 743 * bugs.
744 */ 744 */
745 *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0; 745 if (pd->port_rcvhdrtail_kvaddr)
746 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, 746 ipath_clear_rcvhdrtail(pd);
747 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
747 &dd->ipath_rcvctrl); 748 &dd->ipath_rcvctrl);
748 } else 749 } else
749 clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, 750 clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
750 &dd->ipath_rcvctrl); 751 &dd->ipath_rcvctrl);
751 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 752 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
752 dd->ipath_rcvctrl); 753 dd->ipath_rcvctrl);
@@ -881,7 +882,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
881 882
882 egrcnt = dd->ipath_rcvegrcnt; 883 egrcnt = dd->ipath_rcvegrcnt;
883 /* TID number offset for this port */ 884 /* TID number offset for this port */
884 egroff = pd->port_port * egrcnt; 885 egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
885 egrsize = dd->ipath_rcvegrbufsize; 886 egrsize = dd->ipath_rcvegrbufsize;
886 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid " 887 ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
887 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize); 888 "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
@@ -1049,11 +1050,6 @@ static int mmap_piobufs(struct vm_area_struct *vma,
1049 1050
1050 phys = dd->ipath_physaddr + piobufs; 1051 phys = dd->ipath_physaddr + piobufs;
1051 1052
1052 /*
1053 * Don't mark this as non-cached, or we don't get the
1054 * write combining behavior we want on the PIO buffers!
1055 */
1056
1057#if defined(__powerpc__) 1053#if defined(__powerpc__)
1058 /* There isn't a generic way to specify writethrough mappings */ 1054 /* There isn't a generic way to specify writethrough mappings */
1059 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 1055 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
@@ -1120,33 +1116,24 @@ bail:
1120} 1116}
1121 1117
1122/* 1118/*
1123 * ipath_file_vma_nopage - handle a VMA page fault. 1119 * ipath_file_vma_fault - handle a VMA page fault.
1124 */ 1120 */
1125static struct page *ipath_file_vma_nopage(struct vm_area_struct *vma, 1121static int ipath_file_vma_fault(struct vm_area_struct *vma,
1126 unsigned long address, int *type) 1122 struct vm_fault *vmf)
1127{ 1123{
1128 unsigned long offset = address - vma->vm_start; 1124 struct page *page;
1129 struct page *page = NOPAGE_SIGBUS;
1130 void *pageptr;
1131 1125
1132 /* 1126 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
1133 * Convert the vmalloc address into a struct page.
1134 */
1135 pageptr = (void *)(offset + (vma->vm_pgoff << PAGE_SHIFT));
1136 page = vmalloc_to_page(pageptr);
1137 if (!page) 1127 if (!page)
1138 goto out; 1128 return VM_FAULT_SIGBUS;
1139
1140 /* Increment the reference count. */
1141 get_page(page); 1129 get_page(page);
1142 if (type) 1130 vmf->page = page;
1143 *type = VM_FAULT_MINOR; 1131
1144out: 1132 return 0;
1145 return page;
1146} 1133}
1147 1134
1148static struct vm_operations_struct ipath_file_vm_ops = { 1135static struct vm_operations_struct ipath_file_vm_ops = {
1149 .nopage = ipath_file_vma_nopage, 1136 .fault = ipath_file_vma_fault,
1150}; 1137};
1151 1138
1152static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, 1139static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
@@ -1284,7 +1271,7 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
1284 goto bail; 1271 goto bail;
1285 } 1272 }
1286 1273
1287 ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port; 1274 ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
1288 if (!pd->port_subport_cnt) { 1275 if (!pd->port_subport_cnt) {
1289 /* port is not shared */ 1276 /* port is not shared */
1290 piocnt = dd->ipath_pbufsport; 1277 piocnt = dd->ipath_pbufsport;
@@ -1400,7 +1387,10 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
1400 pollflag = ipath_poll_hdrqfull(pd); 1387 pollflag = ipath_poll_hdrqfull(pd);
1401 1388
1402 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port); 1389 head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
1403 tail = *(volatile u64 *)pd->port_rcvhdrtail_kvaddr; 1390 if (pd->port_rcvhdrtail_kvaddr)
1391 tail = ipath_get_rcvhdrtail(pd);
1392 else
1393 tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
1404 1394
1405 if (head != tail) 1395 if (head != tail)
1406 pollflag |= POLLIN | POLLRDNORM; 1396 pollflag |= POLLIN | POLLRDNORM;
@@ -1410,7 +1400,7 @@ static unsigned int ipath_poll_next(struct ipath_portdata *pd,
1410 /* flush waiting flag so we don't miss an event */ 1400 /* flush waiting flag so we don't miss an event */
1411 wmb(); 1401 wmb();
1412 1402
1413 set_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT, 1403 set_bit(pd->port_port + dd->ipath_r_intravail_shift,
1414 &dd->ipath_rcvctrl); 1404 &dd->ipath_rcvctrl);
1415 1405
1416 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1406 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
@@ -1790,6 +1780,7 @@ static int find_shared_port(struct file *fp,
1790 } 1780 }
1791 port_fp(fp) = pd; 1781 port_fp(fp) = pd;
1792 subport_fp(fp) = pd->port_cnt++; 1782 subport_fp(fp) = pd->port_cnt++;
1783 pd->port_subpid[subport_fp(fp)] = current->pid;
1793 tidcursor_fp(fp) = 0; 1784 tidcursor_fp(fp) = 0;
1794 pd->active_slaves |= 1 << subport_fp(fp); 1785 pd->active_slaves |= 1 << subport_fp(fp);
1795 ipath_cdbg(PROC, 1786 ipath_cdbg(PROC,
@@ -1920,8 +1911,7 @@ static int ipath_do_user_init(struct file *fp,
1920 */ 1911 */
1921 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port); 1912 head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
1922 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port); 1913 ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
1923 dd->ipath_lastegrheads[pd->port_port] = -1; 1914 pd->port_lastrcvhdrqtail = -1;
1924 dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
1925 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n", 1915 ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
1926 pd->port_port, head32); 1916 pd->port_port, head32);
1927 pd->port_tidcursor = 0; /* start at beginning after open */ 1917 pd->port_tidcursor = 0; /* start at beginning after open */
@@ -1941,11 +1931,13 @@ static int ipath_do_user_init(struct file *fp,
 1941 * We explicitly set the in-memory copy to 0 beforehand, so we don't 1931
1942 * have to wait to be sure the DMA update has happened. 1932 * have to wait to be sure the DMA update has happened.
1943 */ 1933 */
1944 *(volatile u64 *)pd->port_rcvhdrtail_kvaddr = 0ULL; 1934 if (pd->port_rcvhdrtail_kvaddr)
1945 set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port, 1935 ipath_clear_rcvhdrtail(pd);
1936 set_bit(dd->ipath_r_portenable_shift + pd->port_port,
1946 &dd->ipath_rcvctrl); 1937 &dd->ipath_rcvctrl);
1947 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1938 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1948 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD); 1939 dd->ipath_rcvctrl &
1940 ~(1ULL << dd->ipath_r_tailupd_shift));
1949 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 1941 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
1950 dd->ipath_rcvctrl); 1942 dd->ipath_rcvctrl);
1951 /* Notify any waiting slaves */ 1943 /* Notify any waiting slaves */
@@ -2022,6 +2014,7 @@ static int ipath_close(struct inode *in, struct file *fp)
2022 * the slave(s) don't wait for receive data forever. 2014 * the slave(s) don't wait for receive data forever.
2023 */ 2015 */
2024 pd->active_slaves &= ~(1 << fd->subport); 2016 pd->active_slaves &= ~(1 << fd->subport);
2017 pd->port_subpid[fd->subport] = 0;
2025 mutex_unlock(&ipath_mutex); 2018 mutex_unlock(&ipath_mutex);
2026 goto bail; 2019 goto bail;
2027 } 2020 }
@@ -2054,9 +2047,9 @@ static int ipath_close(struct inode *in, struct file *fp)
2054 if (dd->ipath_kregbase) { 2047 if (dd->ipath_kregbase) {
2055 int i; 2048 int i;
2056 /* atomically clear receive enable port and intr avail. */ 2049 /* atomically clear receive enable port and intr avail. */
2057 clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port, 2050 clear_bit(dd->ipath_r_portenable_shift + port,
2058 &dd->ipath_rcvctrl); 2051 &dd->ipath_rcvctrl);
2059 clear_bit(pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT, 2052 clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
2060 &dd->ipath_rcvctrl); 2053 &dd->ipath_rcvctrl);
2061 ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl, 2054 ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
2062 dd->ipath_rcvctrl); 2055 dd->ipath_rcvctrl);
@@ -2149,11 +2142,15 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
2149 2142
2150static int ipath_force_pio_avail_update(struct ipath_devdata *dd) 2143static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
2151{ 2144{
2152 u64 reg = dd->ipath_sendctrl; 2145 unsigned long flags;
2153 2146
2154 clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg); 2147 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
2155 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg); 2148 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
2149 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
2150 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2156 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl); 2151 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
2152 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
2153 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
2157 2154
2158 return 0; 2155 return 0;
2159} 2156}
@@ -2227,6 +2224,11 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2227 dest = &cmd.cmd.poll_type; 2224 dest = &cmd.cmd.poll_type;
2228 src = &ucmd->cmd.poll_type; 2225 src = &ucmd->cmd.poll_type;
2229 break; 2226 break;
2227 case IPATH_CMD_ARMLAUNCH_CTRL:
2228 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2229 dest = &cmd.cmd.armlaunch_ctrl;
2230 src = &ucmd->cmd.armlaunch_ctrl;
2231 break;
2230 default: 2232 default:
2231 ret = -EINVAL; 2233 ret = -EINVAL;
2232 goto bail; 2234 goto bail;
@@ -2302,6 +2304,12 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
2302 case IPATH_CMD_POLL_TYPE: 2304 case IPATH_CMD_POLL_TYPE:
2303 pd->poll_type = cmd.cmd.poll_type; 2305 pd->poll_type = cmd.cmd.poll_type;
2304 break; 2306 break;
2307 case IPATH_CMD_ARMLAUNCH_CTRL:
2308 if (cmd.cmd.armlaunch_ctrl)
2309 ipath_enable_armlaunch(pd->port_dd);
2310 else
2311 ipath_disable_armlaunch(pd->port_dd);
2312 break;
2305 } 2313 }
2306 2314
2307 if (ret >= 0) 2315 if (ret >= 0)
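
From user space the new command travels the same path as the existing ones: the process fills a struct ipath_cmd and write()s it to its open port file descriptor. A hedged user-space sketch (error handling elided; the struct layout is as declared in ipath_common.h above):

	#include <string.h>
	#include <unistd.h>
	#include "ipath_common.h"  /* struct ipath_cmd, IPATH_CMD_ARMLAUNCH_CTRL */

	/* non-zero ctrl re-enables armlaunch error detection */
	static int set_armlaunch(int fd, unsigned int ctrl)
	{
		struct ipath_cmd cmd;

		memset(&cmd, 0, sizeof(cmd));
		cmd.type = IPATH_CMD_ARMLAUNCH_CTRL;
		cmd.cmd.armlaunch_ctrl = ctrl;
		return write(fd, &cmd, sizeof(cmd)) == sizeof(cmd) ? 0 : -1;
	}
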
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 262c25db05cd..23faba9d21eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -108,21 +108,16 @@ static const struct file_operations atomic_stats_ops = {
108 .read = atomic_stats_read, 108 .read = atomic_stats_read,
109}; 109};
110 110
111#define NUM_COUNTERS sizeof(struct infinipath_counters) / sizeof(u64)
112
113static ssize_t atomic_counters_read(struct file *file, char __user *buf, 111static ssize_t atomic_counters_read(struct file *file, char __user *buf,
114 size_t count, loff_t *ppos) 112 size_t count, loff_t *ppos)
115{ 113{
116 u64 counters[NUM_COUNTERS]; 114 struct infinipath_counters counters;
117 u16 i;
118 struct ipath_devdata *dd; 115 struct ipath_devdata *dd;
119 116
120 dd = file->f_path.dentry->d_inode->i_private; 117 dd = file->f_path.dentry->d_inode->i_private;
118 dd->ipath_f_read_counters(dd, &counters);
121 119
122 for (i = 0; i < NUM_COUNTERS; i++) 120 return simple_read_from_buffer(buf, count, ppos, &counters,
123 counters[i] = ipath_snap_cntr(dd, i);
124
125 return simple_read_from_buffer(buf, count, ppos, counters,
126 sizeof counters); 121 sizeof counters);
127} 122}
128 123
@@ -243,8 +238,7 @@ static int create_device_files(struct super_block *sb,
243 238
244 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit); 239 snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
245 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, 240 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
246 (struct file_operations *) &simple_dir_operations, 241 &simple_dir_operations, dd);
247 dd);
248 if (ret) { 242 if (ret) {
249 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); 243 printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
250 goto bail; 244 goto bail;
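
With this change a single read of the ipathfs atomic_counters file returns one consistent struct infinipath_counters snapshot taken through the per-chip read hook, instead of an array filled by indexed ipath_snap_cntr() calls. A user-space consumer becomes correspondingly simple (the mount point below is hypothetical):

	#include <fcntl.h>
	#include <unistd.h>
	#include "ipath_common.h"	/* struct infinipath_counters */

	static int read_counters(struct infinipath_counters *c)
	{
		int fd = open("/ipathfs/00/atomic_counters", O_RDONLY);
		int ok = fd >= 0 &&
			 read(fd, c, sizeof(*c)) == (ssize_t)sizeof(*c);

		if (fd >= 0)
			close(fd);
		return ok ? 0 : -1;	/* e.g. c->IBSymbolErrCnt on success */
	}
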
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6110.c b/drivers/infiniband/hw/ipath/ipath_iba6110.c
index ddbebe4bdb27..9e2ced3cdc5e 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6110.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6110.c
@@ -148,10 +148,57 @@ struct _infinipath_do_not_use_kernel_regs {
148 unsigned long long ReservedSW2[4]; 148 unsigned long long ReservedSW2[4];
149}; 149};
150 150
151#define IPATH_KREG_OFFSET(field) (offsetof(struct \ 151struct _infinipath_do_not_use_counters {
152 _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) 152 __u64 LBIntCnt;
153 __u64 LBFlowStallCnt;
154 __u64 Reserved1;
155 __u64 TxUnsupVLErrCnt;
156 __u64 TxDataPktCnt;
157 __u64 TxFlowPktCnt;
158 __u64 TxDwordCnt;
159 __u64 TxLenErrCnt;
160 __u64 TxMaxMinLenErrCnt;
161 __u64 TxUnderrunCnt;
162 __u64 TxFlowStallCnt;
163 __u64 TxDroppedPktCnt;
164 __u64 RxDroppedPktCnt;
165 __u64 RxDataPktCnt;
166 __u64 RxFlowPktCnt;
167 __u64 RxDwordCnt;
168 __u64 RxLenErrCnt;
169 __u64 RxMaxMinLenErrCnt;
170 __u64 RxICRCErrCnt;
171 __u64 RxVCRCErrCnt;
172 __u64 RxFlowCtrlErrCnt;
173 __u64 RxBadFormatCnt;
174 __u64 RxLinkProblemCnt;
175 __u64 RxEBPCnt;
176 __u64 RxLPCRCErrCnt;
177 __u64 RxBufOvflCnt;
178 __u64 RxTIDFullErrCnt;
179 __u64 RxTIDValidErrCnt;
180 __u64 RxPKeyMismatchCnt;
181 __u64 RxP0HdrEgrOvflCnt;
182 __u64 RxP1HdrEgrOvflCnt;
183 __u64 RxP2HdrEgrOvflCnt;
184 __u64 RxP3HdrEgrOvflCnt;
185 __u64 RxP4HdrEgrOvflCnt;
186 __u64 RxP5HdrEgrOvflCnt;
187 __u64 RxP6HdrEgrOvflCnt;
188 __u64 RxP7HdrEgrOvflCnt;
189 __u64 RxP8HdrEgrOvflCnt;
190 __u64 Reserved6;
191 __u64 Reserved7;
192 __u64 IBStatusChangeCnt;
193 __u64 IBLinkErrRecoveryCnt;
194 __u64 IBLinkDownedCnt;
195 __u64 IBSymbolErrCnt;
196};
197
198#define IPATH_KREG_OFFSET(field) (offsetof( \
199 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
153#define IPATH_CREG_OFFSET(field) (offsetof( \ 200#define IPATH_CREG_OFFSET(field) (offsetof( \
154 struct infinipath_counters, field) / sizeof(u64)) 201 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
155 202
156static const struct ipath_kregs ipath_ht_kregs = { 203static const struct ipath_kregs ipath_ht_kregs = {
157 .kr_control = IPATH_KREG_OFFSET(Control), 204 .kr_control = IPATH_KREG_OFFSET(Control),
@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_ht_cregs = {
282#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL 329#define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL
283#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL 330#define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL
284 331
332#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
333#define IBA6110_IBCS_LINKSTATE_SHIFT 4
334
285/* kr_extstatus bits */ 335/* kr_extstatus bits */
286#define INFINIPATH_EXTS_FREQSEL 0x2 336#define INFINIPATH_EXTS_FREQSEL 0x2
287#define INFINIPATH_EXTS_SERDESSEL 0x4 337#define INFINIPATH_EXTS_SERDESSEL 0x4
@@ -296,6 +346,12 @@ static const struct ipath_cregs ipath_ht_cregs = {
296#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL 346#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
297#define INFINIPATH_RT_BUFSIZE_SHIFT 48 347#define INFINIPATH_RT_BUFSIZE_SHIFT 48
298 348
349#define INFINIPATH_R_INTRAVAIL_SHIFT 16
350#define INFINIPATH_R_TAILUPD_SHIFT 31
351
352/* kr_xgxsconfig bits */
353#define INFINIPATH_XGXS_RESET 0x7ULL
354
299/* 355/*
300 * masks and bits that are different in different chips, or present only 356 * masks and bits that are different in different chips, or present only
301 * in one 357 * in one
@@ -652,7 +708,6 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
652 "with ID %u\n", boardrev); 708 "with ID %u\n", boardrev);
653 snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u", 709 snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
654 boardrev); 710 boardrev);
655 ret = 1;
656 break; 711 break;
657 } 712 }
658 if (n) 713 if (n)
@@ -686,6 +741,13 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
686 dd->ipath_htspeed); 741 dd->ipath_htspeed);
687 ret = 0; 742 ret = 0;
688 743
744 /*
745 * set here, not in ipath_init_*_funcs because we have to do
746 * it after we can read chip registers.
747 */
748 dd->ipath_ureg_align =
749 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
750
689bail: 751bail:
690 return ret; 752 return ret;
691} 753}
@@ -969,7 +1031,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
969 do { 1031 do {
970 u8 cap_type; 1032 u8 cap_type;
971 1033
972 /* the HT capability type byte is 3 bytes after the 1034 /*
1035 * The HT capability type byte is 3 bytes after the
973 * capability byte. 1036 * capability byte.
974 */ 1037 */
975 if (pci_read_config_byte(pdev, pos + 3, &cap_type)) { 1038 if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
@@ -982,6 +1045,8 @@ static int ipath_setup_ht_config(struct ipath_devdata *dd,
982 } while ((pos = pci_find_next_capability(pdev, pos, 1045 } while ((pos = pci_find_next_capability(pdev, pos,
983 PCI_CAP_ID_HT))); 1046 PCI_CAP_ID_HT)));
984 1047
1048 dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
1049
985bail: 1050bail:
986 return ret; 1051 return ret;
987} 1052}
@@ -1074,11 +1139,55 @@ static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
1074 1139
1075static void ipath_init_ht_variables(struct ipath_devdata *dd) 1140static void ipath_init_ht_variables(struct ipath_devdata *dd)
1076{ 1141{
1142 /*
1143 * setup the register offsets, since they are different for each
1144 * chip
1145 */
1146 dd->ipath_kregs = &ipath_ht_kregs;
1147 dd->ipath_cregs = &ipath_ht_cregs;
1148
1077 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; 1149 dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
1078 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; 1150 dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
1079 dd->ipath_gpio_sda = IPATH_GPIO_SDA; 1151 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
1080 dd->ipath_gpio_scl = IPATH_GPIO_SCL; 1152 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
1081 1153
1154 /*
1155 * Fill in data for field-values that change in newer chips.
1156 * We dynamically specify only the mask for LINKTRAININGSTATE
1157 * and only the shift for LINKSTATE, as they are the only ones
1158 * that change. Also precalculate the 3 link states of interest
1159 * and the combined mask.
1160 */
1161 dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
1162 dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
1163 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
1164 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
1165 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1166 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1167 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
1168 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1169 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1170 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
1171 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
1172 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
1173 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
1174
1175 /*
1176 * Fill in data for ibcc field-values that change in newer chips.
1177 * We dynamically specify only the mask for LINKINITCMD
1178 * and only the shift for LINKCMD and MAXPKTLEN, as they are
1179 * the only ones that change.
1180 */
1181 dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
1182 dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
1183 dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
1184
1185 /* Fill in shifts for RcvCtrl. */
1186 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
1187 dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
1188 dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
1189 dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
1190
1082 dd->ipath_i_bitsextant = 1191 dd->ipath_i_bitsextant =
1083 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | 1192 (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
1084 (INFINIPATH_I_RCVAVAIL_MASK << 1193 (INFINIPATH_I_RCVAVAIL_MASK <<
@@ -1135,6 +1244,8 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
1135 1244
1136 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; 1245 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
1137 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; 1246 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1247 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1248 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
1138 1249
1139 /* 1250 /*
1140 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. 1251 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
@@ -1148,9 +1259,17 @@ static void ipath_init_ht_variables(struct ipath_devdata *dd)
1148 INFINIPATH_HWE_RXEMEMPARITYERR_MASK << 1259 INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
1149 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; 1260 INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
1150 1261
1151 dd->ipath_eep_st_masks[2].errs_to_log = 1262 dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
1152 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
1153 1263
1264 dd->delay_mult = 2; /* SDR, 4X, can't change */
1265
1266 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
1267 dd->ipath_link_speed_supported = IPATH_IB_SDR;
1268 dd->ipath_link_width_enabled = IB_WIDTH_4X;
1269 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
1270 /* these can't change for this chip, so set once */
1271 dd->ipath_link_width_active = dd->ipath_link_width_enabled;
1272 dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
1154} 1273}
1155 1274
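
Precomputing ibcs_mask and the ib_init/ib_arm/ib_active patterns turns every later link-state test into a single mask-and-compare, with no chip-specific shifts at the call site. A hypothetical caller:

	/* one masked compare per link state of interest */
	static int ib_link_is_active(struct ipath_devdata *dd, u64 ibcstatus)
	{
		return (ibcstatus & dd->ibcs_mask) == dd->ib_active;
	}
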
1156/** 1275/**
@@ -1205,14 +1324,16 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
1205 val &= ~INFINIPATH_HWE_HTCMISCERR4; 1324 val &= ~INFINIPATH_HWE_HTCMISCERR4;
1206 1325
1207 /* 1326 /*
1208 * PLL ignored because MDIO interface has a logic problem 1327 * PLL ignored because unused MDIO interface has a logic problem
1209 * for reads, on Comstock and Ponderosa. BRINGUP
1210 */ 1328 */
1211 if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9) 1329 if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
1212 val &= ~INFINIPATH_HWE_SERDESPLLFAILED; 1330 val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
1213 dd->ipath_hwerrmask = val; 1331 dd->ipath_hwerrmask = val;
1214} 1332}
1215 1333
1334
1335
1336
1216/** 1337/**
1217 * ipath_ht_bringup_serdes - bring up the serdes 1338 * ipath_ht_bringup_serdes - bring up the serdes
1218 * @dd: the infinipath device 1339 * @dd: the infinipath device
@@ -1284,16 +1405,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
1284 } 1405 }
1285 1406
1286 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); 1407 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1287 if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) &
1288 INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
1289 val &= ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
1290 INFINIPATH_XGXS_MDIOADDR_SHIFT);
1291 /*
1292 * we use address 3
1293 */
1294 val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
1295 change = 1;
1296 }
1297 if (val & INFINIPATH_XGXS_RESET) { 1408 if (val & INFINIPATH_XGXS_RESET) {
1298 /* normally true after boot */ 1409 /* normally true after boot */
1299 val &= ~INFINIPATH_XGXS_RESET; 1410 val &= ~INFINIPATH_XGXS_RESET;
@@ -1329,21 +1440,6 @@ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
1329 (unsigned long long) 1440 (unsigned long long)
1330 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); 1441 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
1331 1442
1332 if (!ipath_waitfor_mdio_cmdready(dd)) {
1333 ipath_write_kreg(dd, dd->ipath_kregs->kr_mdio,
1334 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
1335 IPATH_MDIO_CTRL_XGXS_REG_8,
1336 0));
1337 if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
1338 IPATH_MDIO_DATAVALID, &val))
1339 ipath_dbg("Never got MDIO data for XGXS status "
1340 "read\n");
1341 else
1342 ipath_cdbg(VERBOSE, "MDIO Read reg8, "
1343 "'bank' 31 %x\n", (u32) val);
1344 } else
1345 ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
1346
1347 return ret; /* for now, say we always succeeded */ 1443 return ret; /* for now, say we always succeeded */
1348} 1444}
1349 1445
@@ -1396,6 +1492,7 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
1396 pa |= lenvalid | INFINIPATH_RT_VALID; 1492 pa |= lenvalid | INFINIPATH_RT_VALID;
1397 } 1493 }
1398 } 1494 }
1495
1399 writeq(pa, tidptr); 1496 writeq(pa, tidptr);
1400} 1497}
1401 1498
@@ -1526,8 +1623,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
1526 } 1623 }
1527 1624
1528 ipath_get_eeprom_info(dd); 1625 ipath_get_eeprom_info(dd);
1529 if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' && 1626 if (dd->ipath_boardrev == 5) {
1530 dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
1531 /* 1627 /*
1532 * Later production QHT7040 has same changes as QHT7140, so 1628 * Later production QHT7040 has same changes as QHT7140, so
1533 * can use GPIO interrupts. They have serial #'s starting 1629 * can use GPIO interrupts. They have serial #'s starting
@@ -1602,6 +1698,210 @@ static void ipath_ht_free_irq(struct ipath_devdata *dd)
1602 dd->ipath_intconfig = 0; 1698 dd->ipath_intconfig = 0;
1603} 1699}
1604 1700
1701static struct ipath_message_header *
1702ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1703{
1704 return (struct ipath_message_header *)
1705 &rhf_addr[sizeof(u64) / sizeof(u32)];
1706}
1707
1708static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
1709{
1710 dd->ipath_portcnt =
1711 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1712 dd->ipath_p0_rcvegrcnt =
1713 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
1714}
1715
1716static void ipath_ht_read_counters(struct ipath_devdata *dd,
1717 struct infinipath_counters *cntrs)
1718{
1719 cntrs->LBIntCnt =
1720 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
1721 cntrs->LBFlowStallCnt =
1722 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
1723 cntrs->TxSDmaDescCnt = 0;
1724 cntrs->TxUnsupVLErrCnt =
1725 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
1726 cntrs->TxDataPktCnt =
1727 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
1728 cntrs->TxFlowPktCnt =
1729 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
1730 cntrs->TxDwordCnt =
1731 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
1732 cntrs->TxLenErrCnt =
1733 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
1734 cntrs->TxMaxMinLenErrCnt =
1735 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
1736 cntrs->TxUnderrunCnt =
1737 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
1738 cntrs->TxFlowStallCnt =
1739 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
1740 cntrs->TxDroppedPktCnt =
1741 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
1742 cntrs->RxDroppedPktCnt =
1743 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
1744 cntrs->RxDataPktCnt =
1745 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
1746 cntrs->RxFlowPktCnt =
1747 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
1748 cntrs->RxDwordCnt =
1749 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
1750 cntrs->RxLenErrCnt =
1751 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
1752 cntrs->RxMaxMinLenErrCnt =
1753 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
1754 cntrs->RxICRCErrCnt =
1755 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
1756 cntrs->RxVCRCErrCnt =
1757 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
1758 cntrs->RxFlowCtrlErrCnt =
1759 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
1760 cntrs->RxBadFormatCnt =
1761 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
1762 cntrs->RxLinkProblemCnt =
1763 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
1764 cntrs->RxEBPCnt =
1765 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
1766 cntrs->RxLPCRCErrCnt =
1767 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
1768 cntrs->RxBufOvflCnt =
1769 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
1770 cntrs->RxTIDFullErrCnt =
1771 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
1772 cntrs->RxTIDValidErrCnt =
1773 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
1774 cntrs->RxPKeyMismatchCnt =
1775 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
1776 cntrs->RxP0HdrEgrOvflCnt =
1777 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
1778 cntrs->RxP1HdrEgrOvflCnt =
1779 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
1780 cntrs->RxP2HdrEgrOvflCnt =
1781 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
1782 cntrs->RxP3HdrEgrOvflCnt =
1783 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
1784 cntrs->RxP4HdrEgrOvflCnt =
1785 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
1786 cntrs->RxP5HdrEgrOvflCnt =
1787 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
1788 cntrs->RxP6HdrEgrOvflCnt =
1789 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
1790 cntrs->RxP7HdrEgrOvflCnt =
1791 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
1792 cntrs->RxP8HdrEgrOvflCnt =
1793 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
1794 cntrs->RxP9HdrEgrOvflCnt = 0;
1795 cntrs->RxP10HdrEgrOvflCnt = 0;
1796 cntrs->RxP11HdrEgrOvflCnt = 0;
1797 cntrs->RxP12HdrEgrOvflCnt = 0;
1798 cntrs->RxP13HdrEgrOvflCnt = 0;
1799 cntrs->RxP14HdrEgrOvflCnt = 0;
1800 cntrs->RxP15HdrEgrOvflCnt = 0;
1801 cntrs->RxP16HdrEgrOvflCnt = 0;
1802 cntrs->IBStatusChangeCnt =
1803 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
1804 cntrs->IBLinkErrRecoveryCnt =
1805 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
1806 cntrs->IBLinkDownedCnt =
1807 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
1808 cntrs->IBSymbolErrCnt =
1809 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
1810 cntrs->RxVL15DroppedPktCnt = 0;
1811 cntrs->RxOtherLocalPhyErrCnt = 0;
1812 cntrs->PcieRetryBufDiagQwordCnt = 0;
1813 cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
1814 cntrs->LocalLinkIntegrityErrCnt =
1815 (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
1816 dd->ipath_lli_errs : dd->ipath_lli_errors;
1817 cntrs->RxVlErrCnt = 0;
1818 cntrs->RxDlidFltrCnt = 0;
1819}
1820
1821
1822/* no interrupt fallback for these chips */
1823static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
1824{
1825 return 0;
1826}
1827
1828
1829/*
1830 * reset the XGXS (between serdes and IBC). Slightly less intrusive
1831 * than resetting the IBC or external link state, and useful in some
1832 * cases to cause some retraining. To do this right, we reset IBC
1833 * as well.
1834 */
1835static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
1836{
1837 u64 val, prev_val;
1838
1839 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1840 val = prev_val | INFINIPATH_XGXS_RESET;
1841 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
1842 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1843 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
1844 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1845 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1846 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
1847 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1848 dd->ipath_control);
1849}
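The scratch-register read in the middle of ipath_ht_xgxs_reset() is the flush idiom this patch uses throughout: MMIO writes over PCI/HT may be posted, and a read from the same device forces the queued writes to reach the chip before the sequence continues. A minimal sketch of the idiom, using the kreg accessors shown in this diff (the function name is invented):

	static void example_ordered_toggle(struct ipath_devdata *dd, u64 bit)
	{
		u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);

		/* assert the bit... */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val | bit);
		/* ...read scratch so the posted write reaches the chip... */
		ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
		/* ...then restore the original value */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
	}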
1850
1851
1852static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
1853{
1854 int ret;
1855
1856 switch (which) {
1857 case IPATH_IB_CFG_LWID:
1858 ret = dd->ipath_link_width_active;
1859 break;
1860 case IPATH_IB_CFG_SPD:
1861 ret = dd->ipath_link_speed_active;
1862 break;
1863 case IPATH_IB_CFG_LWID_ENB:
1864 ret = dd->ipath_link_width_enabled;
1865 break;
1866 case IPATH_IB_CFG_SPD_ENB:
1867 ret = dd->ipath_link_speed_enabled;
1868 break;
1869 default:
1870 ret = -ENOTSUPP;
1871 break;
1872 }
1873 return ret;
1874}
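ipath_ht_get_ib_cfg() follows the usual kernel convention of returning either a non-negative value or a negative errno, so one test covers both cases. A hypothetical caller, sketched only to show the contract:

	static void example_report_width(struct ipath_devdata *dd)
	{
		int lwid = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);

		if (lwid < 0)	/* -ENOTSUPP for keys this chip lacks */
			ipath_dbg("link width query unsupported\n");
		else
			ipath_dbg("active link width flags: 0x%x\n", lwid);
	}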
1875
1876
1877/* we assume range checking is already done, if needed */
1878static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
1879{
1880 int ret = 0;
1881
1882 if (which == IPATH_IB_CFG_LWID_ENB)
1883 dd->ipath_link_width_enabled = val;
1884 else if (which == IPATH_IB_CFG_SPD_ENB)
1885 dd->ipath_link_speed_enabled = val;
1886 else
1887 ret = -ENOTSUPP;
1888 return ret;
1889}
1890
1891
1892static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
1893{
1894}
1895
1896
1897static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
1898{
1899 ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
1900 ipath_ib_linktrstate(dd, ibcs));
1901 return 0;
1902}
1903
1904
1605/** 1905/**
1606 * ipath_init_iba6110_funcs - set up the chip-specific function pointers 1906 * ipath_init_iba6110_funcs - set up the chip-specific function pointers
1607 * @dd: the infinipath device 1907 * @dd: the infinipath device
@@ -1626,22 +1926,19 @@ void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
1626 dd->ipath_f_setextled = ipath_setup_ht_setextled; 1926 dd->ipath_f_setextled = ipath_setup_ht_setextled;
1627 dd->ipath_f_get_base_info = ipath_ht_get_base_info; 1927 dd->ipath_f_get_base_info = ipath_ht_get_base_info;
1628 dd->ipath_f_free_irq = ipath_ht_free_irq; 1928 dd->ipath_f_free_irq = ipath_ht_free_irq;
1629
1630 /*
1631 * initialize chip-specific variables
1632 */
1633 dd->ipath_f_tidtemplate = ipath_ht_tidtemplate; 1929 dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
1930 dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
1931 dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
1932 dd->ipath_f_config_ports = ipath_ht_config_ports;
1933 dd->ipath_f_read_counters = ipath_ht_read_counters;
1934 dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
1935 dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
1936 dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
1937 dd->ipath_f_config_jint = ipath_ht_config_jint;
1938 dd->ipath_f_ib_updown = ipath_ht_ib_updown;
1634 1939
1635 /* 1940 /*
1636 * setup the register offsets, since they are different for each 1941 * initialize chip-specific variables
1637 * chip
1638 */
1639 dd->ipath_kregs = &ipath_ht_kregs;
1640 dd->ipath_cregs = &ipath_ht_cregs;
1641
1642 /*
1643 * do very early init that is needed before ipath_f_bus is
1644 * called
1645 */ 1942 */
1646 ipath_init_ht_variables(dd); 1943 ipath_init_ht_variables(dd);
1647} 1944}
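The block of dd->ipath_f_* assignments above is the whole chip-abstraction mechanism: common code only ever calls through the table, so supporting a new ASIC means supplying one init function rather than editing every caller. A toy, self-contained model of the pattern (all names invented):

	#include <stdio.h>

	struct demo_devdata;

	struct demo_ops {
		void (*config_ports)(struct demo_devdata *, unsigned short);
		int (*get_ib_cfg)(struct demo_devdata *, int which);
	};

	struct demo_devdata {
		struct demo_ops ops;
		int link_width;
	};

	static void ht_config_ports(struct demo_devdata *dd, unsigned short n)
	{
		printf("HT chip: configuring %u ports\n", n);
	}

	static int ht_get_ib_cfg(struct demo_devdata *dd, int which)
	{
		return dd->link_width;	/* the toy ignores 'which' */
	}

	static void init_ht_funcs(struct demo_devdata *dd)	/* per-chip setup */
	{
		dd->ops.config_ports = ht_config_ports;
		dd->ops.get_ib_cfg = ht_get_ib_cfg;
	}

	int main(void)
	{
		struct demo_devdata dd = { .link_width = 4 };

		init_ht_funcs(&dd);		/* chip-specific */
		dd.ops.config_ports(&dd, 9);	/* common code, indirect call */
		printf("width: %d\n", dd.ops.get_ib_cfg(&dd, 0));
		return 0;
	}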
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 0103d6f4847b..c7a2f50824c0 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -145,10 +145,57 @@ struct _infinipath_do_not_use_kernel_regs {
145 unsigned long long Reserved12; 145 unsigned long long Reserved12;
146}; 146};
147 147
148#define IPATH_KREG_OFFSET(field) (offsetof(struct \ 148struct _infinipath_do_not_use_counters {
149 _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) 149 __u64 LBIntCnt;
150 __u64 LBFlowStallCnt;
151 __u64 Reserved1;
152 __u64 TxUnsupVLErrCnt;
153 __u64 TxDataPktCnt;
154 __u64 TxFlowPktCnt;
155 __u64 TxDwordCnt;
156 __u64 TxLenErrCnt;
157 __u64 TxMaxMinLenErrCnt;
158 __u64 TxUnderrunCnt;
159 __u64 TxFlowStallCnt;
160 __u64 TxDroppedPktCnt;
161 __u64 RxDroppedPktCnt;
162 __u64 RxDataPktCnt;
163 __u64 RxFlowPktCnt;
164 __u64 RxDwordCnt;
165 __u64 RxLenErrCnt;
166 __u64 RxMaxMinLenErrCnt;
167 __u64 RxICRCErrCnt;
168 __u64 RxVCRCErrCnt;
169 __u64 RxFlowCtrlErrCnt;
170 __u64 RxBadFormatCnt;
171 __u64 RxLinkProblemCnt;
172 __u64 RxEBPCnt;
173 __u64 RxLPCRCErrCnt;
174 __u64 RxBufOvflCnt;
175 __u64 RxTIDFullErrCnt;
176 __u64 RxTIDValidErrCnt;
177 __u64 RxPKeyMismatchCnt;
178 __u64 RxP0HdrEgrOvflCnt;
179 __u64 RxP1HdrEgrOvflCnt;
180 __u64 RxP2HdrEgrOvflCnt;
181 __u64 RxP3HdrEgrOvflCnt;
182 __u64 RxP4HdrEgrOvflCnt;
183 __u64 RxP5HdrEgrOvflCnt;
184 __u64 RxP6HdrEgrOvflCnt;
185 __u64 RxP7HdrEgrOvflCnt;
186 __u64 RxP8HdrEgrOvflCnt;
187 __u64 Reserved6;
188 __u64 Reserved7;
189 __u64 IBStatusChangeCnt;
190 __u64 IBLinkErrRecoveryCnt;
191 __u64 IBLinkDownedCnt;
192 __u64 IBSymbolErrCnt;
193};
194
195#define IPATH_KREG_OFFSET(field) (offsetof( \
196 struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
150#define IPATH_CREG_OFFSET(field) (offsetof( \ 197#define IPATH_CREG_OFFSET(field) (offsetof( \
151 struct infinipath_counters, field) / sizeof(u64)) 198 struct _infinipath_do_not_use_counters, field) / sizeof(u64))
152 199
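Both macros rely on the registers being a flat array of 64-bit words, so a field's byte offset divided by sizeof(u64) is exactly the chip's qword register index; the private _infinipath_do_not_use_counters copy pins this file to the 6120's layout even if the shared struct later changes. A standalone demonstration of the offsetof arithmetic (field names abbreviated from the struct above):

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct demo_counters {
		uint64_t LBIntCnt;		/* qword index 0 */
		uint64_t LBFlowStallCnt;	/* qword index 1 */
		uint64_t Reserved1;		/* qword index 2 */
		uint64_t TxUnsupVLErrCnt;	/* qword index 3 */
	};

	#define DEMO_CREG_OFFSET(field) \
		(offsetof(struct demo_counters, field) / sizeof(uint64_t))

	int main(void)
	{
		/* prints 3: TxUnsupVLErrCnt is the fourth qword register */
		printf("TxUnsupVLErrCnt -> creg %zu\n",
		       DEMO_CREG_OFFSET(TxUnsupVLErrCnt));
		return 0;
	}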
153static const struct ipath_kregs ipath_pe_kregs = { 200static const struct ipath_kregs ipath_pe_kregs = {
154 .kr_control = IPATH_KREG_OFFSET(Control), 201 .kr_control = IPATH_KREG_OFFSET(Control),
@@ -282,6 +329,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
282#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL 329#define INFINIPATH_HWE_PCIE0PLLFAILED 0x0800000000000000ULL
283#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL 330#define INFINIPATH_HWE_SERDESPLLFAILED 0x1000000000000000ULL
284 331
332#define IBA6120_IBCS_LINKTRAININGSTATE_MASK 0xf
333#define IBA6120_IBCS_LINKSTATE_SHIFT 4
334
285/* kr_extstatus bits */ 335/* kr_extstatus bits */
286#define INFINIPATH_EXTS_FREQSEL 0x2 336#define INFINIPATH_EXTS_FREQSEL 0x2
287#define INFINIPATH_EXTS_SERDESSEL 0x4 337#define INFINIPATH_EXTS_SERDESSEL 0x4
@@ -296,6 +346,9 @@ static const struct ipath_cregs ipath_pe_cregs = {
296#define IPATH_GPIO_SCL (1ULL << \ 346#define IPATH_GPIO_SCL (1ULL << \
297 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) 347 (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
298 348
349#define INFINIPATH_R_INTRAVAIL_SHIFT 16
350#define INFINIPATH_R_TAILUPD_SHIFT 31
351
299/* 6120 specific hardware errors... */ 352/* 6120 specific hardware errors... */
300static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = { 353static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
301 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"), 354 INFINIPATH_HWE_MSG(PCIEPOISONEDTLP, "PCIe Poisoned TLP"),
@@ -320,10 +373,28 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
320 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ 373 INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
321 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) 374 << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
322 375
323static int ipath_pe_txe_recover(struct ipath_devdata *);
324static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *, 376static void ipath_pe_put_tid_2(struct ipath_devdata *, u64 __iomem *,
325 u32, unsigned long); 377 u32, unsigned long);
326 378
379/*
380 * On platforms using this chip, and not having ordered WC stores, we
381 * can get TXE parity errors due to speculative reads to the PIO buffers,
 382 * and this, due to a chip bug, can result in (many) false parity error
383 * reports. So it's a debug print on those, and an info print on systems
384 * where the speculative reads don't occur.
385 */
386static void ipath_pe_txe_recover(struct ipath_devdata *dd)
387{
388 if (ipath_unordered_wc())
389 ipath_dbg("Recovering from TXE PIO parity error\n");
390 else {
391 ++ipath_stats.sps_txeparity;
392 dev_info(&dd->pcidev->dev,
393 "Recovering from TXE PIO parity error\n");
394 }
395}
396
397
327/** 398/**
328 * ipath_pe_handle_hwerrors - display hardware errors. 399 * ipath_pe_handle_hwerrors - display hardware errors.
329 * @dd: the infinipath device 400 * @dd: the infinipath device
@@ -403,35 +474,11 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
403 * occur if a processor speculative read is done to the PIO 474 * occur if a processor speculative read is done to the PIO
404 * buffer while we are sending a packet, for example. 475 * buffer while we are sending a packet, for example.
405 */ 476 */
406 if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd)) 477 if (hwerrs & TXE_PIO_PARITY) {
478 ipath_pe_txe_recover(dd);
407 hwerrs &= ~TXE_PIO_PARITY; 479 hwerrs &= ~TXE_PIO_PARITY;
408 if (hwerrs) { 480 }
409 /* 481 if (!hwerrs) {
410 * if any set that we aren't ignoring only make the
411 * complaint once, in case it's stuck or recurring,
412 * and we get here multiple times
413 * Force link down, so switch knows, and
414 * LEDs are turned off
415 */
416 if (dd->ipath_flags & IPATH_INITTED) {
417 ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
418 ipath_setup_pe_setextled(dd,
419 INFINIPATH_IBCS_L_STATE_DOWN,
420 INFINIPATH_IBCS_LT_STATE_DISABLED);
421 ipath_dev_err(dd, "Fatal Hardware Error (freeze "
422 "mode), no longer usable, SN %.16s\n",
423 dd->ipath_serial);
424 isfatal = 1;
425 }
426 /*
427 * Mark as having had an error for driver, and also
428 * for /sys and status word mapped to user programs.
429 * This marks unit as not usable, until reset
430 */
431 *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
432 *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
433 dd->ipath_flags &= ~IPATH_INITTED;
434 } else {
435 static u32 freeze_cnt; 482 static u32 freeze_cnt;
436 483
437 freeze_cnt++; 484 freeze_cnt++;
@@ -485,7 +532,7 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
485 532
486 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { 533 if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
487 /* 534 /*
488 * If it occurs, it is left masked since the eternal 535 * If it occurs, it is left masked since the external
489 * interface is unused 536 * interface is unused
490 */ 537 */
491 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; 538 dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
@@ -563,6 +610,14 @@ static int ipath_pe_boardname(struct ipath_devdata *dd, char *name,
563 dd->ipath_f_put_tid = ipath_pe_put_tid_2; 610 dd->ipath_f_put_tid = ipath_pe_put_tid_2;
564 } 611 }
565 612
613
614 /*
615 * set here, not in ipath_init_*_funcs because we have to do
616 * it after we can read chip registers.
617 */
618 dd->ipath_ureg_align =
619 ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
620
566 return ret; 621 return ret;
567} 622}
568 623
@@ -667,17 +722,8 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
667 722
668 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); 723 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
669 prev_val = val; 724 prev_val = val;
670 if (((val >> INFINIPATH_XGXS_MDIOADDR_SHIFT) & 725 if (val & INFINIPATH_XGXS_RESET)
671 INFINIPATH_XGXS_MDIOADDR_MASK) != 3) {
672 val &=
673 ~(INFINIPATH_XGXS_MDIOADDR_MASK <<
674 INFINIPATH_XGXS_MDIOADDR_SHIFT);
675 /* MDIO address 3 */
676 val |= 3ULL << INFINIPATH_XGXS_MDIOADDR_SHIFT;
677 }
678 if (val & INFINIPATH_XGXS_RESET) {
679 val &= ~INFINIPATH_XGXS_RESET; 726 val &= ~INFINIPATH_XGXS_RESET;
680 }
681 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & 727 if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
682 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { 728 INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
683 /* need to compensate for Tx inversion in partner */ 729 /* need to compensate for Tx inversion in partner */
@@ -707,21 +753,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
707 (unsigned long long) 753 (unsigned long long)
708 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); 754 ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
709 755
710 if (!ipath_waitfor_mdio_cmdready(dd)) {
711 ipath_write_kreg(
712 dd, dd->ipath_kregs->kr_mdio,
713 ipath_mdio_req(IPATH_MDIO_CMD_READ, 31,
714 IPATH_MDIO_CTRL_XGXS_REG_8, 0));
715 if (ipath_waitfor_complete(dd, dd->ipath_kregs->kr_mdio,
716 IPATH_MDIO_DATAVALID, &val))
717 ipath_dbg("Never got MDIO data for XGXS "
718 "status read\n");
719 else
720 ipath_cdbg(VERBOSE, "MDIO Read reg8, "
721 "'bank' 31 %x\n", (u32) val);
722 } else
723 ipath_dbg("Never got MDIO cmdready for XGXS status read\n");
724
725 return ret; 756 return ret;
726} 757}
727 758
@@ -902,12 +933,27 @@ static int ipath_setup_pe_config(struct ipath_devdata *dd,
902 else 933 else
903 ipath_dev_err(dd, "Can't find PCI Express " 934 ipath_dev_err(dd, "Can't find PCI Express "
904 "capability!\n"); 935 "capability!\n");
936
937 dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
938 dd->ipath_link_speed_supported = IPATH_IB_SDR;
939 dd->ipath_link_width_enabled = IB_WIDTH_4X;
940 dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
941 /* these can't change for this chip, so set once */
942 dd->ipath_link_width_active = dd->ipath_link_width_enabled;
943 dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
905 return 0; 944 return 0;
906} 945}
907 946
908static void ipath_init_pe_variables(struct ipath_devdata *dd) 947static void ipath_init_pe_variables(struct ipath_devdata *dd)
909{ 948{
910 /* 949 /*
950 * setup the register offsets, since they are different for each
951 * chip
952 */
953 dd->ipath_kregs = &ipath_pe_kregs;
954 dd->ipath_cregs = &ipath_pe_cregs;
955
956 /*
911 * bits for selecting i2c direction and values, 957 * bits for selecting i2c direction and values,
912 * used for I2C serial flash 958 * used for I2C serial flash
913 */ 959 */
@@ -916,6 +962,43 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
916 dd->ipath_gpio_sda = IPATH_GPIO_SDA; 962 dd->ipath_gpio_sda = IPATH_GPIO_SDA;
917 dd->ipath_gpio_scl = IPATH_GPIO_SCL; 963 dd->ipath_gpio_scl = IPATH_GPIO_SCL;
918 964
965 /*
966 * Fill in data for field-values that change in newer chips.
967 * We dynamically specify only the mask for LINKTRAININGSTATE
968 * and only the shift for LINKSTATE, as they are the only ones
969 * that change. Also precalculate the 3 link states of interest
970 * and the combined mask.
971 */
972 dd->ibcs_ls_shift = IBA6120_IBCS_LINKSTATE_SHIFT;
973 dd->ibcs_lts_mask = IBA6120_IBCS_LINKTRAININGSTATE_MASK;
974 dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
975 dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
976 dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
977 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
978 (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
979 dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
980 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
981 (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
982 dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
983 INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
984 (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
985
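Precomputing ib_init/ib_arm/ib_active together with ibcs_mask lets generic code test the link state with a single mask-and-compare instead of redoing per-chip shifts on every check. A sketch of the intended use (function name invented):

	static int example_link_is_active(struct ipath_devdata *dd, u64 ibcs)
	{
		/* ibcs is a fresh read of the IBC status register */
		return (ibcs & dd->ibcs_mask) == dd->ib_active;
	}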
986 /*
987 * Fill in data for ibcc field-values that change in newer chips.
988 * We dynamically specify only the mask for LINKINITCMD
989 * and only the shift for LINKCMD and MAXPKTLEN, as they are
990 * the only ones that change.
991 */
992 dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
993 dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
994 dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
995
996 /* Fill in shifts for RcvCtrl. */
997 dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
998 dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
999 dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
1000 dd->ipath_r_portcfg_shift = 0; /* Not on IBA6120 */
1001
919 /* variables for sanity checking interrupt and errors */ 1002 /* variables for sanity checking interrupt and errors */
920 dd->ipath_hwe_bitsextant = 1003 dd->ipath_hwe_bitsextant =
921 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << 1004 (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
@@ -963,6 +1046,8 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
963 1046
964 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; 1047 dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
965 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; 1048 dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
1049 dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
1050 dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
966 1051
967 /* 1052 /*
968 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. 1053 * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
@@ -984,6 +1069,7 @@ static void ipath_init_pe_variables(struct ipath_devdata *dd)
984 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET; 1069 INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET;
985 1070
986 1071
1072 dd->delay_mult = 2; /* SDR, 4X, can't change */
987} 1073}
988 1074
989/* setup the MSI stuff again after a reset. I'd like to just call 1075/* setup the MSI stuff again after a reset. I'd like to just call
@@ -1289,6 +1375,9 @@ static int ipath_pe_early_init(struct ipath_devdata *dd)
1289 */ 1375 */
1290 dd->ipath_rcvhdrentsize = 24; 1376 dd->ipath_rcvhdrentsize = 24;
1291 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; 1377 dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
1378 dd->ipath_rhf_offset = 0;
1379 dd->ipath_egrtidbase = (u64 __iomem *)
1380 ((char __iomem *) dd->ipath_kregbase + dd->ipath_rcvegrbase);
1292 1381
1293 /* 1382 /*
1294 * To truly support a 4KB MTU (for usermode), we need to 1383 * To truly support a 4KB MTU (for usermode), we need to
@@ -1359,34 +1448,204 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd)
1359 dd->ipath_irq = 0; 1448 dd->ipath_irq = 0;
1360} 1449}
1361 1450
1451
1452static struct ipath_message_header *
1453ipath_pe_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
1454{
1455 return (struct ipath_message_header *)
1456 &rhf_addr[sizeof(u64) / sizeof(u32)];
1457}
1458
1459static void ipath_pe_config_ports(struct ipath_devdata *dd, ushort cfgports)
1460{
1461 dd->ipath_portcnt =
1462 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
1463 dd->ipath_p0_rcvegrcnt =
1464 ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
1465}
1466
1467static void ipath_pe_read_counters(struct ipath_devdata *dd,
1468 struct infinipath_counters *cntrs)
1469{
1470 cntrs->LBIntCnt =
1471 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
1472 cntrs->LBFlowStallCnt =
1473 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
1474 cntrs->TxSDmaDescCnt = 0;
1475 cntrs->TxUnsupVLErrCnt =
1476 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
1477 cntrs->TxDataPktCnt =
1478 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
1479 cntrs->TxFlowPktCnt =
1480 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
1481 cntrs->TxDwordCnt =
1482 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
1483 cntrs->TxLenErrCnt =
1484 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
1485 cntrs->TxMaxMinLenErrCnt =
1486 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
1487 cntrs->TxUnderrunCnt =
1488 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
1489 cntrs->TxFlowStallCnt =
1490 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
1491 cntrs->TxDroppedPktCnt =
1492 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
1493 cntrs->RxDroppedPktCnt =
1494 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
1495 cntrs->RxDataPktCnt =
1496 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
1497 cntrs->RxFlowPktCnt =
1498 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
1499 cntrs->RxDwordCnt =
1500 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
1501 cntrs->RxLenErrCnt =
1502 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
1503 cntrs->RxMaxMinLenErrCnt =
1504 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
1505 cntrs->RxICRCErrCnt =
1506 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
1507 cntrs->RxVCRCErrCnt =
1508 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
1509 cntrs->RxFlowCtrlErrCnt =
1510 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
1511 cntrs->RxBadFormatCnt =
1512 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
1513 cntrs->RxLinkProblemCnt =
1514 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
1515 cntrs->RxEBPCnt =
1516 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
1517 cntrs->RxLPCRCErrCnt =
1518 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
1519 cntrs->RxBufOvflCnt =
1520 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
1521 cntrs->RxTIDFullErrCnt =
1522 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
1523 cntrs->RxTIDValidErrCnt =
1524 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
1525 cntrs->RxPKeyMismatchCnt =
1526 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
1527 cntrs->RxP0HdrEgrOvflCnt =
1528 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
1529 cntrs->RxP1HdrEgrOvflCnt =
1530 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
1531 cntrs->RxP2HdrEgrOvflCnt =
1532 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
1533 cntrs->RxP3HdrEgrOvflCnt =
1534 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
1535 cntrs->RxP4HdrEgrOvflCnt =
1536 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
1537 cntrs->RxP5HdrEgrOvflCnt = 0;
1538 cntrs->RxP6HdrEgrOvflCnt = 0;
1539 cntrs->RxP7HdrEgrOvflCnt = 0;
1540 cntrs->RxP8HdrEgrOvflCnt = 0;
1541 cntrs->RxP9HdrEgrOvflCnt = 0;
1542 cntrs->RxP10HdrEgrOvflCnt = 0;
1543 cntrs->RxP11HdrEgrOvflCnt = 0;
1544 cntrs->RxP12HdrEgrOvflCnt = 0;
1545 cntrs->RxP13HdrEgrOvflCnt = 0;
1546 cntrs->RxP14HdrEgrOvflCnt = 0;
1547 cntrs->RxP15HdrEgrOvflCnt = 0;
1548 cntrs->RxP16HdrEgrOvflCnt = 0;
1549 cntrs->IBStatusChangeCnt =
1550 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
1551 cntrs->IBLinkErrRecoveryCnt =
1552 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
1553 cntrs->IBLinkDownedCnt =
1554 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
1555 cntrs->IBSymbolErrCnt =
1556 ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
1557 cntrs->RxVL15DroppedPktCnt = 0;
1558 cntrs->RxOtherLocalPhyErrCnt = 0;
1559 cntrs->PcieRetryBufDiagQwordCnt = 0;
1560 cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
1561 cntrs->LocalLinkIntegrityErrCnt = dd->ipath_lli_errs;
1562 cntrs->RxVlErrCnt = 0;
1563 cntrs->RxDlidFltrCnt = 0;
1564}
1565
1566
1567/* no interrupt fallback for these chips */
1568static int ipath_pe_nointr_fallback(struct ipath_devdata *dd)
1569{
1570 return 0;
1571}
1572
1573
1362/* 1574/*
1363 * On platforms using this chip, and not having ordered WC stores, we 1575 * reset the XGXS (between serdes and IBC). Slightly less intrusive
1364 * can get TXE parity errors due to speculative reads to the PIO buffers, 1576 * than resetting the IBC or external link state, and useful in some
1365 * and this, due to a chip bug can result in (many) false parity error 1577 * cases to cause some retraining. To do this right, we reset IBC
1366 * reports. So it's a debug print on those, and an info print on systems 1578 * as well.
1367 * where the speculative reads don't occur.
1368 * Because we can get lots of false errors, we have no upper limit
1369 * on recovery attempts on those platforms.
1370 */ 1579 */
1371static int ipath_pe_txe_recover(struct ipath_devdata *dd) 1580static void ipath_pe_xgxs_reset(struct ipath_devdata *dd)
1372{ 1581{
1373 if (ipath_unordered_wc()) 1582 u64 val, prev_val;
1374 ipath_dbg("Recovering from TXE PIO parity error\n"); 1583
1375 else { 1584 prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
1376 int cnt = ++ipath_stats.sps_txeparity; 1585 val = prev_val | INFINIPATH_XGXS_RESET;
1377 if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) { 1586 prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
1378 if (cnt == IPATH_MAX_PARITY_ATTEMPTS) 1587 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1379 ipath_dev_err(dd, 1588 dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
1380 "Too many attempts to recover from " 1589 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
1381 "TXE parity, giving up\n"); 1590 ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
1382 return 0; 1591 ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
1383 } 1592 ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
1384 dev_info(&dd->pcidev->dev, 1593 dd->ipath_control);
1385 "Recovering from TXE PIO parity error\n"); 1594}
1595
1596
1597static int ipath_pe_get_ib_cfg(struct ipath_devdata *dd, int which)
1598{
1599 int ret;
1600
1601 switch (which) {
1602 case IPATH_IB_CFG_LWID:
1603 ret = dd->ipath_link_width_active;
1604 break;
1605 case IPATH_IB_CFG_SPD:
1606 ret = dd->ipath_link_speed_active;
1607 break;
1608 case IPATH_IB_CFG_LWID_ENB:
1609 ret = dd->ipath_link_width_enabled;
1610 break;
1611 case IPATH_IB_CFG_SPD_ENB:
1612 ret = dd->ipath_link_speed_enabled;
1613 break;
1614 default:
1615 ret = -ENOTSUPP;
1616 break;
1386 } 1617 }
1387 return 1; 1618 return ret;
1619}
1620
1621
1622/* we assume range checking is already done, if needed */
1623static int ipath_pe_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
1624{
1625 int ret = 0;
1626
1627 if (which == IPATH_IB_CFG_LWID_ENB)
1628 dd->ipath_link_width_enabled = val;
1629 else if (which == IPATH_IB_CFG_SPD_ENB)
1630 dd->ipath_link_speed_enabled = val;
1631 else
1632 ret = -ENOTSUPP;
1633 return ret;
1388} 1634}
1389 1635
1636static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
1637{
1638}
1639
1640
1641static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
1642{
1643 ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs),
1644 ipath_ib_linktrstate(dd, ibcs));
1645 return 0;
1646}
1647
1648
1390/** 1649/**
1391 * ipath_init_iba6120_funcs - set up the chip-specific function pointers 1650 * ipath_init_iba6120_funcs - set up the chip-specific function pointers
1392 * @dd: the infinipath device 1651 * @dd: the infinipath device
@@ -1407,7 +1666,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
1407 dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes; 1666 dd->ipath_f_bringup_serdes = ipath_pe_bringup_serdes;
1408 dd->ipath_f_clear_tids = ipath_pe_clear_tids; 1667 dd->ipath_f_clear_tids = ipath_pe_clear_tids;
1409 /* 1668 /*
1410 * this may get changed after we read the chip revision, 1669 * _f_put_tid may get changed after we read the chip revision,
1411 * but we start with the safe version for all revs 1670 * but we start with the safe version for all revs
1412 */ 1671 */
1413 dd->ipath_f_put_tid = ipath_pe_put_tid; 1672 dd->ipath_f_put_tid = ipath_pe_put_tid;
@@ -1415,17 +1674,19 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *dd)
1415 dd->ipath_f_setextled = ipath_setup_pe_setextled; 1674 dd->ipath_f_setextled = ipath_setup_pe_setextled;
1416 dd->ipath_f_get_base_info = ipath_pe_get_base_info; 1675 dd->ipath_f_get_base_info = ipath_pe_get_base_info;
1417 dd->ipath_f_free_irq = ipath_pe_free_irq; 1676 dd->ipath_f_free_irq = ipath_pe_free_irq;
1418
1419 /* initialize chip-specific variables */
1420 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate; 1677 dd->ipath_f_tidtemplate = ipath_pe_tidtemplate;
1678 dd->ipath_f_intr_fallback = ipath_pe_nointr_fallback;
1679 dd->ipath_f_xgxs_reset = ipath_pe_xgxs_reset;
1680 dd->ipath_f_get_msgheader = ipath_pe_get_msgheader;
1681 dd->ipath_f_config_ports = ipath_pe_config_ports;
1682 dd->ipath_f_read_counters = ipath_pe_read_counters;
1683 dd->ipath_f_get_ib_cfg = ipath_pe_get_ib_cfg;
1684 dd->ipath_f_set_ib_cfg = ipath_pe_set_ib_cfg;
1685 dd->ipath_f_config_jint = ipath_pe_config_jint;
1686 dd->ipath_f_ib_updown = ipath_pe_ib_updown;
1421 1687
1422 /*
1423 * setup the register offsets, since they are different for each
1424 * chip
1425 */
1426 dd->ipath_kregs = &ipath_pe_kregs;
1427 dd->ipath_cregs = &ipath_pe_cregs;
1428 1688
1689 /* initialize chip-specific variables */
1429 ipath_init_pe_variables(dd); 1690 ipath_init_pe_variables(dd);
1430} 1691}
1431 1692
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 9dd0bacf8461..4471674975cd 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -91,7 +91,7 @@ static int create_port0_egr(struct ipath_devdata *dd)
91 struct ipath_skbinfo *skbinfo; 91 struct ipath_skbinfo *skbinfo;
92 int ret; 92 int ret;
93 93
94 egrcnt = dd->ipath_rcvegrcnt; 94 egrcnt = dd->ipath_p0_rcvegrcnt;
95 95
96 skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt); 96 skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
97 if (skbinfo == NULL) { 97 if (skbinfo == NULL) {
@@ -244,8 +244,7 @@ static int init_chip_first(struct ipath_devdata *dd,
244 * cfgports. We do still check and report a difference, if 244 * cfgports. We do still check and report a difference, if
245 * not same (should be impossible). 245 * not same (should be impossible).
246 */ 246 */
247 dd->ipath_portcnt = 247 dd->ipath_f_config_ports(dd, ipath_cfgports);
248 ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
249 if (!ipath_cfgports) 248 if (!ipath_cfgports)
250 dd->ipath_cfgports = dd->ipath_portcnt; 249 dd->ipath_cfgports = dd->ipath_portcnt;
251 else if (ipath_cfgports <= dd->ipath_portcnt) { 250 else if (ipath_cfgports <= dd->ipath_portcnt) {
@@ -272,22 +271,7 @@ static int init_chip_first(struct ipath_devdata *dd,
272 goto done; 271 goto done;
273 } 272 }
274 273
275 dd->ipath_lastegrheads = kzalloc(sizeof(*dd->ipath_lastegrheads)
276 * dd->ipath_cfgports,
277 GFP_KERNEL);
278 dd->ipath_lastrcvhdrqtails =
279 kzalloc(sizeof(*dd->ipath_lastrcvhdrqtails)
280 * dd->ipath_cfgports, GFP_KERNEL);
281
282 if (!dd->ipath_lastegrheads || !dd->ipath_lastrcvhdrqtails) {
283 ipath_dev_err(dd, "Unable to allocate head arrays, "
284 "failing\n");
285 ret = -ENOMEM;
286 goto done;
287 }
288
289 pd = create_portdata0(dd); 274 pd = create_portdata0(dd);
290
291 if (!pd) { 275 if (!pd) {
292 ipath_dev_err(dd, "Unable to allocate portdata for port " 276 ipath_dev_err(dd, "Unable to allocate portdata for port "
293 "0, failing\n"); 277 "0, failing\n");
@@ -345,10 +329,10 @@ static int init_chip_first(struct ipath_devdata *dd,
345 dd->ipath_piobcnt2k, dd->ipath_pio2kbase); 329 dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
346 330
347 spin_lock_init(&dd->ipath_tid_lock); 331 spin_lock_init(&dd->ipath_tid_lock);
348 332 spin_lock_init(&dd->ipath_sendctrl_lock);
349 spin_lock_init(&dd->ipath_gpio_lock); 333 spin_lock_init(&dd->ipath_gpio_lock);
350 spin_lock_init(&dd->ipath_eep_st_lock); 334 spin_lock_init(&dd->ipath_eep_st_lock);
351 sema_init(&dd->ipath_eep_sem, 1); 335 mutex_init(&dd->ipath_eep_lock);
352 336
353done: 337done:
354 *pdp = pd; 338 *pdp = pd;
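The ipath_eep_sem semaphore becomes ipath_eep_lock, a mutex, since the EEPROM paths only ever used it for plain mutual exclusion; a mutex is cheaper and gets lockdep checking. Sketch of the resulting usage (the function is invented):

	static int example_eeprom_op(struct ipath_devdata *dd)
	{
		mutex_lock(&dd->ipath_eep_lock);	/* was down(&ipath_eep_sem) */
		/* ... touch EEPROM / error-log state ... */
		mutex_unlock(&dd->ipath_eep_lock);	/* was up(&ipath_eep_sem) */
		return 0;
	}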
@@ -372,9 +356,9 @@ static int init_chip_reset(struct ipath_devdata *dd,
372 *pdp = dd->ipath_pd[0]; 356 *pdp = dd->ipath_pd[0];
373 /* ensure chip does no sends or receives while we re-initialize */ 357 /* ensure chip does no sends or receives while we re-initialize */
374 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; 358 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
375 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0); 359 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
376 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0); 360 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
377 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0); 361 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
378 362
379 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); 363 rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
380 if (dd->ipath_portcnt != rtmp) 364 if (dd->ipath_portcnt != rtmp)
@@ -487,6 +471,7 @@ static void enable_chip(struct ipath_devdata *dd,
487 struct ipath_portdata *pd, int reinit) 471 struct ipath_portdata *pd, int reinit)
488{ 472{
489 u32 val; 473 u32 val;
474 unsigned long flags;
490 int i; 475 int i;
491 476
492 if (!reinit) 477 if (!reinit)
@@ -495,19 +480,21 @@ static void enable_chip(struct ipath_devdata *dd,
495 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 480 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
496 dd->ipath_rcvctrl); 481 dd->ipath_rcvctrl);
497 482
483 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
498 /* Enable PIO send, and update of PIOavail regs to memory. */ 484 /* Enable PIO send, and update of PIOavail regs to memory. */
499 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE | 485 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
500 INFINIPATH_S_PIOBUFAVAILUPD; 486 INFINIPATH_S_PIOBUFAVAILUPD;
501 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 487 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
502 dd->ipath_sendctrl); 488 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
489 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
503 490
504 /* 491 /*
505 * enable port 0 receive, and receive interrupt. other ports 492 * enable port 0 receive, and receive interrupt. other ports
506 * done as user opens and inits them. 493 * done as user opens and inits them.
507 */ 494 */
508 dd->ipath_rcvctrl = INFINIPATH_R_TAILUPD | 495 dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
509 (1ULL << INFINIPATH_R_PORTENABLE_SHIFT) | 496 (1ULL << dd->ipath_r_portenable_shift) |
510 (1ULL << INFINIPATH_R_INTRAVAIL_SHIFT); 497 (1ULL << dd->ipath_r_intravail_shift);
511 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 498 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
512 dd->ipath_rcvctrl); 499 dd->ipath_rcvctrl);
513 500
@@ -523,12 +510,11 @@ static void enable_chip(struct ipath_devdata *dd,
523 */ 510 */
524 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0); 511 val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
525 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0); 512 (void)ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
526 dd->ipath_port0head = ipath_read_ureg32(dd, ur_rcvhdrtail, 0);
527 513
528 /* Initialize so we interrupt on next packet received */ 514 /* Initialize so we interrupt on next packet received */
529 (void)ipath_write_ureg(dd, ur_rcvhdrhead, 515 (void)ipath_write_ureg(dd, ur_rcvhdrhead,
530 dd->ipath_rhdrhead_intr_off | 516 dd->ipath_rhdrhead_intr_off |
531 dd->ipath_port0head, 0); 517 dd->ipath_pd[0]->port_head, 0);
532 518
533 /* 519 /*
534 * by now pioavail updates to memory should have occurred, so 520 * by now pioavail updates to memory should have occurred, so
@@ -542,12 +528,8 @@ static void enable_chip(struct ipath_devdata *dd,
542 /* 528 /*
543 * Chip Errata bug 6641; even and odd qwords>3 are swapped. 529 * Chip Errata bug 6641; even and odd qwords>3 are swapped.
544 */ 530 */
545 if (i > 3) { 531 if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
546 if (i & 1) 532 val = dd->ipath_pioavailregs_dma[i ^ 1];
547 val = dd->ipath_pioavailregs_dma[i - 1];
548 else
549 val = dd->ipath_pioavailregs_dma[i + 1];
550 }
551 else 533 else
552 val = dd->ipath_pioavailregs_dma[i]; 534 val = dd->ipath_pioavailregs_dma[i];
553 dd->ipath_pioavailshadow[i] = le64_to_cpu(val); 535 dd->ipath_pioavailshadow[i] = le64_to_cpu(val);
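The replacement expression works because errata 6641 swaps qwords in even/odd pairs, and XOR with 1 maps 4<->5, 6<->7, and so on. A standalone check that the new form matches the old open-coded version:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		int i;

		for (i = 4; i < 16; i++) {
			int old_im = (i & 1) ? i - 1 : i + 1;

			assert(old_im == (i ^ 1));	/* 4<->5, 6<->7, ... */
		}
		printf("i ^ 1 swaps each even/odd pair, as the old code did\n");
		return 0;
	}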
@@ -690,12 +672,13 @@ done:
690 */ 672 */
691int ipath_init_chip(struct ipath_devdata *dd, int reinit) 673int ipath_init_chip(struct ipath_devdata *dd, int reinit)
692{ 674{
693 int ret = 0, i; 675 int ret = 0;
694 u32 val32, kpiobufs; 676 u32 val32, kpiobufs;
695 u32 piobufs, uports; 677 u32 piobufs, uports;
696 u64 val; 678 u64 val;
697 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */ 679 struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
698 gfp_t gfp_flags = GFP_USER | __GFP_COMP; 680 gfp_t gfp_flags = GFP_USER | __GFP_COMP;
681 unsigned long flags;
699 682
700 ret = init_housekeeping(dd, &pd, reinit); 683 ret = init_housekeeping(dd, &pd, reinit);
701 if (ret) 684 if (ret)
@@ -746,7 +729,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
746 kpiobufs = ipath_kpiobufs; 729 kpiobufs = ipath_kpiobufs;
747 730
748 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) { 731 if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
749 i = (int) piobufs - 732 int i = (int) piobufs -
750 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT); 733 (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
751 if (i < 0) 734 if (i < 0)
752 i = 0; 735 i = 0;
@@ -827,8 +810,12 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
827 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 810 ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
828 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED); 811 ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
829 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL); 812 ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
830 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 813
831 INFINIPATH_S_PIOENABLE); 814 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
815 dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE;
816 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
817 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
818 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
832 819
833 /* 820 /*
834 * before error clears, since we expect serdes pll errors during 821 * before error clears, since we expect serdes pll errors during
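The locked sequence added here recurs throughout the patch: every update of the ipath_sendctrl shadow and register now happens under ipath_sendctrl_lock, and the write is flushed with a scratch read before the lock drops, so concurrent updaters cannot interleave a stale shadow with a newer hardware write. A condensed sketch of the rule (function name invented):

	static void example_set_sendctrl_bits(struct ipath_devdata *dd, u64 bits)
	{
		unsigned long flags;

		spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
		dd->ipath_sendctrl |= bits;		/* update the shadow... */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
				 dd->ipath_sendctrl);	/* ...and the chip */
		ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); /* flush */
		spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
	}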
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index c61f9da2964a..92e58c921522 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -683,7 +683,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
683 for (i = 0; i < dd->ipath_cfgports; i++) { 683 for (i = 0; i < dd->ipath_cfgports; i++) {
684 struct ipath_portdata *pd = dd->ipath_pd[i]; 684 struct ipath_portdata *pd = dd->ipath_pd[i];
685 if (i == 0) { 685 if (i == 0) {
686 hd = dd->ipath_port0head; 686 hd = pd->port_head;
687 tl = (u32) le64_to_cpu( 687 tl = (u32) le64_to_cpu(
688 *dd->ipath_hdrqtailptr); 688 *dd->ipath_hdrqtailptr);
689 } else if (pd && pd->port_cnt && 689 } else if (pd && pd->port_cnt &&
@@ -693,7 +693,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
693 * except kernel 693 * except kernel
694 */ 694 */
695 tl = *(u64 *) pd->port_rcvhdrtail_kvaddr; 695 tl = *(u64 *) pd->port_rcvhdrtail_kvaddr;
696 if (tl == dd->ipath_lastrcvhdrqtails[i]) 696 if (tl == pd->port_lastrcvhdrqtail)
697 continue; 697 continue;
698 hd = ipath_read_ureg32(dd, ur_rcvhdrhead, 698 hd = ipath_read_ureg32(dd, ur_rcvhdrhead,
699 i); 699 i);
@@ -703,7 +703,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
703 (!hd && tl == dd->ipath_hdrqlast)) { 703 (!hd && tl == dd->ipath_hdrqlast)) {
704 if (i == 0) 704 if (i == 0)
705 chkerrpkts = 1; 705 chkerrpkts = 1;
706 dd->ipath_lastrcvhdrqtails[i] = tl; 706 pd->port_lastrcvhdrqtail = tl;
707 pd->port_hdrqfull++; 707 pd->port_hdrqfull++;
708 /* flush hdrqfull so that poll() sees it */ 708 /* flush hdrqfull so that poll() sees it */
709 wmb(); 709 wmb();
@@ -712,6 +712,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
712 } 712 }
713 } 713 }
714 if (errs & INFINIPATH_E_RRCVEGRFULL) { 714 if (errs & INFINIPATH_E_RRCVEGRFULL) {
715 struct ipath_portdata *pd = dd->ipath_pd[0];
716
715 /* 717 /*
716 * since this is of less importance and not likely to 718 * since this is of less importance and not likely to
717 * happen without also getting hdrfull, only count 719 * happen without also getting hdrfull, only count
@@ -719,7 +721,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
719 * vs user) 721 * vs user)
720 */ 722 */
721 ipath_stats.sps_etidfull++; 723 ipath_stats.sps_etidfull++;
722 if (dd->ipath_port0head != 724 if (pd->port_head !=
723 (u32) le64_to_cpu(*dd->ipath_hdrqtailptr)) 725 (u32) le64_to_cpu(*dd->ipath_hdrqtailptr))
724 chkerrpkts = 1; 726 chkerrpkts = 1;
725 } 727 }
@@ -795,6 +797,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
795{ 797{
796 int i, im; 798 int i, im;
797 __le64 val; 799 __le64 val;
800 unsigned long flags;
798 801
799 /* disable error interrupts, to avoid confusion */ 802 /* disable error interrupts, to avoid confusion */
800 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL); 803 ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
@@ -813,11 +816,14 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
813 dd->ipath_control); 816 dd->ipath_control);
814 817
815 /* ensure pio avail updates continue */ 818 /* ensure pio avail updates continue */
819 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
816 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 820 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
817 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD); 821 dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
818 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 822 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
819 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 823 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
820 dd->ipath_sendctrl); 824 dd->ipath_sendctrl);
825 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
826 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
821 827
822 /* 828 /*
823 * We just enabled pioavailupdate, so dma copy is almost certainly 829 * We just enabled pioavailupdate, so dma copy is almost certainly
@@ -825,8 +831,8 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
825 */ 831 */
826 for (i = 0; i < dd->ipath_pioavregs; i++) { 832 for (i = 0; i < dd->ipath_pioavregs; i++) {
827 /* deal with 6110 chip bug */ 833 /* deal with 6110 chip bug */
828 im = i > 3 ? ((i&1) ? i-1 : i+1) : i; 834 im = i > 3 ? i ^ 1 : i;
829 val = ipath_read_kreg64(dd, (0x1000/sizeof(u64))+im); 835 val = ipath_read_kreg64(dd, (0x1000 / sizeof(u64)) + im);
830 dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i] 836 dd->ipath_pioavailregs_dma[i] = dd->ipath_pioavailshadow[i]
831 = le64_to_cpu(val); 837 = le64_to_cpu(val);
832 } 838 }
@@ -849,7 +855,7 @@ void ipath_clear_freeze(struct ipath_devdata *dd)
849 855
850/* this is separate to allow for better optimization of ipath_intr() */ 856/* this is separate to allow for better optimization of ipath_intr() */
851 857
852static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp) 858static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
853{ 859{
854 /* 860 /*
855 * sometimes happen during driver init and unload, don't want 861 * sometimes happen during driver init and unload, don't want
@@ -877,7 +883,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
877 dd->ipath_f_free_irq(dd); 883 dd->ipath_f_free_irq(dd);
878 } 884 }
879 } 885 }
880 if (ipath_read_kreg32(dd, dd->ipath_kregs->kr_intmask)) { 886 if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
881 ipath_dev_err(dd, "%u unexpected interrupts, " 887 ipath_dev_err(dd, "%u unexpected interrupts, "
882 "disabling interrupts completely\n", 888 "disabling interrupts completely\n",
883 *unexpectp); 889 *unexpectp);
@@ -892,7 +898,7 @@ static void ipath_bad_intr(struct ipath_devdata *dd, u32 * unexpectp)
892 "ignoring\n"); 898 "ignoring\n");
893} 899}
894 900
895static void ipath_bad_regread(struct ipath_devdata *dd) 901static noinline void ipath_bad_regread(struct ipath_devdata *dd)
896{ 902{
897 static int allbits; 903 static int allbits;
898 904
@@ -920,31 +926,9 @@ static void ipath_bad_regread(struct ipath_devdata *dd)
920 } 926 }
921} 927}
922 928
923static void handle_port_pioavail(struct ipath_devdata *dd)
924{
925 u32 i;
926 /*
927 * start from port 1, since for now port 0 is never using
928 * wait_event for PIO
929 */
930 for (i = 1; dd->ipath_portpiowait && i < dd->ipath_cfgports; i++) {
931 struct ipath_portdata *pd = dd->ipath_pd[i];
932
933 if (pd && pd->port_cnt &&
934 dd->ipath_portpiowait & (1U << i)) {
935 clear_bit(i, &dd->ipath_portpiowait);
936 if (test_bit(IPATH_PORT_WAITING_PIO,
937 &pd->port_flag)) {
938 clear_bit(IPATH_PORT_WAITING_PIO,
939 &pd->port_flag);
940 wake_up_interruptible(&pd->port_wait);
941 }
942 }
943 }
944}
945
946static void handle_layer_pioavail(struct ipath_devdata *dd) 929static void handle_layer_pioavail(struct ipath_devdata *dd)
947{ 930{
931 unsigned long flags;
948 int ret; 932 int ret;
949 933
950 ret = ipath_ib_piobufavail(dd->verbs_dev); 934 ret = ipath_ib_piobufavail(dd->verbs_dev);
@@ -953,9 +937,12 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
953 937
954 return; 938 return;
955set: 939set:
956 set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); 940 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
941 dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
957 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 942 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
958 dd->ipath_sendctrl); 943 dd->ipath_sendctrl);
944 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
945 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
959} 946}
960 947
961/* 948/*
@@ -969,7 +956,15 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
969 int i; 956 int i;
970 int rcvdint = 0; 957 int rcvdint = 0;
971 958
972 /* test_bit below needs this... */ 959 /*
960 * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
961 * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
962 * would both like timely updates of the bits so that
963 * we don't pass them by unnecessarily. the rmb()
964 * here ensures that we see them promptly -- the
965 * corresponding wmb()'s are in ipath_poll_urgent()
966 * and ipath_poll_next()...
967 */
973 rmb(); 968 rmb();
974 portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) & 969 portr = ((istat >> INFINIPATH_I_RCVAVAIL_SHIFT) &
975 dd->ipath_i_rcvavail_mask) 970 dd->ipath_i_rcvavail_mask)
@@ -980,7 +975,7 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
980 if (portr & (1 << i) && pd && pd->port_cnt) { 975 if (portr & (1 << i) && pd && pd->port_cnt) {
981 if (test_and_clear_bit(IPATH_PORT_WAITING_RCV, 976 if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
982 &pd->port_flag)) { 977 &pd->port_flag)) {
983 clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT, 978 clear_bit(i + dd->ipath_r_intravail_shift,
984 &dd->ipath_rcvctrl); 979 &dd->ipath_rcvctrl);
985 wake_up_interruptible(&pd->port_wait); 980 wake_up_interruptible(&pd->port_wait);
986 rcvdint = 1; 981 rcvdint = 1;
@@ -1039,7 +1034,7 @@ irqreturn_t ipath_intr(int irq, void *data)
1039 goto bail; 1034 goto bail;
1040 } 1035 }
1041 1036
1042 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); 1037 istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
1043 1038
1044 if (unlikely(!istat)) { 1039 if (unlikely(!istat)) {
1045 ipath_stats.sps_nullintr++; 1040 ipath_stats.sps_nullintr++;
@@ -1180,7 +1175,7 @@ irqreturn_t ipath_intr(int irq, void *data)
1180 * for receive are at the bottom. 1175 * for receive are at the bottom.
1181 */ 1176 */
1182 if (chk0rcv) { 1177 if (chk0rcv) {
1183 ipath_kreceive(dd); 1178 ipath_kreceive(dd->ipath_pd[0]);
1184 istat &= ~port0rbits; 1179 istat &= ~port0rbits;
1185 } 1180 }
1186 1181
@@ -1191,12 +1186,14 @@ irqreturn_t ipath_intr(int irq, void *data)
1191 handle_urcv(dd, istat); 1186 handle_urcv(dd, istat);
1192 1187
1193 if (istat & INFINIPATH_I_SPIOBUFAVAIL) { 1188 if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
1194 clear_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); 1189 unsigned long flags;
1190
1191 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
1192 dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
1195 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 1193 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
1196 dd->ipath_sendctrl); 1194 dd->ipath_sendctrl);
1197 1195 ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
1198 if (dd->ipath_portpiowait) 1196 spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
1199 handle_port_pioavail(dd);
1200 1197
1201 handle_layer_pioavail(dd); 1198 handle_layer_pioavail(dd);
1202 } 1199 }
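Replacing the INFINIPATH_R_* constants with dd->ipath_r_*_shift fields means this interrupt path no longer hard-codes the 6110/6120 RcvCtrl layout. An illustrative helper (invented) showing how a port's receive-available bit is now formed:

	static u64 example_port_intravail_bit(struct ipath_devdata *dd,
					      unsigned port)
	{
		/* IBA6120 sets ipath_r_intravail_shift to 16; other
		 * chips can relocate the field without touching callers */
		return 1ULL << (port + dd->ipath_r_intravail_shift);
	}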
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 8786dd7922e4..4cc0f95ea877 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/pci.h> 42#include <linux/pci.h>
43#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
44#include <linux/mutex.h>
44#include <asm/io.h> 45#include <asm/io.h>
45#include <rdma/ib_verbs.h> 46#include <rdma/ib_verbs.h>
46 47
@@ -140,6 +141,11 @@ struct ipath_portdata {
140 u32 port_pionowait; 141 u32 port_pionowait;
141 /* total number of rcvhdrqfull errors */ 142 /* total number of rcvhdrqfull errors */
142 u32 port_hdrqfull; 143 u32 port_hdrqfull;
144 /*
145 * Used to suppress multiple instances of same
146 * port staying stuck at same point.
147 */
148 u32 port_lastrcvhdrqtail;
143 /* saved total number of rcvhdrqfull errors for poll edge trigger */ 149 /* saved total number of rcvhdrqfull errors for poll edge trigger */
144 u32 port_hdrqfull_poll; 150 u32 port_hdrqfull_poll;
145 /* total number of polled urgent packets */ 151 /* total number of polled urgent packets */
@@ -148,6 +154,7 @@ struct ipath_portdata {
148 u32 port_urgent_poll; 154 u32 port_urgent_poll;
149 /* pid of process using this port */ 155 /* pid of process using this port */
150 pid_t port_pid; 156 pid_t port_pid;
157 pid_t port_subpid[INFINIPATH_MAX_SUBPORT];
151 /* same size as task_struct .comm[] */ 158 /* same size as task_struct .comm[] */
152 char port_comm[16]; 159 char port_comm[16];
153 /* pkeys set by this use of this port */ 160 /* pkeys set by this use of this port */
@@ -166,6 +173,8 @@ struct ipath_portdata {
166 u32 active_slaves; 173 u32 active_slaves;
167 /* Type of packets or conditions we want to poll for */ 174 /* Type of packets or conditions we want to poll for */
168 u16 poll_type; 175 u16 poll_type;
176 /* port rcvhdrq head offset */
177 u32 port_head;
169}; 178};
170 179
171struct sk_buff; 180struct sk_buff;
@@ -182,6 +191,22 @@ struct ipath_skbinfo {
182 dma_addr_t phys; 191 dma_addr_t phys;
183}; 192};
184 193
194/*
195 * Possible IB config parameters for ipath_f_get/set_ib_cfg()
196 */
197#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
198#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
199#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
200#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
201#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
202#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
203#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
204#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
205#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
206#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
 207#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
208
209
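Not every key is implemented by every chip: the IBA6110/6120 set_ib_cfg hooks above accept only the LWID_ENB and SPD_ENB keys and return -ENOTSUPP for everything else, so the heartbeat and polarity keys are defined here for chips whose hooks do handle them. A hypothetical caller showing the intended error handling:

	static int example_enable_heartbeat(struct ipath_devdata *dd)
	{
		int ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
						 IPATH_IB_HRTBT_ON);

		if (ret == -ENOTSUPP)	/* e.g. on the chips in this patch */
			ipath_dbg("heartbeat not settable on this chip\n");
		return ret;
	}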
185struct ipath_devdata { 210struct ipath_devdata {
186 struct list_head ipath_list; 211 struct list_head ipath_list;
187 212
@@ -222,6 +247,8 @@ struct ipath_devdata {
222 struct _ipath_layer ipath_layer; 247 struct _ipath_layer ipath_layer;
223 /* setup intr */ 248 /* setup intr */
224 int (*ipath_f_intrsetup)(struct ipath_devdata *); 249 int (*ipath_f_intrsetup)(struct ipath_devdata *);
250 /* fallback to alternate interrupt type if possible */
251 int (*ipath_f_intr_fallback)(struct ipath_devdata *);
225 /* setup on-chip bus config */ 252 /* setup on-chip bus config */
226 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *); 253 int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
227 /* hard reset chip */ 254 /* hard reset chip */
@@ -244,6 +271,18 @@ struct ipath_devdata {
244 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *); 271 int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
245 /* free irq */ 272 /* free irq */
246 void (*ipath_f_free_irq)(struct ipath_devdata *); 273 void (*ipath_f_free_irq)(struct ipath_devdata *);
274 struct ipath_message_header *(*ipath_f_get_msgheader)
275 (struct ipath_devdata *, __le32 *);
276 void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
277 int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
278 int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
279 void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
280 void (*ipath_f_read_counters)(struct ipath_devdata *,
281 struct infinipath_counters *);
282 void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
283 /* per chip actions needed for IB Link up/down changes */
284 int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
285
247 struct ipath_ibdev *verbs_dev; 286 struct ipath_ibdev *verbs_dev;
248 struct timer_list verbs_timer; 287 struct timer_list verbs_timer;
249 /* total dwords sent (summed from counter) */ 288 /* total dwords sent (summed from counter) */
@@ -313,22 +352,12 @@ struct ipath_devdata {
313 * supports, less gives more pio bufs/port, etc. 352 * supports, less gives more pio bufs/port, etc.
314 */ 353 */
315 u32 ipath_cfgports; 354 u32 ipath_cfgports;
316 /* port0 rcvhdrq head offset */
317 u32 ipath_port0head;
318 /* count of port 0 hdrqfull errors */ 355 /* count of port 0 hdrqfull errors */
319 u32 ipath_p0_hdrqfull; 356 u32 ipath_p0_hdrqfull;
357 /* port 0 number of receive eager buffers */
358 u32 ipath_p0_rcvegrcnt;
320 359
321 /* 360 /*
322 * (*cfgports) used to suppress multiple instances of same
323 * port staying stuck at same point
324 */
325 u32 *ipath_lastrcvhdrqtails;
326 /*
327 * (*cfgports) used to suppress multiple instances of same
328 * port staying stuck at same point
329 */
330 u32 *ipath_lastegrheads;
331 /*
332 * index of last piobuffer we used. Speeds up searching, by 361 * index of last piobuffer we used. Speeds up searching, by
333 * starting at this point. Doesn't matter if multiple cpu's use and 362 * starting at this point. Doesn't matter if multiple cpu's use and
334 * update, last updater is only write that matters. Whenever it 363 * update, last updater is only write that matters. Whenever it
@@ -367,14 +396,15 @@ struct ipath_devdata {
367 unsigned long ipath_wc_len; 396 unsigned long ipath_wc_len;
368 /* ref count for each pkey */ 397 /* ref count for each pkey */
369 atomic_t ipath_pkeyrefs[4]; 398 atomic_t ipath_pkeyrefs[4];
370 /* shadow copy of all exptids physaddr; used only by funcsim */
371 u64 *ipath_tidsimshadow;
372 /* shadow copy of struct page *'s for exp tid pages */ 399 /* shadow copy of struct page *'s for exp tid pages */
373 struct page **ipath_pageshadow; 400 struct page **ipath_pageshadow;
374 /* shadow copy of dma handles for exp tid pages */ 401 /* shadow copy of dma handles for exp tid pages */
375 dma_addr_t *ipath_physshadow; 402 dma_addr_t *ipath_physshadow;
376 /* lock to workaround chip bug 9437 */ 403 u64 __iomem *ipath_egrtidbase;
404 /* lock to workaround chip bug 9437 and others */
405 spinlock_t ipath_kernel_tid_lock;
377 spinlock_t ipath_tid_lock; 406 spinlock_t ipath_tid_lock;
407 spinlock_t ipath_sendctrl_lock;
378 408
379 /* 409 /*
380 * IPATH_STATUS_*, 410 * IPATH_STATUS_*,
@@ -395,6 +425,8 @@ struct ipath_devdata {
395 void *ipath_dummy_hdrq; /* used after port close */ 425 void *ipath_dummy_hdrq; /* used after port close */
396 dma_addr_t ipath_dummy_hdrq_phys; 426 dma_addr_t ipath_dummy_hdrq_phys;
397 427
428 unsigned long ipath_ureg_align; /* user register alignment */
429
398 /* 430 /*
399 * Shadow copies of registers; size indicates read access size. 431 * Shadow copies of registers; size indicates read access size.
400 * Most of them are readonly, but some are write-only register, 432 * Most of them are readonly, but some are write-only register,
@@ -456,8 +488,6 @@ struct ipath_devdata {
456 unsigned long ipath_rcvctrl; 488 unsigned long ipath_rcvctrl;
457 /* shadow kr_sendctrl */ 489 /* shadow kr_sendctrl */
458 unsigned long ipath_sendctrl; 490 unsigned long ipath_sendctrl;
459 /* ports waiting for PIOavail intr */
460 unsigned long ipath_portpiowait;
461 unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */ 491 unsigned long ipath_lastcancel; /* to not count armlaunch after cancel */
462 492
463 /* value we put in kr_rcvhdrcnt */ 493 /* value we put in kr_rcvhdrcnt */
@@ -550,12 +580,26 @@ struct ipath_devdata {
550 u8 ipath_minrev; 580 u8 ipath_minrev;
551 /* board rev, from ipath_revision */ 581 /* board rev, from ipath_revision */
552 u8 ipath_boardrev; 582 u8 ipath_boardrev;
583
584 u8 ipath_r_portenable_shift;
585 u8 ipath_r_intravail_shift;
586 u8 ipath_r_tailupd_shift;
587 u8 ipath_r_portcfg_shift;
588
553 /* unit # of this chip, if present */ 589 /* unit # of this chip, if present */
554 int ipath_unit; 590 int ipath_unit;
555 /* saved for restore after reset */ 591 /* saved for restore after reset */
556 u8 ipath_pci_cacheline; 592 u8 ipath_pci_cacheline;
557 /* LID mask control */ 593 /* LID mask control */
558 u8 ipath_lmc; 594 u8 ipath_lmc;
595 /* link width supported */
596 u8 ipath_link_width_supported;
597 /* link speed supported */
598 u8 ipath_link_speed_supported;
599 u8 ipath_link_width_enabled;
600 u8 ipath_link_speed_enabled;
601 u8 ipath_link_width_active;
602 u8 ipath_link_speed_active;
559 /* Rx Polarity inversion (compensate for ~tx on partner) */ 603 /* Rx Polarity inversion (compensate for ~tx on partner) */
560 u8 ipath_rx_pol_inv; 604 u8 ipath_rx_pol_inv;
561 605
@@ -590,6 +634,8 @@ struct ipath_devdata {
590 */ 634 */
591 u32 ipath_i_rcvavail_mask; 635 u32 ipath_i_rcvavail_mask;
592 u32 ipath_i_rcvurg_mask; 636 u32 ipath_i_rcvurg_mask;
637 u16 ipath_i_rcvurg_shift;
638 u16 ipath_i_rcvavail_shift;
593 639
594 /* 640 /*
595 * Register bits for selecting i2c direction and values, used for 641 * Register bits for selecting i2c direction and values, used for
@@ -603,6 +649,29 @@ struct ipath_devdata {
603 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */ 649 /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
604 spinlock_t ipath_gpio_lock; 650 spinlock_t ipath_gpio_lock;
605 651
652 /*
653 * IB link and linktraining states and masks that vary per chip in
654 * some way. Set at init, to avoid each IB status change interrupt
655 */
656 u8 ibcs_ls_shift;
657 u8 ibcs_lts_mask;
658 u32 ibcs_mask;
659 u32 ib_init;
660 u32 ib_arm;
661 u32 ib_active;
662
663 u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
664
665 /*
666 * shift/mask for linkcmd, linkinitcmd, maxpktlen in ibccontol
667 * reg. Changes for IBA7220
668 */
669 u8 ibcc_lic_mask; /* LinkInitCmd */
670 u8 ibcc_lc_shift; /* LinkCmd */
671 u8 ibcc_mpl_shift; /* Maxpktlen */
672
673 u8 delay_mult;
674
606 /* used to override LED behavior */ 675 /* used to override LED behavior */
607 u8 ipath_led_override; /* Substituted for normal value, if non-zero */ 676 u8 ipath_led_override; /* Substituted for normal value, if non-zero */
608 u16 ipath_led_override_timeoff; /* delta to next timer event */ 677 u16 ipath_led_override_timeoff; /* delta to next timer event */
@@ -616,7 +685,7 @@ struct ipath_devdata {
616 /* control access to actual counters, timer */ 685 /* control access to actual counters, timer */
617 spinlock_t ipath_eep_st_lock; 686 spinlock_t ipath_eep_st_lock;
618 /* control high-level access to EEPROM */ 687 /* control high-level access to EEPROM */
619 struct semaphore ipath_eep_sem; 688 struct mutex ipath_eep_lock;
620 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */ 689 /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
621 uint64_t ipath_traffic_wds; 690 uint64_t ipath_traffic_wds;
622 /* active time is kept in seconds, but logged in hours */ 691 /* active time is kept in seconds, but logged in hours */
@@ -630,6 +699,10 @@ struct ipath_devdata {
630 * each of the counters to increment. 699 * each of the counters to increment.
631 */ 700 */
632 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT]; 701 struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
702
703 /* interrupt mitigation reload register info */
704 u16 ipath_jint_idle_ticks; /* idle clock ticks */
705 u16 ipath_jint_max_packets; /* max packets across all ports */
633}; 706};
634 707
635/* Private data for file operations */ 708/* Private data for file operations */
@@ -690,7 +763,7 @@ void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
690 763
691int ipath_parse_ushort(const char *str, unsigned short *valp); 764int ipath_parse_ushort(const char *str, unsigned short *valp);
692 765
693void ipath_kreceive(struct ipath_devdata *); 766void ipath_kreceive(struct ipath_portdata *);
694int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned); 767int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
695int ipath_reset_device(int); 768int ipath_reset_device(int);
696void ipath_get_faststats(unsigned long); 769void ipath_get_faststats(unsigned long);
@@ -698,6 +771,8 @@ int ipath_set_linkstate(struct ipath_devdata *, u8);
698int ipath_set_mtu(struct ipath_devdata *, u16); 771int ipath_set_mtu(struct ipath_devdata *, u16);
699int ipath_set_lid(struct ipath_devdata *, u32, u8); 772int ipath_set_lid(struct ipath_devdata *, u32, u8);
700int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv); 773int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
774void ipath_enable_armlaunch(struct ipath_devdata *);
775void ipath_disable_armlaunch(struct ipath_devdata *);
701 776
702/* for use in system calls, where we want to know device type, etc. */ 777/* for use in system calls, where we want to know device type, etc. */
703#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd 778#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
@@ -744,9 +819,15 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
744 * are 64bit */ 819 * are 64bit */
745#define IPATH_32BITCOUNTERS 0x20000 820#define IPATH_32BITCOUNTERS 0x20000
746 /* can miss port0 rx interrupts */ 821 /* can miss port0 rx interrupts */
822 /* Interrupt register is 64 bits */
823#define IPATH_INTREG_64 0x40000
747#define IPATH_DISABLED 0x80000 /* administratively disabled */ 824#define IPATH_DISABLED 0x80000 /* administratively disabled */
748 /* Use GPIO interrupts for new counters */ 825 /* Use GPIO interrupts for new counters */
749#define IPATH_GPIO_ERRINTRS 0x100000 826#define IPATH_GPIO_ERRINTRS 0x100000
827#define IPATH_SWAP_PIOBUFS 0x200000
828 /* Suppress heartbeat, even if turning off loopback */
829#define IPATH_NO_HRTBT 0x1000000
830#define IPATH_HAS_MULT_IB_SPEED 0x8000000
750 831
751/* Bits in GPIO for the added interrupts */ 832/* Bits in GPIO for the added interrupts */
752#define IPATH_GPIO_PORT0_BIT 2 833#define IPATH_GPIO_PORT0_BIT 2
@@ -758,8 +839,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
758/* portdata flag bit offsets */ 839/* portdata flag bit offsets */
759 /* waiting for a packet to arrive */ 840 /* waiting for a packet to arrive */
760#define IPATH_PORT_WAITING_RCV 2 841#define IPATH_PORT_WAITING_RCV 2
761 /* waiting for a PIO buffer to be available */
762#define IPATH_PORT_WAITING_PIO 3
763 /* master has not finished initializing */ 842 /* master has not finished initializing */
764#define IPATH_PORT_MASTER_UNINIT 4 843#define IPATH_PORT_MASTER_UNINIT 4
765 /* waiting for an urgent packet to arrive */ 844 /* waiting for an urgent packet to arrive */
@@ -767,8 +846,6 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
767 846
768/* free up any allocated data at closes */ 847/* free up any allocated data at closes */
769void ipath_free_data(struct ipath_portdata *dd); 848void ipath_free_data(struct ipath_portdata *dd);
770int ipath_waitfor_mdio_cmdready(struct ipath_devdata *);
771int ipath_waitfor_complete(struct ipath_devdata *, ipath_kreg, u64, u64 *);
772u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *); 849u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32 *);
773void ipath_init_iba6120_funcs(struct ipath_devdata *); 850void ipath_init_iba6120_funcs(struct ipath_devdata *);
774void ipath_init_iba6110_funcs(struct ipath_devdata *); 851void ipath_init_iba6110_funcs(struct ipath_devdata *);
@@ -792,33 +869,6 @@ void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
792 */ 869 */
793#define IPATH_DFLT_RCVHDRSIZE 9 870#define IPATH_DFLT_RCVHDRSIZE 9
794 871
795#define IPATH_MDIO_CMD_WRITE 1
796#define IPATH_MDIO_CMD_READ 2
797#define IPATH_MDIO_CLD_DIV 25 /* to get 2.5 Mhz mdio clock */
798#define IPATH_MDIO_CMDVALID 0x40000000 /* bit 30 */
799#define IPATH_MDIO_DATAVALID 0x80000000 /* bit 31 */
800#define IPATH_MDIO_CTRL_STD 0x0
801
802static inline u64 ipath_mdio_req(int cmd, int dev, int reg, int data)
803{
804 return (((u64) IPATH_MDIO_CLD_DIV) << 32) |
805 (cmd << 26) |
806 (dev << 21) |
807 (reg << 16) |
808 (data & 0xFFFF);
809}
810
811 /* signal and fifo status, in bank 31 */
812#define IPATH_MDIO_CTRL_XGXS_REG_8 0x8
813 /* controls loopback, redundancy */
814#define IPATH_MDIO_CTRL_8355_REG_1 0x10
815 /* premph, encdec, etc. */
816#define IPATH_MDIO_CTRL_8355_REG_2 0x11
817 /* Kchars, etc. */
818#define IPATH_MDIO_CTRL_8355_REG_6 0x15
819#define IPATH_MDIO_CTRL_8355_REG_9 0x18
820#define IPATH_MDIO_CTRL_8355_REG_10 0x1D
821
822int ipath_get_user_pages(unsigned long, size_t, struct page **); 872int ipath_get_user_pages(unsigned long, size_t, struct page **);
823void ipath_release_user_pages(struct page **, size_t); 873void ipath_release_user_pages(struct page **, size_t);
824void ipath_release_user_pages_on_close(struct page **, size_t); 874void ipath_release_user_pages_on_close(struct page **, size_t);
@@ -863,7 +913,7 @@ static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
863 return readl(regno + (u64 __iomem *) 913 return readl(regno + (u64 __iomem *)
864 (dd->ipath_uregbase + 914 (dd->ipath_uregbase +
865 (char __iomem *)dd->ipath_kregbase + 915 (char __iomem *)dd->ipath_kregbase +
866 dd->ipath_palign * port)); 916 dd->ipath_ureg_align * port));
867} 917}
868 918
869/** 919/**
@@ -880,7 +930,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
880{ 930{
881 u64 __iomem *ubase = (u64 __iomem *) 931 u64 __iomem *ubase = (u64 __iomem *)
882 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase + 932 (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
883 dd->ipath_palign * port); 933 dd->ipath_ureg_align * port);
884 if (dd->ipath_kregbase) 934 if (dd->ipath_kregbase)
885 writeq(value, &ubase[regno]); 935 writeq(value, &ubase[regno]);
886} 936}
@@ -930,6 +980,53 @@ static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
930 (char __iomem *)dd->ipath_kregbase)); 980 (char __iomem *)dd->ipath_kregbase));
931} 981}
932 982
983static inline void ipath_write_creg(const struct ipath_devdata *dd,
984 ipath_creg regno, u64 value)
985{
986 if (dd->ipath_kregbase)
987 writeq(value, regno + (u64 __iomem *)
988 (dd->ipath_cregbase +
989 (char __iomem *)dd->ipath_kregbase));
990}
991
992static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
993{
994 *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
995}
996
997static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
998{
999 return (u32) le64_to_cpu(*((volatile __le64 *)
1000 pd->port_rcvhdrtail_kvaddr));
1001}
1002
1003static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
1004{
1005 return (dd->ipath_flags & IPATH_INTREG_64) ?
1006 ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
1007}
1008
1009/*
1010 * from contents of IBCStatus (or a saved copy), return linkstate
1011 * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
1012 * everywhere, anyway (and should be, for almost all purposes).
1013 */
1014static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
1015{
1016 u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
1017 INFINIPATH_IBCS_LINKSTATE_MASK;
1018 if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
1019 state = INFINIPATH_IBCS_L_STATE_ACTIVE;
1020 return state;
1021}
1022
1023/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
1024static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
1025{
1026 return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
1027 dd->ibcs_lts_mask;
1028}
1029
933/* 1030/*
934 * sysfs interface. 1031 * sysfs interface.
935 */ 1032 */
@@ -938,8 +1035,7 @@ struct device_driver;
938 1035
939extern const char ib_ipath_version[]; 1036extern const char ib_ipath_version[];
940 1037
941int ipath_driver_create_group(struct device_driver *); 1038extern struct attribute_group *ipath_driver_attr_groups[];
942void ipath_driver_remove_group(struct device_driver *);
943 1039
944int ipath_device_create_group(struct device *, struct ipath_devdata *); 1040int ipath_device_create_group(struct device *, struct ipath_devdata *);
945void ipath_device_remove_group(struct device *, struct ipath_devdata *); 1041void ipath_device_remove_group(struct device *, struct ipath_devdata *);
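The ipath_devdata changes above grow the driver's per-chip ops table: each ASIC's init code fills in the ipath_f_* pointers (ipath_f_get_ib_cfg, ipath_f_set_ib_cfg, ipath_f_config_jint, ipath_f_ib_updown, and so on) and common code dispatches through them without knowing which chip is underneath. A minimal compilable sketch of that dispatch pattern, with hypothetical demo_* names standing in for the driver's types:

#include <stdio.h>

struct demo_devdata {
	int heartbeat;	/* stands in for chip state */
	int (*f_get_ib_cfg)(struct demo_devdata *dd, int which);
	int (*f_set_ib_cfg)(struct demo_devdata *dd, int which, unsigned val);
};

#define DEMO_IB_CFG_HRTBT 1	/* mirrors IPATH_IB_CFG_HRTBT */

static int demo_get_ib_cfg(struct demo_devdata *dd, int which)
{
	return which == DEMO_IB_CFG_HRTBT ? dd->heartbeat : -1;
}

static int demo_set_ib_cfg(struct demo_devdata *dd, int which, unsigned val)
{
	if (which != DEMO_IB_CFG_HRTBT || val > 3)
		return -1;
	dd->heartbeat = (int)val;
	return 0;
}

int main(void)
{
	/* a chip-specific init function would fill in these pointers */
	struct demo_devdata dd = { 0, demo_get_ib_cfg, demo_set_ib_cfg };

	dd.f_set_ib_cfg(&dd, DEMO_IB_CFG_HRTBT, 3);	/* 3 = "auto" here */
	printf("hrtbt = %d\n", dd.f_get_ib_cfg(&dd, DEMO_IB_CFG_HRTBT));
	return 0;
}

This is only a model of the indirection; the real hooks take kernel types and are filled in by the ipath_init_iba*_funcs() routines declared above.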
diff --git a/drivers/infiniband/hw/ipath/ipath_keys.c b/drivers/infiniband/hw/ipath/ipath_keys.c
index 85a4aefc6c03..8f32b17a5eed 100644
--- a/drivers/infiniband/hw/ipath/ipath_keys.c
+++ b/drivers/infiniband/hw/ipath/ipath_keys.c
@@ -128,9 +128,8 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
 	int ret;
 
 	/*
-	 * We use LKEY == zero to mean a physical kmalloc() address.
-	 * This is a bit of a hack since we rely on dma_map_single()
-	 * being reversible by calling bus_to_virt().
+	 * We use LKEY == zero for kernel virtual addresses
+	 * (see ipath_get_dma_mr and ipath_dma.c).
 	 */
 	if (sge->lkey == 0) {
 		struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 3d1432d1e3f4..d98d5f103700 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -934,6 +934,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 	struct ib_pma_portsamplescontrol *p =
 		(struct ib_pma_portsamplescontrol *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
 	unsigned long flags;
 	u8 port_select = p->port_select;
 
@@ -955,7 +956,10 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 	p->counter_width = 4;	/* 32 bit counters */
 	p->counter_mask0_9 = COUNTER_MASK0_9;
 	spin_lock_irqsave(&dev->pending_lock, flags);
-	p->sample_status = dev->pma_sample_status;
+	if (crp->cr_psstat)
+		p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		p->sample_status = dev->pma_sample_status;
 	p->sample_start = cpu_to_be32(dev->pma_sample_start);
 	p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
 	p->tag = cpu_to_be16(dev->pma_tag);
@@ -975,8 +979,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
 	struct ib_pma_portsamplescontrol *p =
 		(struct ib_pma_portsamplescontrol *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
 	unsigned long flags;
-	u32 start;
+	u8 status;
 	int ret;
 
 	if (pmp->attr_mod != 0 ||
@@ -986,59 +991,67 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
 		goto bail;
 	}
 
-	start = be32_to_cpu(p->sample_start);
-	if (start != 0) {
-		spin_lock_irqsave(&dev->pending_lock, flags);
-		if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_DONE) {
-			dev->pma_sample_status =
-				IB_PMA_SAMPLE_STATUS_STARTED;
-			dev->pma_sample_start = start;
-			dev->pma_sample_interval =
-				be32_to_cpu(p->sample_interval);
-			dev->pma_tag = be16_to_cpu(p->tag);
-			if (p->counter_select[0])
-				dev->pma_counter_select[0] =
-					p->counter_select[0];
-			if (p->counter_select[1])
-				dev->pma_counter_select[1] =
-					p->counter_select[1];
-			if (p->counter_select[2])
-				dev->pma_counter_select[2] =
-					p->counter_select[2];
-			if (p->counter_select[3])
-				dev->pma_counter_select[3] =
-					p->counter_select[3];
-			if (p->counter_select[4])
-				dev->pma_counter_select[4] =
-					p->counter_select[4];
-		}
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->pending_lock, flags);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	if (status == IB_PMA_SAMPLE_STATUS_DONE) {
+		dev->pma_sample_start = be32_to_cpu(p->sample_start);
+		dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
+		dev->pma_tag = be16_to_cpu(p->tag);
+		dev->pma_counter_select[0] = p->counter_select[0];
+		dev->pma_counter_select[1] = p->counter_select[1];
+		dev->pma_counter_select[2] = p->counter_select[2];
+		dev->pma_counter_select[3] = p->counter_select[3];
+		dev->pma_counter_select[4] = p->counter_select[4];
+		if (crp->cr_psstat) {
+			ipath_write_creg(dev->dd, crp->cr_psinterval,
+					 dev->pma_sample_interval);
+			ipath_write_creg(dev->dd, crp->cr_psstart,
+					 dev->pma_sample_start);
+		} else
+			dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
 	}
+	spin_unlock_irqrestore(&dev->pending_lock, flags);
+
 	ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
 
 bail:
 	return ret;
 }
 
-static u64 get_counter(struct ipath_ibdev *dev, __be16 sel)
+static u64 get_counter(struct ipath_ibdev *dev,
+		       struct ipath_cregs const *crp,
+		       __be16 sel)
 {
 	u64 ret;
 
 	switch (sel) {
 	case IB_PMA_PORT_XMIT_DATA:
-		ret = dev->ipath_sword;
+		ret = (crp->cr_psxmitdatacount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
+			dev->ipath_sword;
 		break;
 	case IB_PMA_PORT_RCV_DATA:
-		ret = dev->ipath_rword;
+		ret = (crp->cr_psrcvdatacount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
+			dev->ipath_rword;
 		break;
 	case IB_PMA_PORT_XMIT_PKTS:
-		ret = dev->ipath_spkts;
+		ret = (crp->cr_psxmitpktscount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
+			dev->ipath_spkts;
 		break;
 	case IB_PMA_PORT_RCV_PKTS:
-		ret = dev->ipath_rpkts;
+		ret = (crp->cr_psrcvpktscount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
+			dev->ipath_rpkts;
 		break;
 	case IB_PMA_PORT_XMIT_WAIT:
-		ret = dev->ipath_xmit_wait;
+		ret = (crp->cr_psxmitwaitcount) ?
+			ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
+			dev->ipath_xmit_wait;
 		break;
 	default:
 		ret = 0;
@@ -1053,14 +1066,21 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
 	struct ib_pma_portsamplesresult *p =
 		(struct ib_pma_portsamplesresult *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+	u8 status;
 	int i;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->tag = cpu_to_be16(dev->pma_tag);
-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	p->sample_status = cpu_to_be16(status);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = cpu_to_be32(
-			get_counter(dev, dev->pma_counter_select[i]));
+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
+		    cpu_to_be32(
+			get_counter(dev, crp, dev->pma_counter_select[i]));
 
 	return reply((struct ib_smp *) pmp);
 }
@@ -1071,16 +1091,23 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	struct ib_pma_portsamplesresult_ext *p =
 		(struct ib_pma_portsamplesresult_ext *)pmp->data;
 	struct ipath_ibdev *dev = to_idev(ibdev);
+	struct ipath_cregs const *crp = dev->dd->ipath_cregs;
+	u8 status;
 	int i;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 	p->tag = cpu_to_be16(dev->pma_tag);
-	p->sample_status = cpu_to_be16(dev->pma_sample_status);
+	if (crp->cr_psstat)
+		status = ipath_read_creg32(dev->dd, crp->cr_psstat);
+	else
+		status = dev->pma_sample_status;
+	p->sample_status = cpu_to_be16(status);
 	/* 64 bits */
 	p->extended_width = __constant_cpu_to_be32(0x80000000);
 	for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-		p->counter[i] = cpu_to_be64(
-			get_counter(dev, dev->pma_counter_select[i]));
+		p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
+		    cpu_to_be64(
+			get_counter(dev, crp, dev->pma_counter_select[i]));
 
 	return reply((struct ib_smp *) pmp);
 }
@@ -1113,6 +1140,8 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		dev->z_local_link_integrity_errors;
 	cntrs.excessive_buffer_overrun_errors -=
 		dev->z_excessive_buffer_overrun_errors;
+	cntrs.vl15_dropped -= dev->z_vl15_dropped;
+	cntrs.vl15_dropped += dev->n_vl15_dropped;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
@@ -1156,10 +1185,10 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
 	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
-	if (dev->n_vl15_dropped > 0xFFFFUL)
+	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = __constant_cpu_to_be16(0xFFFF);
 	else
-		p->vl15_dropped = cpu_to_be16((u16)dev->n_vl15_dropped);
+		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
 	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
 		p->port_xmit_data = __constant_cpu_to_be32(0xFFFFFFFF);
 	else
@@ -1262,8 +1291,10 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
 	dev->z_excessive_buffer_overrun_errors =
 		cntrs.excessive_buffer_overrun_errors;
 
-	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED)
+	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
 		dev->n_vl15_dropped = 0;
+		dev->z_vl15_dropped = cntrs.vl15_dropped;
+	}
 
 	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
 		dev->z_port_xmit_data = cntrs.port_xmit_data;
@@ -1434,7 +1465,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
 		 * before checking for other consumers.
 		 * Just tell the caller to process it normally.
 		 */
-		ret = IB_MAD_RESULT_FAILURE;
+		ret = IB_MAD_RESULT_SUCCESS;
 		goto bail;
 	default:
 		smp->status |= IB_SMP_UNSUP_METHOD;
@@ -1516,7 +1547,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 		 * before checking for other consumers.
 		 * Just tell the caller to process it normally.
 		 */
-		ret = IB_MAD_RESULT_FAILURE;
+		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;
 	default:
 		pmp->status |= IB_SMP_UNSUP_METHOD;
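The recurring pattern in the ipath_mad.c hunks above: newer chips expose dedicated performance-sample registers (cr_psstat, cr_psxmitdatacount, ...), older ones do not, and a zero register number in ipath_cregs means "not present", so each read falls back to the software-maintained count. A small compilable sketch of that fallback, with hypothetical demo_* names standing in for the driver's:

#include <stdint.h>
#include <stdio.h>

struct demo_cregs {
	unsigned cr_psstat;	/* 0 means "chip has no such register" */
};

static uint32_t demo_read_creg32(unsigned regno)
{
	return regno * 7;	/* stand-in for a real MMIO read */
}

static uint32_t demo_sample_status(const struct demo_cregs *crp,
				   uint32_t sw_status)
{
	/* hardware register if present, else the cached software copy */
	return crp->cr_psstat ? demo_read_creg32(crp->cr_psstat) : sw_status;
}

int main(void)
{
	struct demo_cregs with_hw = { 42 }, without_hw = { 0 };

	printf("hw: %u, sw: %u\n",
	       demo_sample_status(&with_hw, 5),
	       demo_sample_status(&without_hw, 5));
	return 0;
}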
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index b997ff88401b..80dc623cee40 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -387,8 +387,8 @@ int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 	struct ib_wc wc;
 	int ret = 0;
 
-	ipath_dbg("QP%d/%d in error state\n",
-		  qp->ibqp.qp_num, qp->remote_qpn);
+	ipath_dbg("QP%d/%d in error state (%d)\n",
+		  qp->ibqp.qp_num, qp->remote_qpn, err);
 
 	spin_lock(&dev->pending_lock);
 	/* XXX What if its already removed by the timeout code? */
@@ -855,8 +855,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		int err;
-
 		if (!qp->r_rq.wq) {
 			__u64 offset = 0;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 120a61b03bc4..459e46e2c016 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -647,6 +647,7 @@ static void send_rc_ack(struct ipath_qp *qp)
 
 queue_ack:
 	spin_lock_irqsave(&qp->s_lock, flags);
+	dev->n_rc_qacks++;
 	qp->s_flags |= IPATH_S_ACK_PENDING;
 	qp->s_nak_state = qp->r_nak_state;
 	qp->s_ack_psn = qp->r_ack_psn;
@@ -798,11 +799,13 @@ bail:
 
 static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
 {
-	if (qp->s_wait_credit) {
-		qp->s_wait_credit = 0;
-		tasklet_hi_schedule(&qp->s_task);
+	if (qp->s_last_psn != psn) {
+		qp->s_last_psn = psn;
+		if (qp->s_wait_credit) {
+			qp->s_wait_credit = 0;
+			tasklet_hi_schedule(&qp->s_task);
+		}
 	}
-	qp->s_last_psn = psn;
 }
 
 /**
@@ -1653,13 +1656,6 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
 	case OP(SEND_FIRST):
 		if (!ipath_get_rwqe(qp, 0)) {
 		rnr_nak:
-			/*
-			 * A RNR NAK will ACK earlier sends and RDMA writes.
-			 * Don't queue the NAK if a RDMA read or atomic
-			 * is pending though.
-			 */
-			if (qp->r_nak_state)
-				goto done;
 			qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
 			qp->r_ack_psn = qp->r_psn;
 			goto send_ack;
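The update_last_psn() rework above makes both the PSN store and the credit-wait wakeup conditional on the PSN actually changing, so a duplicate ACK no longer reschedules the send tasklet. A compilable toy model of the new control flow (hypothetical demo_* names; the wakeups counter stands in for tasklet_hi_schedule()):

#include <stdio.h>

struct demo_qp {
	unsigned s_last_psn;
	int s_wait_credit;
	int wakeups;	/* counts would-be tasklet_hi_schedule() calls */
};

static void demo_update_last_psn(struct demo_qp *qp, unsigned psn)
{
	if (qp->s_last_psn != psn) {
		qp->s_last_psn = psn;
		if (qp->s_wait_credit) {
			qp->s_wait_credit = 0;
			qp->wakeups++;
		}
	}
}

int main(void)
{
	struct demo_qp qp = { 0, 1, 0 };

	demo_update_last_psn(&qp, 5);
	demo_update_last_psn(&qp, 5);	/* duplicate: no extra wakeup */
	printf("wakeups = %d\n", qp.wakeups);
	return 0;
}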
diff --git a/drivers/infiniband/hw/ipath/ipath_registers.h b/drivers/infiniband/hw/ipath/ipath_registers.h
index 708eba3165d7..6d2a17f9c1da 100644
--- a/drivers/infiniband/hw/ipath/ipath_registers.h
+++ b/drivers/infiniband/hw/ipath/ipath_registers.h
@@ -82,8 +82,7 @@
 
 /* kr_rcvctrl bits */
 #define INFINIPATH_R_PORTENABLE_SHIFT 0
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD 0x80000000
+#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
 
 /* kr_intstatus, kr_intclear, kr_intmask bits */
 #define INFINIPATH_I_RCVURG_SHIFT 0
@@ -272,20 +271,6 @@
 #define INFINIPATH_EXTC_LEDGBLOK_ON 0x00000002ULL
 #define INFINIPATH_EXTC_LEDGBLERR_OFF 0x00000001ULL
 
-/* kr_mdio bits */
-#define INFINIPATH_MDIO_CLKDIV_MASK 0x7FULL
-#define INFINIPATH_MDIO_CLKDIV_SHIFT 32
-#define INFINIPATH_MDIO_COMMAND_MASK 0x7ULL
-#define INFINIPATH_MDIO_COMMAND_SHIFT 26
-#define INFINIPATH_MDIO_DEVADDR_MASK 0x1FULL
-#define INFINIPATH_MDIO_DEVADDR_SHIFT 21
-#define INFINIPATH_MDIO_REGADDR_MASK 0x1FULL
-#define INFINIPATH_MDIO_REGADDR_SHIFT 16
-#define INFINIPATH_MDIO_DATA_MASK 0xFFFFULL
-#define INFINIPATH_MDIO_DATA_SHIFT 0
-#define INFINIPATH_MDIO_CMDVALID 0x0000000040000000ULL
-#define INFINIPATH_MDIO_RDDATAVALID 0x0000000080000000ULL
-
 /* kr_partitionkey bits */
 #define INFINIPATH_PKEY_SIZE 16
 #define INFINIPATH_PKEY_MASK 0xFFFF
@@ -303,8 +288,6 @@
 
 /* kr_xgxsconfig bits */
 #define INFINIPATH_XGXS_RESET 0x7ULL
-#define INFINIPATH_XGXS_MDIOADDR_MASK 0xfULL
-#define INFINIPATH_XGXS_MDIOADDR_SHIFT 4
 #define INFINIPATH_XGXS_RX_POL_SHIFT 19
 #define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
 
@@ -470,6 +453,20 @@ struct ipath_cregs {
 	ipath_creg cr_unsupvlcnt;
 	ipath_creg cr_wordrcvcnt;
 	ipath_creg cr_wordsendcnt;
+	ipath_creg cr_vl15droppedpktcnt;
+	ipath_creg cr_rxotherlocalphyerrcnt;
+	ipath_creg cr_excessbufferovflcnt;
+	ipath_creg cr_locallinkintegrityerrcnt;
+	ipath_creg cr_rxvlerrcnt;
+	ipath_creg cr_rxdlidfltrcnt;
+	ipath_creg cr_psstat;
+	ipath_creg cr_psstart;
+	ipath_creg cr_psinterval;
+	ipath_creg cr_psrcvdatacount;
+	ipath_creg cr_psrcvpktscount;
+	ipath_creg cr_psxmitdatacount;
+	ipath_creg cr_psxmitpktscount;
+	ipath_creg cr_psxmitwaitcount;
 };
 
 #endif /* _IPATH_REGISTERS_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 54c61a972de2..a59bdbd0ed87 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -98,11 +98,15 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
 		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
 			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
 			l = l->next;
-			if (l->next == &dev->rnrwait)
+			if (l->next == &dev->rnrwait) {
+				nqp = NULL;
 				break;
+			}
 			nqp = list_entry(l->next, struct ipath_qp,
 					 timerwait);
 		}
+		if (nqp)
+			nqp->s_rnr_timeout -= qp->s_rnr_timeout;
 		list_add(&qp->timerwait, l);
 	}
 	spin_unlock_irqrestore(&dev->pending_lock, flags);
@@ -479,9 +483,14 @@ done:
 
 static void want_buffer(struct ipath_devdata *dd)
 {
-	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
+	unsigned long flags;
+
+	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
+	dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
 			 dd->ipath_sendctrl);
+	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
+	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 }
 
 /**
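want_buffer() now serializes the read-modify-write of the ipath_sendctrl shadow under the new ipath_sendctrl_lock and reads kr_scratch afterward, which forces the posted register write out before the lock drops. A rough, purely illustrative userspace model of that sequence (demo_* names are stand-ins; the comments mark where the real locking and MMIO calls sit):

#include <stdint.h>
#include <stdio.h>

struct demo_dev {
	uint64_t sendctrl_shadow;	/* like dd->ipath_sendctrl */
	volatile uint64_t kr_sendctrl;	/* stands in for the MMIO register */
	volatile uint64_t kr_scratch;
};

#define DEMO_S_PIOINTBUFAVAIL (1ULL << 4)

static void demo_want_buffer(struct demo_dev *dd)
{
	/* spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags) in the driver */
	dd->sendctrl_shadow |= DEMO_S_PIOINTBUFAVAIL;
	dd->kr_sendctrl = dd->sendctrl_shadow;	/* ipath_write_kreg() */
	(void)dd->kr_scratch;	/* readback flushes the posted write */
	/* spin_unlock_irqrestore(...) */
}

int main(void)
{
	struct demo_dev dd = { 0, 0, 0 };

	demo_want_buffer(&dd);
	printf("kr_sendctrl = %#llx\n", (unsigned long long)dd.kr_sendctrl);
	return 0;
}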
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 2fef36f4b675..f772102e4713 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -94,8 +94,8 @@ bail:
 /**
  * ipath_create_srq - create a shared receive queue
  * @ibpd: the protection domain of the SRQ to create
- * @attr: the attributes of the SRQ
- * @udata: not used by the InfiniPath verbs driver
+ * @srq_init_attr: the attributes of the SRQ
+ * @udata: data from libipathverbs when creating a user SRQ
  */
 struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 				struct ib_srq_init_attr *srq_init_attr,
diff --git a/drivers/infiniband/hw/ipath/ipath_stats.c b/drivers/infiniband/hw/ipath/ipath_stats.c
index f0271415cd5b..d2725cd11bdc 100644
--- a/drivers/infiniband/hw/ipath/ipath_stats.c
+++ b/drivers/infiniband/hw/ipath/ipath_stats.c
@@ -133,15 +133,16 @@ bail:
 static void ipath_qcheck(struct ipath_devdata *dd)
 {
 	static u64 last_tot_hdrqfull;
+	struct ipath_portdata *pd = dd->ipath_pd[0];
 	size_t blen = 0;
 	char buf[128];
 
 	*buf = 0;
-	if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
+	if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
 		blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
-				dd->ipath_pd[0]->port_hdrqfull -
+				pd->port_hdrqfull -
 				dd->ipath_p0_hdrqfull);
-		dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
+		dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
 	}
 	if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
 		blen += snprintf(buf + blen, sizeof buf - blen,
@@ -173,7 +174,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
 	if (blen)
 		ipath_dbg("%s\n", buf);
 
-	if (dd->ipath_port0head != (u32)
+	if (pd->port_head != (u32)
 	    le64_to_cpu(*dd->ipath_hdrqtailptr)) {
 		if (dd->ipath_lastport0rcv_cnt ==
 		    ipath_stats.sps_port0pkts) {
@@ -181,7 +182,7 @@ static void ipath_qcheck(struct ipath_devdata *dd)
 				"port0 hd=%llx tl=%x; port0pkts %llx\n",
 				(unsigned long long)
 				le64_to_cpu(*dd->ipath_hdrqtailptr),
-				dd->ipath_port0head,
+				pd->port_head,
 				(unsigned long long)
 				ipath_stats.sps_port0pkts);
 		}
@@ -237,7 +238,7 @@ static void ipath_chk_errormask(struct ipath_devdata *dd)
 void ipath_get_faststats(unsigned long opaque)
 {
 	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-	u32 val;
+	int i;
 	static unsigned cnt;
 	unsigned long flags;
 	u64 traffic_wds;
@@ -321,12 +322,11 @@ void ipath_get_faststats(unsigned long opaque)
 
 	/* limit qfull messages to ~one per minute per port */
 	if ((++cnt & 0x10)) {
-		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
-		     val--) {
-			if (dd->ipath_lastegrheads[val] != -1)
-				dd->ipath_lastegrheads[val] = -1;
-			if (dd->ipath_lastrcvhdrqtails[val] != -1)
-				dd->ipath_lastrcvhdrqtails[val] = -1;
+		for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
+			struct ipath_portdata *pd = dd->ipath_pd[i];
+
+			if (pd && pd->port_lastrcvhdrqtail != -1)
+				pd->port_lastrcvhdrqtail = -1;
 		}
 	}
 
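The stuck-port bookkeeping that used to live in the per-device parallel arrays ipath_lastrcvhdrqtails[] and ipath_lastegrheads[] now lives in each ipath_portdata as port_lastrcvhdrqtail, so the periodic reset loop walks the port array and must skip slots that were never allocated. A compilable sketch of the equivalent loop with hypothetical names:

#include <stdio.h>

struct demo_port {
	int last_rcvhdrqtail;	/* like pd->port_lastrcvhdrqtail */
};

#define DEMO_PORTS 4

static void demo_reset_stuck_markers(struct demo_port *ports[])
{
	int i;

	for (i = DEMO_PORTS; --i >= 0; ) {
		struct demo_port *pd = ports[i];

		/* unallocated ports are NULL and must be skipped */
		if (pd && pd->last_rcvhdrqtail != -1)
			pd->last_rcvhdrqtail = -1;
	}
}

int main(void)
{
	struct demo_port p0 = { 7 };
	struct demo_port *ports[DEMO_PORTS] = { &p0, NULL, NULL, NULL };

	demo_reset_stuck_markers(ports);
	printf("port0 marker = %d\n", p0.last_rcvhdrqtail);
	return 0;
}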
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index e1ad7cfc21fd..56dfc8a2344c 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -363,6 +363,60 @@ static ssize_t show_unit(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
 }
 
+static ssize_t show_jint_max_packets(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
+}
+
+static ssize_t store_jint_max_packets(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	u16 v = 0;
+	int ret;
+
+	ret = ipath_parse_ushort(buf, &v);
+	if (ret < 0)
+		ipath_dev_err(dd, "invalid jint_max_packets.\n");
+	else
+		dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
+
+	return ret;
+}
+
+static ssize_t show_jint_idle_ticks(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
+}
+
+static ssize_t store_jint_idle_ticks(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	u16 v = 0;
+	int ret;
+
+	ret = ipath_parse_ushort(buf, &v);
+	if (ret < 0)
+		ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
+	else
+		dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
+
+	return ret;
+}
+
 #define DEVICE_COUNTER(name, attr) \
 	static ssize_t show_counter_##name(struct device *dev, \
 					   struct device_attribute *attr, \
@@ -670,6 +724,257 @@ static ssize_t show_logged_errs(struct device *dev,
 	return count;
 }
 
+/*
+ * New sysfs entries to control various IB config. These all turn into
+ * accesses via ipath_f_get/set_ib_cfg.
+ *
+ * Get/Set heartbeat enable. OR of 1=enabled, 2=auto
+ */
+static ssize_t show_hrtbt_enb(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_hrtbt_enb(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf,
+			       size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret >= 0 && val > 3)
+		ret = -EINVAL;
+	if (ret < 0) {
+		ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+		goto bail;
+	}
+
+	/*
+	 * Set the "intentional" heartbeat enable per either of
+	 * "Enable" and "Auto", as these are normally set together.
+	 * This bit is consulted when leaving loopback mode,
+	 * because entering loopback mode overrides it and automatically
+	 * disables heartbeat.
+	 */
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
+	if (r < 0)
+		ret = r;
+	else if (val == IPATH_IB_HRTBT_OFF)
+		dd->ipath_flags |= IPATH_NO_HRTBT;
+	else
+		dd->ipath_flags &= ~IPATH_NO_HRTBT;
+
+bail:
+	return ret;
+}
+
+/*
+ * Get/Set Link-widths enabled. OR of 1=1x, 2=4x (this is human/IB centric,
+ * _not_ the particular encoding of any given chip)
+ */
+static ssize_t show_lwid_enb(struct device *dev,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_lwid_enb(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf,
+			      size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret >= 0 && (val == 0 || val > 3))
+		ret = -EINVAL;
+	if (ret < 0) {
+		ipath_dev_err(dd,
+			"attempt to set invalid Link Width (enable)\n");
+		goto bail;
+	}
+
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
+	if (r < 0)
+		ret = r;
+
+bail:
+	return ret;
+}
+
+/* Get current link width */
+static ssize_t show_lwid(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+/*
+ * Get/Set Link-speeds enabled. OR of 1=SDR, 2=DDR.
+ */
+static ssize_t show_spd_enb(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_spd_enb(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf,
+			     size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
+		ret = -EINVAL;
+	if (ret < 0) {
+		ipath_dev_err(dd,
+			"attempt to set invalid Link Speed (enable)\n");
+		goto bail;
+	}
+
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
+	if (r < 0)
+		ret = r;
+
+bail:
+	return ret;
+}
+
+/* Get current link speed */
+static ssize_t show_spd(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+/*
+ * Get/Set RX polarity-invert enable. 0=no, 1=yes.
+ */
+static ssize_t show_rx_polinv_enb(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_rx_polinv_enb(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret < 0 || val > 1)
+		goto invalid;
+
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
+	if (r < 0) {
+		ret = r;
+		goto bail;
+	}
+
+	goto bail;
+invalid:
+	ipath_dev_err(dd, "attempt to set invalid Rx Polarity (enable)\n");
+bail:
+	return ret;
+}
+/*
+ * Get/Set RX lane-reversal enable. 0=no, 1=yes.
+ */
+static ssize_t show_lanerev_enb(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret;
+
+	ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
+	if (ret >= 0)
+		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+	return ret;
+}
+
+static ssize_t store_lanerev_enb(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	struct ipath_devdata *dd = dev_get_drvdata(dev);
+	int ret, r;
+	u16 val;
+
+	ret = ipath_parse_ushort(buf, &val);
+	if (ret >= 0 && val > 1) {
+		ret = -EINVAL;
+		ipath_dev_err(dd,
+			"attempt to set invalid Lane reversal (enable)\n");
+		goto bail;
+	}
+
+	r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
+	if (r < 0)
+		ret = r;
+
+bail:
+	return ret;
+}
+
 static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
 static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
 
@@ -683,6 +988,11 @@ static struct attribute_group driver_attr_group = {
 	.attrs = driver_attributes
 };
 
+struct attribute_group *ipath_driver_attr_groups[] = {
+	&driver_attr_group,
+	NULL,
+};
+
 static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
 static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
 static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
@@ -701,6 +1011,10 @@ static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
 static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
 static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
 static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
+static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
+		   show_jint_max_packets, store_jint_max_packets);
+static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
+		   show_jint_idle_ticks, store_jint_idle_ticks);
 
 static struct attribute *dev_attributes[] = {
 	&dev_attr_guid.attr,
@@ -727,6 +1041,34 @@ static struct attribute_group dev_attr_group = {
 	.attrs = dev_attributes
 };
 
+static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
+		   store_hrtbt_enb);
+static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
+		   store_lwid_enb);
+static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
+static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
+		   store_spd_enb);
+static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
+static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
+		   store_rx_polinv_enb);
+static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
+		   store_lanerev_enb);
+
+static struct attribute *dev_ibcfg_attributes[] = {
+	&dev_attr_hrtbt_enable.attr,
+	&dev_attr_link_width_enable.attr,
+	&dev_attr_link_width.attr,
+	&dev_attr_link_speed_enable.attr,
+	&dev_attr_link_speed.attr,
+	&dev_attr_rx_pol_inv_enable.attr,
+	&dev_attr_rx_lane_rev_enable.attr,
+	NULL
+};
+
+static struct attribute_group dev_ibcfg_attr_group = {
+	.attrs = dev_ibcfg_attributes
+};
+
 /**
  * ipath_expose_reset - create a device reset file
  * @dev: the device structure
@@ -753,24 +1095,9 @@ int ipath_expose_reset(struct device *dev)
 	return ret;
 }
 
-int ipath_driver_create_group(struct device_driver *drv)
-{
-	int ret;
-
-	ret = sysfs_create_group(&drv->kobj, &driver_attr_group);
-
-	return ret;
-}
-
-void ipath_driver_remove_group(struct device_driver *drv)
-{
-	sysfs_remove_group(&drv->kobj, &driver_attr_group);
-}
-
 int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
 {
 	int ret;
-	char unit[5];
 
 	ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
 	if (ret)
@@ -780,11 +1107,26 @@ int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
 	if (ret)
 		goto bail_attrs;
 
-	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
-	ret = sysfs_create_link(&dev->driver->kobj, &dev->kobj, unit);
-	if (ret == 0)
-		goto bail;
+	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
+		ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
+		if (ret)
+			goto bail_counter;
+		ret = device_create_file(dev, &dev_attr_jint_max_packets);
+		if (ret)
+			goto bail_idle;
 
+		ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
+		if (ret)
+			goto bail_max;
+	}
+
+	return 0;
+
+bail_max:
+	device_remove_file(dev, &dev_attr_jint_max_packets);
+bail_idle:
+	device_remove_file(dev, &dev_attr_jint_idle_ticks);
+bail_counter:
 	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
bail_attrs:
 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
@@ -794,12 +1136,14 @@ bail:
 
 void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
 {
-	char unit[5];
+	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
 
-	snprintf(unit, sizeof(unit), "%02d", dd->ipath_unit);
-	sysfs_remove_link(&dev->driver->kobj, unit);
+	if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
+		sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
+		device_remove_file(dev, &dev_attr_jint_idle_ticks);
+		device_remove_file(dev, &dev_attr_jint_max_packets);
+	}
 
-	sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
 	sysfs_remove_group(&dev->kobj, &dev_attr_group);
 
 	device_remove_file(dev, &dev_attr_reset);
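Besides exporting ipath_driver_attr_groups[] in place of the create/remove helpers, the sysfs rework above creates the jint_* files and the IB-config group only on hardware that advertises IPATH_HAS_MULT_IB_SPEED, unwinding in reverse order on failure. A compilable sketch of that conditional create/unwind flow (all names here are illustrative, not the driver's):

#include <stdio.h>

static int create_file(const char *name)
{
	printf("create %s\n", name);
	return 0;	/* nonzero would mean failure */
}

static void remove_file(const char *name)
{
	printf("remove %s\n", name);
}

static int demo_create_group(int has_mult_ib_speed)
{
	int ret;

	if (!has_mult_ib_speed)
		return 0;	/* capability absent: create nothing */

	ret = create_file("jint_idle_ticks");
	if (ret)
		goto bail;
	ret = create_file("jint_max_packets");
	if (ret)
		goto bail_idle;
	return 0;

bail_idle:
	/* undo only what succeeded, in reverse order */
	remove_file("jint_idle_ticks");
bail:
	return ret;
}

int main(void)
{
	return demo_create_group(1);
}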
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 16a2a938b520..de67eed08ed0 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -301,8 +301,6 @@ int ipath_make_ud_req(struct ipath_qp *qp)
301 301
302 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */ 302 /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
303 qp->s_hdrwords = 7; 303 qp->s_hdrwords = 7;
304 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
305 qp->s_hdrwords++;
306 qp->s_cur_size = wqe->length; 304 qp->s_cur_size = wqe->length;
307 qp->s_cur_sge = &qp->s_sge; 305 qp->s_cur_sge = &qp->s_sge;
308 qp->s_wqe = wqe; 306 qp->s_wqe = wqe;
@@ -327,6 +325,7 @@ int ipath_make_ud_req(struct ipath_qp *qp)
327 ohdr = &qp->s_hdr.u.oth; 325 ohdr = &qp->s_hdr.u.oth;
328 } 326 }
329 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 327 if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
328 qp->s_hdrwords++;
330 ohdr->u.ud.imm_data = wqe->wr.imm_data; 329 ohdr->u.ud.imm_data = wqe->wr.imm_data;
331 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24; 330 bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
332 } else 331 } else
@@ -455,6 +454,28 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
455 } 454 }
456 } 455 }
457 456
457 /*
 458 * The opcode is in the low byte when it's in network order
459 * (top byte when in host order).
460 */
461 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
462 if (qp->ibqp.qp_num > 1 &&
463 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
464 if (header_in_data) {
465 wc.imm_data = *(__be32 *) data;
466 data += sizeof(__be32);
467 } else
468 wc.imm_data = ohdr->u.ud.imm_data;
469 wc.wc_flags = IB_WC_WITH_IMM;
470 hdrsize += sizeof(u32);
471 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
472 wc.imm_data = 0;
473 wc.wc_flags = 0;
474 } else {
475 dev->n_pkt_drops++;
476 goto bail;
477 }
478
458 /* Get the number of bytes the message was padded by. */ 479 /* Get the number of bytes the message was padded by. */
459 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; 480 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
460 if (unlikely(tlen < (hdrsize + pad + 4))) { 481 if (unlikely(tlen < (hdrsize + pad + 4))) {
@@ -482,28 +503,6 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
482 wc.byte_len = tlen + sizeof(struct ib_grh); 503 wc.byte_len = tlen + sizeof(struct ib_grh);
483 504
484 /* 505 /*
 485 * The opcode is in the low byte when it's in network order
486 * (top byte when in host order).
487 */
488 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
489 if (qp->ibqp.qp_num > 1 &&
490 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
491 if (header_in_data) {
492 wc.imm_data = *(__be32 *) data;
493 data += sizeof(__be32);
494 } else
495 wc.imm_data = ohdr->u.ud.imm_data;
496 wc.wc_flags = IB_WC_WITH_IMM;
497 hdrsize += sizeof(u32);
498 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
499 wc.imm_data = 0;
500 wc.wc_flags = 0;
501 } else {
502 dev->n_pkt_drops++;
503 goto bail;
504 }
505
506 /*
507 * Get the next work request entry to find where to put the data. 506 * Get the next work request entry to find where to put the data.
508 */ 507 */
509 if (qp->r_reuse_sge) 508 if (qp->r_reuse_sge)
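
The block moved earlier in ipath_ud_rcv() leans on the BTH layout: bth[0] travels big-endian, so after be32_to_cpu() the opcode occupies the top byte (hence the plain >> 24), and the pad count sits in bits 21:20 as used a few lines later. A self-contained sketch with a hand-rolled byte swap in place of the kernel helper (the wire bytes are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's be32_to_cpu(). */
    static uint32_t be32_to_host(const uint8_t b[4])
    {
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    int main(void)
    {
        /* First BTH dword as seen on the wire: opcode 0x65
         * (UD SEND_ONLY_WITH_IMMEDIATE), pad count 2 in bits 21:20. */
        uint8_t wire[4] = { 0x65, 0x20, 0x00, 0x00 };
        uint32_t bth0 = be32_to_host(wire);

        printf("opcode = 0x%02x\n", bth0 >> 24);    /* prints 0x65 */
        printf("pad    = %u\n", (bth0 >> 20) & 3);  /* prints 2 */
        return 0;
    }
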
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index c4c998446c7b..32d8f882e56c 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -943,7 +943,7 @@ bail:
943 * ipath_verbs_send - send a packet 943 * ipath_verbs_send - send a packet
944 * @qp: the QP to send on 944 * @qp: the QP to send on
945 * @hdr: the packet header 945 * @hdr: the packet header
946 * @hdrwords: the number of words in the header 946 * @hdrwords: the number of 32-bit words in the header
947 * @ss: the SGE to send 947 * @ss: the SGE to send
948 * @len: the length of the packet in bytes 948 * @len: the length of the packet in bytes
949 */ 949 */
@@ -955,7 +955,10 @@ int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
955 int ret; 955 int ret;
956 u32 dwords = (len + 3) >> 2; 956 u32 dwords = (len + 3) >> 2;
957 957
958 /* +1 is for the qword padding of pbc */ 958 /*
959 * Calculate the send buffer trigger address.
 960 * The +1 accounts for the pbc control dword following the pbc length.
961 */
959 plen = hdrwords + dwords + 1; 962 plen = hdrwords + dwords + 1;
960 963
961 /* Drop non-VL15 packets if we are not in the active state */ 964 /* Drop non-VL15 packets if we are not in the active state */
@@ -1130,20 +1133,34 @@ static int ipath_query_device(struct ib_device *ibdev,
1130 return 0; 1133 return 0;
1131} 1134}
1132 1135
1133const u8 ipath_cvt_physportstate[16] = { 1136const u8 ipath_cvt_physportstate[32] = {
1134 [INFINIPATH_IBCS_LT_STATE_DISABLED] = 3, 1137 [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
1135 [INFINIPATH_IBCS_LT_STATE_LINKUP] = 5, 1138 [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
1136 [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = 2, 1139 [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
1137 [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = 2, 1140 [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
1138 [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = 1, 1141 [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
1139 [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = 1, 1142 [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
1140 [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] = 4, 1143 [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
1141 [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] = 4, 1144 IB_PHYSPORTSTATE_CFG_TRAIN,
1142 [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] = 4, 1145 [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
1143 [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = 4, 1146 IB_PHYSPORTSTATE_CFG_TRAIN,
1144 [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] = 6, 1147 [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
1145 [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] = 6, 1148 IB_PHYSPORTSTATE_CFG_TRAIN,
1146 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] = 6, 1149 [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
1150 [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
1151 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1152 [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
1153 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1154 [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
1155 IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
1156 [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
1157 [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
1158 [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
1159 [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
1160 [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
1161 [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
1162 [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
1163 [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
1147}; 1164};
1148 1165
1149u32 ipath_get_cr_errpkey(struct ipath_devdata *dd) 1166u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
@@ -1168,8 +1185,9 @@ static int ipath_query_port(struct ib_device *ibdev,
1168 ibcstat = dd->ipath_lastibcstat; 1185 ibcstat = dd->ipath_lastibcstat;
1169 props->state = ((ibcstat >> 4) & 0x3) + 1; 1186 props->state = ((ibcstat >> 4) & 0x3) + 1;
1170 /* See phys_state_show() */ 1187 /* See phys_state_show() */
1171 props->phys_state = ipath_cvt_physportstate[ 1188 props->phys_state = /* MEA: assumes shift == 0 */
1172 dd->ipath_lastibcstat & 0xf]; 1189 ipath_cvt_physportstate[dd->ipath_lastibcstat &
1190 dd->ibcs_lts_mask];
1173 props->port_cap_flags = dev->port_cap_flags; 1191 props->port_cap_flags = dev->port_cap_flags;
1174 props->gid_tbl_len = 1; 1192 props->gid_tbl_len = 1;
1175 props->max_msg_sz = 0x80000000; 1193 props->max_msg_sz = 0x80000000;
@@ -1641,6 +1659,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
1641 cntrs.local_link_integrity_errors; 1659 cntrs.local_link_integrity_errors;
1642 idev->z_excessive_buffer_overrun_errors = 1660 idev->z_excessive_buffer_overrun_errors =
1643 cntrs.excessive_buffer_overrun_errors; 1661 cntrs.excessive_buffer_overrun_errors;
1662 idev->z_vl15_dropped = cntrs.vl15_dropped;
1644 1663
1645 /* 1664 /*
1646 * The system image GUID is supposed to be the same for all 1665 * The system image GUID is supposed to be the same for all
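
The expanded comment in ipath_verbs_send() spells out the trigger-length arithmetic: header words, plus the payload rounded up to whole dwords, plus one dword for the PBC. Worked through with illustrative numbers (a sketch, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hdrwords = 7;                  /* LRH+BTH+DETH = (8+12+8)/4 */
        uint32_t len = 33;                      /* payload bytes, deliberately unaligned */
        uint32_t dwords = (len + 3) >> 2;       /* round up to dwords: 9 */
        uint32_t plen = hdrwords + dwords + 1;  /* +1 for the PBC dword: 17 */

        printf("plen = %u dwords\n", plen);
        return 0;
    }
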
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 6ccb54f104a3..3d59736b49b2 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -554,6 +554,7 @@ struct ipath_ibdev {
554 u32 z_pkey_violations; /* starting count for PMA */ 554 u32 z_pkey_violations; /* starting count for PMA */
555 u32 z_local_link_integrity_errors; /* starting count for PMA */ 555 u32 z_local_link_integrity_errors; /* starting count for PMA */
556 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */ 556 u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */
557 u32 z_vl15_dropped; /* starting count for PMA */
557 u32 n_rc_resends; 558 u32 n_rc_resends;
558 u32 n_rc_acks; 559 u32 n_rc_acks;
559 u32 n_rc_qacks; 560 u32 n_rc_qacks;
@@ -598,6 +599,7 @@ struct ipath_verbs_counters {
598 u64 port_rcv_packets; 599 u64 port_rcv_packets;
599 u32 local_link_integrity_errors; 600 u32 local_link_integrity_errors;
600 u32 excessive_buffer_overrun_errors; 601 u32 excessive_buffer_overrun_errors;
602 u32 vl15_dropped;
601}; 603};
602 604
603static inline struct ipath_mr *to_imr(struct ib_mr *ibmr) 605static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
@@ -830,7 +832,17 @@ unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
830 832
831extern const enum ib_wc_opcode ib_ipath_wc_opcode[]; 833extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
832 834
835/*
836 * Below converts HCA-specific LinkTrainingState to IB PhysPortState
837 * values.
838 */
833extern const u8 ipath_cvt_physportstate[]; 839extern const u8 ipath_cvt_physportstate[];
840#define IB_PHYSPORTSTATE_SLEEP 1
841#define IB_PHYSPORTSTATE_POLL 2
842#define IB_PHYSPORTSTATE_DISABLED 3
843#define IB_PHYSPORTSTATE_CFG_TRAIN 4
844#define IB_PHYSPORTSTATE_LINKUP 5
845#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
834 846
835extern const int ib_ipath_state_ops[]; 847extern const int ib_ipath_state_ops[];
836 848
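
With the new IB_PHYSPORTSTATE_* names, the conversion in ipath_query_port() is a plain table lookup: mask the last IBC status down to the link-training state and index ipath_cvt_physportstate[]. The shape of that lookup, reduced to a sketch (the 5-bit mask is an assumption standing in for dd->ibcs_lts_mask):

    #include <stdint.h>
    #include <stdio.h>

    #define IB_PHYSPORTSTATE_CFG_TRAIN 4   /* from the header above */

    /* Tiny stand-in for ipath_cvt_physportstate[]; one entry shown,
     * matching the [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN slot above. */
    static const uint8_t cvt[32] = {
        [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
    };

    int main(void)
    {
        uint64_t ibcstat  = 0x11;   /* pretend last IBC status */
        uint64_t lts_mask = 0x1f;   /* assumed 5-bit LT-state mask */

        printf("phys_state = %u\n", cvt[ibcstat & lts_mask]);  /* 4 */
        return 0;
    }
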
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 9d32c49cc651..7950aa6e8184 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -313,6 +313,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
313 struct mlx4_ib_srq *srq; 313 struct mlx4_ib_srq *srq;
314 int is_send; 314 int is_send;
315 int is_error; 315 int is_error;
316 u32 g_mlpath_rqpn;
316 u16 wqe_ctr; 317 u16 wqe_ctr;
317 318
318 cqe = next_cqe_sw(cq); 319 cqe = next_cqe_sw(cq);
@@ -426,10 +427,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
426 427
427 wc->slid = be16_to_cpu(cqe->rlid); 428 wc->slid = be16_to_cpu(cqe->rlid);
428 wc->sl = cqe->sl >> 4; 429 wc->sl = cqe->sl >> 4;
429 wc->src_qp = be32_to_cpu(cqe->g_mlpath_rqpn) & 0xffffff; 430 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
430 wc->dlid_path_bits = (be32_to_cpu(cqe->g_mlpath_rqpn) >> 24) & 0x7f; 431 wc->src_qp = g_mlpath_rqpn & 0xffffff;
431 wc->wc_flags |= be32_to_cpu(cqe->g_mlpath_rqpn) & 0x80000000 ? 432 wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
432 IB_WC_GRH : 0; 433 wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
433 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; 434 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
434 } 435 }
435 436
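
Caching g_mlpath_rqpn in mlx4_ib_poll_one() swaps the CQE field once instead of three times; the decode itself is unchanged: bits 23:0 carry the source QPN, bits 30:24 the DLID path bits, and bit 31 flags a GRH. Decoding a made-up value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t g_mlpath_rqpn = 0x80ab1234;   /* hypothetical, already byte-swapped */

        uint32_t src_qp  = g_mlpath_rqpn & 0xffffff;        /* 0xab1234 */
        uint32_t path    = (g_mlpath_rqpn >> 24) & 0x7f;    /* 0x00 */
        int      has_grh = !!(g_mlpath_rqpn & 0x80000000);  /* 1 */

        printf("src_qp=0x%06x path_bits=0x%02x grh=%d\n",
               src_qp, path, has_grh);
        return 0;
    }
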
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d8287d9db41e..96a39b5c9254 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -52,7 +52,7 @@ MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
52MODULE_LICENSE("Dual BSD/GPL"); 52MODULE_LICENSE("Dual BSD/GPL");
53MODULE_VERSION(DRV_VERSION); 53MODULE_VERSION(DRV_VERSION);
54 54
55static const char mlx4_ib_version[] __devinitdata = 55static const char mlx4_ib_version[] =
56 DRV_NAME ": Mellanox ConnectX InfiniBand driver v" 56 DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
57 DRV_VERSION " (" DRV_RELDATE ")\n"; 57 DRV_VERSION " (" DRV_RELDATE ")\n";
58 58
@@ -468,6 +468,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
468 if (err) 468 if (err)
469 goto out; 469 goto out;
470 470
471 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
471 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 472 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
472 473
473out: 474out:
@@ -516,9 +517,16 @@ static struct class_device_attribute *mlx4_class_attributes[] = {
516 517
517static void *mlx4_ib_add(struct mlx4_dev *dev) 518static void *mlx4_ib_add(struct mlx4_dev *dev)
518{ 519{
520 static int mlx4_ib_version_printed;
519 struct mlx4_ib_dev *ibdev; 521 struct mlx4_ib_dev *ibdev;
520 int i; 522 int i;
521 523
524
525 if (!mlx4_ib_version_printed) {
526 printk(KERN_INFO "%s", mlx4_ib_version);
527 ++mlx4_ib_version_printed;
528 }
529
522 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev); 530 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
523 if (!ibdev) { 531 if (!ibdev) {
524 dev_err(&dev->pdev->dev, "Device struct alloc failed\n"); 532 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
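
Dropping __devinitdata from mlx4_ib_version goes hand in hand with printing it from mlx4_ib_add(): the string is now referenced after init memory is discarded, so it must stay resident, and the function-local static ensures the banner appears once no matter how many HCAs probe. The print-once idiom in isolation (names hypothetical):

    #include <stdio.h>

    static void hca_add(const char *name)    /* sketch of mlx4_ib_add()'s banner logic */
    {
        static int version_printed;          /* persists across calls */

        if (!version_printed) {
            puts("example version banner");  /* stands in for mlx4_ib_version */
            ++version_printed;
        }
        printf("probing %s\n", name);
    }

    int main(void)
    {
        hca_add("hca0");   /* banner printed */
        hca_add("hca1");   /* banner suppressed */
        return 0;
    }
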
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6966f943f440..09a30dd12b14 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1255,9 +1255,14 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1255 if (err) 1255 if (err)
1256 goto out; 1256 goto out;
1257 1257
1258 MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); 1258 if (!mthca_is_memfree(dev)) {
1259 MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); 1259 MTHCA_GET(adapter->vendor_id, outbox,
1260 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1260 QUERY_ADAPTER_VENDOR_ID_OFFSET);
1261 MTHCA_GET(adapter->device_id, outbox,
1262 QUERY_ADAPTER_DEVICE_ID_OFFSET);
1263 MTHCA_GET(adapter->revision_id, outbox,
1264 QUERY_ADAPTER_REVISION_ID_OFFSET);
1265 }
1261 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1266 MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1262 1267
1263 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4, 1268 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 15aa32eb78b6..7bbdd1f4e6c7 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -60,13 +60,12 @@
60enum { 60enum {
61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1, 61 MTHCA_FLAG_DDR_HIDDEN = 1 << 1,
62 MTHCA_FLAG_SRQ = 1 << 2, 62 MTHCA_FLAG_SRQ = 1 << 2,
63 MTHCA_FLAG_MSI = 1 << 3, 63 MTHCA_FLAG_MSI_X = 1 << 3,
64 MTHCA_FLAG_MSI_X = 1 << 4, 64 MTHCA_FLAG_NO_LAM = 1 << 4,
65 MTHCA_FLAG_NO_LAM = 1 << 5, 65 MTHCA_FLAG_FMR = 1 << 5,
66 MTHCA_FLAG_FMR = 1 << 6, 66 MTHCA_FLAG_MEMFREE = 1 << 6,
67 MTHCA_FLAG_MEMFREE = 1 << 7, 67 MTHCA_FLAG_PCIE = 1 << 7,
68 MTHCA_FLAG_PCIE = 1 << 8, 68 MTHCA_FLAG_SINAI_OPT = 1 << 8
69 MTHCA_FLAG_SINAI_OPT = 1 << 9
70}; 69};
71 70
72enum { 71enum {
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index b29de51b7f35..b60eb5df96e8 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -827,8 +827,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
827 if (err) 827 if (err)
828 goto err_out_free; 828 goto err_out_free;
829 829
830 if (dev->mthca_flags & MTHCA_FLAG_MSI || 830 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
831 dev->mthca_flags & MTHCA_FLAG_MSI_X) {
832 dev->eq_table.clr_mask = 0; 831 dev->eq_table.clr_mask = 0;
833 } else { 832 } else {
834 dev->eq_table.clr_mask = 833 dev->eq_table.clr_mask =
@@ -839,8 +838,7 @@ int mthca_init_eq_table(struct mthca_dev *dev)
839 838
840 dev->eq_table.arm_mask = 0; 839 dev->eq_table.arm_mask = 0;
841 840
842 intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ? 841 intr = dev->eq_table.inta_pin;
843 128 : dev->eq_table.inta_pin;
844 842
845 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, 843 err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
846 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, 844 (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 60de6f93869e..cd3d8adbef9f 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -65,14 +65,9 @@ static int msi_x = 1;
65module_param(msi_x, int, 0444); 65module_param(msi_x, int, 0444);
66MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 66MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
67 67
68static int msi = 0;
69module_param(msi, int, 0444);
70MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero (deprecated, use MSI-X instead)");
71
72#else /* CONFIG_PCI_MSI */ 68#else /* CONFIG_PCI_MSI */
73 69
74#define msi_x (0) 70#define msi_x (0)
75#define msi (0)
76 71
77#endif /* CONFIG_PCI_MSI */ 72#endif /* CONFIG_PCI_MSI */
78 73
@@ -131,7 +126,7 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
131MODULE_PARM_DESC(fmr_reserved_mtts, 126MODULE_PARM_DESC(fmr_reserved_mtts,
132 "number of memory translation table segments reserved for FMR"); 127 "number of memory translation table segments reserved for FMR");
133 128
134static const char mthca_version[] __devinitdata = 129static char mthca_version[] __devinitdata =
135 DRV_NAME ": Mellanox InfiniBand HCA driver v" 130 DRV_NAME ": Mellanox InfiniBand HCA driver v"
136 DRV_VERSION " (" DRV_RELDATE ")\n"; 131 DRV_VERSION " (" DRV_RELDATE ")\n";
137 132
@@ -740,7 +735,8 @@ static int mthca_init_hca(struct mthca_dev *mdev)
740 } 735 }
741 736
742 mdev->eq_table.inta_pin = adapter.inta_pin; 737 mdev->eq_table.inta_pin = adapter.inta_pin;
743 mdev->rev_id = adapter.revision_id; 738 if (!mthca_is_memfree(mdev))
739 mdev->rev_id = adapter.revision_id;
744 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); 740 memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);
745 741
746 return 0; 742 return 0;
@@ -816,13 +812,11 @@ static int mthca_setup_hca(struct mthca_dev *dev)
816 812
817 err = mthca_NOP(dev, &status); 813 err = mthca_NOP(dev, &status);
818 if (err || status) { 814 if (err || status) {
819 if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)) { 815 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
820 mthca_warn(dev, "NOP command failed to generate interrupt " 816 mthca_warn(dev, "NOP command failed to generate interrupt "
821 "(IRQ %d).\n", 817 "(IRQ %d).\n",
822 dev->mthca_flags & MTHCA_FLAG_MSI_X ? 818 dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
823 dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector : 819 mthca_warn(dev, "Trying again with MSI-X disabled.\n");
824 dev->pdev->irq);
825 mthca_warn(dev, "Trying again with MSI/MSI-X disabled.\n");
826 } else { 820 } else {
827 mthca_err(dev, "NOP command failed to generate interrupt " 821 mthca_err(dev, "NOP command failed to generate interrupt "
828 "(IRQ %d), aborting.\n", 822 "(IRQ %d), aborting.\n",
@@ -1005,7 +999,7 @@ static struct {
1005 .flags = 0 }, 999 .flags = 0 },
1006 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200), 1000 [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
1007 .flags = MTHCA_FLAG_PCIE }, 1001 .flags = MTHCA_FLAG_PCIE },
1008 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0), 1002 [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
1009 .flags = MTHCA_FLAG_MEMFREE | 1003 .flags = MTHCA_FLAG_MEMFREE |
1010 MTHCA_FLAG_PCIE }, 1004 MTHCA_FLAG_PCIE },
1011 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0), 1005 [SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
@@ -1128,29 +1122,12 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
1128 1122
1129 if (msi_x && !mthca_enable_msi_x(mdev)) 1123 if (msi_x && !mthca_enable_msi_x(mdev))
1130 mdev->mthca_flags |= MTHCA_FLAG_MSI_X; 1124 mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
1131 else if (msi) {
1132 static int warned;
1133
1134 if (!warned) {
1135 printk(KERN_WARNING PFX "WARNING: MSI support will be "
1136 "removed from the ib_mthca driver in January 2008.\n");
1137 printk(KERN_WARNING " If you are using MSI and cannot "
1138 "switch to MSI-X, please tell "
1139 "<general@lists.openfabrics.org>.\n");
1140 ++warned;
1141 }
1142
1143 if (!pci_enable_msi(pdev))
1144 mdev->mthca_flags |= MTHCA_FLAG_MSI;
1145 }
1146 1125
1147 err = mthca_setup_hca(mdev); 1126 err = mthca_setup_hca(mdev);
1148 if (err == -EBUSY && (mdev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))) { 1127 if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
1149 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) 1128 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1150 pci_disable_msix(pdev); 1129 pci_disable_msix(pdev);
1151 if (mdev->mthca_flags & MTHCA_FLAG_MSI) 1130 mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
1152 pci_disable_msi(pdev);
1153 mdev->mthca_flags &= ~(MTHCA_FLAG_MSI_X | MTHCA_FLAG_MSI);
1154 1131
1155 err = mthca_setup_hca(mdev); 1132 err = mthca_setup_hca(mdev);
1156 } 1133 }
@@ -1192,8 +1169,6 @@ err_cleanup:
1192err_close: 1169err_close:
1193 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) 1170 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1194 pci_disable_msix(pdev); 1171 pci_disable_msix(pdev);
1195 if (mdev->mthca_flags & MTHCA_FLAG_MSI)
1196 pci_disable_msi(pdev);
1197 1172
1198 mthca_close_hca(mdev); 1173 mthca_close_hca(mdev);
1199 1174
@@ -1246,8 +1221,6 @@ static void __mthca_remove_one(struct pci_dev *pdev)
1246 1221
1247 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X) 1222 if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
1248 pci_disable_msix(pdev); 1223 pci_disable_msix(pdev);
1249 if (mdev->mthca_flags & MTHCA_FLAG_MSI)
1250 pci_disable_msi(pdev);
1251 1224
1252 ib_dealloc_device(&mdev->ib_dev); 1225 ib_dealloc_device(&mdev->ib_dev);
1253 mthca_release_regions(pdev, mdev->mthca_flags & 1226 mthca_release_regions(pdev, mdev->mthca_flags &
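
With plain MSI gone from mthca, the recovery path reduces to one case: if mthca_setup_hca() fails with -EBUSY while MSI-X is active (the NOP interrupt self-test did not fire), disable MSI-X and retry once on the legacy INTx pin. The retry shape as a sketch, with a hypothetical setup_hca() that fails only under MSI-X:

    #include <errno.h>
    #include <stdio.h>

    #define FLAG_MSI_X 0x1

    static int setup_hca(unsigned flags)   /* hypothetical stand-in */
    {
        return (flags & FLAG_MSI_X) ? -EBUSY : 0;
    }

    int main(void)
    {
        unsigned flags = FLAG_MSI_X;
        int err = setup_hca(flags);

        if (err == -EBUSY && (flags & FLAG_MSI_X)) {
            puts("NOP test failed; retrying with MSI-X disabled");
            flags &= ~FLAG_MSI_X;          /* pci_disable_msix() in the driver */
            err = setup_hca(flags);
        }
        printf("setup err=%d\n", err);
        return err ? 1 : 0;
    }
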
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index aa6c70a6a36f..3b6985557cb2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -613,8 +613,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
613 sizeof *(mr->mem.tavor.mpt) * idx; 613 sizeof *(mr->mem.tavor.mpt) * idx;
614 614
615 mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); 615 mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
616 if (IS_ERR(mr->mtt)) 616 if (IS_ERR(mr->mtt)) {
617 err = PTR_ERR(mr->mtt);
617 goto err_out_table; 618 goto err_out_table;
619 }
618 620
619 mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; 621 mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
620 622
@@ -627,8 +629,10 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
627 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; 629 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
628 630
629 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 631 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
630 if (IS_ERR(mailbox)) 632 if (IS_ERR(mailbox)) {
633 err = PTR_ERR(mailbox);
631 goto err_out_free_mtt; 634 goto err_out_free_mtt;
635 }
632 636
633 mpt_entry = mailbox->buf; 637 mpt_entry = mailbox->buf;
634 638
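
Both mthca_fmr_alloc() hunks cure the same error-path bug: without the PTR_ERR() assignment, the function jumps to its cleanup label with whatever err happened to hold (often 0, i.e. success). The kernel packs small negative errnos into the pointer value itself; the three helpers below reimplement that convention purely for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace imitations of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
    static void *err_ptr(long err)      { return (void *)err; }
    static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
    static long  ptr_err(const void *p) { return (long)p; }

    static void *alloc_mtt(void) { return err_ptr(-ENOMEM); }  /* always fails */

    int main(void)
    {
        int err = 0;
        void *mtt = alloc_mtt();

        if (is_err(mtt)) {
            err = ptr_err(mtt);    /* the assignment the patch adds */
            goto err_out;
        }
        return 0;

    err_out:
        printf("alloc failed: %d\n", err);   /* -12, not a stale 0 */
        return 1;
    }
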
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6bcde1cb9688..9e491df6419c 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -923,17 +923,13 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
923 struct mthca_mr *mr; 923 struct mthca_mr *mr;
924 u64 *page_list; 924 u64 *page_list;
925 u64 total_size; 925 u64 total_size;
926 u64 mask; 926 unsigned long mask;
927 int shift; 927 int shift;
928 int npages; 928 int npages;
929 int err; 929 int err;
930 int i, j, n; 930 int i, j, n;
931 931
932 /* First check that we have enough alignment */ 932 mask = buffer_list[0].addr ^ *iova_start;
933 if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
934 return ERR_PTR(-EINVAL);
935
936 mask = 0;
937 total_size = 0; 933 total_size = 0;
938 for (i = 0; i < num_phys_buf; ++i) { 934 for (i = 0; i < num_phys_buf; ++i) {
939 if (i != 0) 935 if (i != 0)
@@ -947,17 +943,7 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
947 if (mask & ~PAGE_MASK) 943 if (mask & ~PAGE_MASK)
948 return ERR_PTR(-EINVAL); 944 return ERR_PTR(-EINVAL);
949 945
950 /* Find largest page shift we can use to cover buffers */ 946 shift = __ffs(mask | 1 << 31);
951 for (shift = PAGE_SHIFT; shift < 31; ++shift)
952 if (num_phys_buf > 1) {
953 if ((1ULL << shift) & mask)
954 break;
955 } else {
956 if (1ULL << shift >=
957 buffer_list[0].size +
958 (buffer_list[0].addr & ((1ULL << shift) - 1)))
959 break;
960 }
961 947
962 buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1); 948 buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
963 buffer_list[0].addr &= ~0ull << shift; 949 buffer_list[0].addr &= ~0ull << shift;
@@ -1270,6 +1256,8 @@ static int mthca_init_node_data(struct mthca_dev *dev)
1270 goto out; 1256 goto out;
1271 } 1257 }
1272 1258
1259 if (mthca_is_memfree(dev))
1260 dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1273 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 1261 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1274 1262
1275out: 1263out:
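
The rewritten alignment logic in mthca_reg_phys_mr() is dense but mechanical: XOR the first buffer's address with *iova_start, then OR in every interior boundary (each later buffer's start and each non-final buffer's end). Any bit set in mask marks a position where some boundary disagrees, so the lowest set bit is the largest page shift that keeps everything aligned; OR-ing in 1 << 31 caps the shift at 31 when mask would otherwise be zero. A worked example on two hypothetical buffers:

    #include <stdio.h>

    /* Open-coded lowest-set-bit, standing in for the kernel's __ffs(). */
    static int lowest_set_bit(unsigned long v)
    {
        int i = 0;
        while (!(v & 1)) { v >>= 1; ++i; }
        return i;
    }

    int main(void)
    {
        unsigned long iova  = 0x10000;
        unsigned long addr0 = 0x200000, size0 = 0x10000;  /* 64 KiB aligned */
        unsigned long addr1 = 0x301000;                   /* only 4 KiB aligned */

        unsigned long mask = addr0 ^ iova;  /* first start vs. iova */
        mask |= addr0 + size0;              /* interior boundary */
        mask |= addr1;                      /* second buffer's start */

        int shift = lowest_set_bit(mask | 1UL << 31);
        printf("mask=0x%lx shift=%d page=%lu\n", mask, shift, 1UL << shift);
        return 0;   /* prints shift=12: 4 KiB pages fit every boundary */
    }
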
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 0e5461c65731..db5595bbf7f0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1175,6 +1175,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1175{ 1175{
1176 int ret; 1176 int ret;
1177 int i; 1177 int i;
1178 struct mthca_next_seg *next;
1178 1179
1179 qp->refcount = 1; 1180 qp->refcount = 1;
1180 init_waitqueue_head(&qp->wait); 1181 init_waitqueue_head(&qp->wait);
@@ -1217,7 +1218,6 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1217 } 1218 }
1218 1219
1219 if (mthca_is_memfree(dev)) { 1220 if (mthca_is_memfree(dev)) {
1220 struct mthca_next_seg *next;
1221 struct mthca_data_seg *scatter; 1221 struct mthca_data_seg *scatter;
1222 int size = (sizeof (struct mthca_next_seg) + 1222 int size = (sizeof (struct mthca_next_seg) +
1223 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; 1223 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
@@ -1240,6 +1240,13 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
1240 qp->sq.wqe_shift) + 1240 qp->sq.wqe_shift) +
1241 qp->send_wqe_offset); 1241 qp->send_wqe_offset);
1242 } 1242 }
1243 } else {
1244 for (i = 0; i < qp->rq.max; ++i) {
1245 next = get_recv_wqe(qp, i);
1246 next->nda_op = htonl((((i + 1) % qp->rq.max) <<
1247 qp->rq.wqe_shift) | 1);
1248 }
1249
1243 } 1250 }
1244 1251
1245 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); 1252 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
@@ -1863,7 +1870,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1863 prev_wqe = qp->rq.last; 1870 prev_wqe = qp->rq.last;
1864 qp->rq.last = wqe; 1871 qp->rq.last = wqe;
1865 1872
1866 ((struct mthca_next_seg *) wqe)->nda_op = 0;
1867 ((struct mthca_next_seg *) wqe)->ee_nds = 1873 ((struct mthca_next_seg *) wqe)->ee_nds =
1868 cpu_to_be32(MTHCA_NEXT_DBD); 1874 cpu_to_be32(MTHCA_NEXT_DBD);
1869 ((struct mthca_next_seg *) wqe)->flags = 0; 1875 ((struct mthca_next_seg *) wqe)->flags = 0;
@@ -1885,9 +1891,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
1885 1891
1886 qp->wrid[ind] = wr->wr_id; 1892 qp->wrid[ind] = wr->wr_id;
1887 1893
1888 ((struct mthca_next_seg *) prev_wqe)->nda_op =
1889 cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
1890 wmb();
1891 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 1894 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
1892 cpu_to_be32(MTHCA_NEXT_DBD | size); 1895 cpu_to_be32(MTHCA_NEXT_DBD | size);
1893 1896
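
On Tavor hardware the receive queue is now pre-linked at creation: the new else branch writes each WQE's nda_op once, pointing at the next slot in the ring (byte offset shifted by rq.wqe_shift, low bit set as the hardware expects), so mthca_tavor_post_receive() can stop rewriting nda_op and the wmb() that ordered those two stores disappears from the hot path. The link computation in isolation, with illustrative sizes:

    #include <stdio.h>

    int main(void)
    {
        int max = 4;          /* illustrative RQ depth */
        int wqe_shift = 6;    /* 64-byte WQEs */
        int i;

        for (i = 0; i < max; ++i) {
            /* Next-descriptor word: ring offset of slot i+1, low bit set,
             * mirroring the nda_op initialization in the hunk above. */
            unsigned nda_op = (unsigned)(((i + 1) % max) << wqe_shift) | 1;
            printf("wqe %d -> nda_op 0x%x\n", i, nda_op);
        }
        return 0;
    }
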
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 553d681f6813..a5ffff6e1026 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -175,9 +175,17 @@ static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
175 * scatter list L_Keys to the sentry value of 0x100. 175 * scatter list L_Keys to the sentry value of 0x100.
176 */ 176 */
177 for (i = 0; i < srq->max; ++i) { 177 for (i = 0; i < srq->max; ++i) {
178 wqe = get_wqe(srq, i); 178 struct mthca_next_seg *next;
179 179
180 *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1; 180 next = wqe = get_wqe(srq, i);
181
182 if (i < srq->max - 1) {
183 *wqe_to_link(wqe) = i + 1;
184 next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
185 } else {
186 *wqe_to_link(wqe) = -1;
187 next->nda_op = 0;
188 }
181 189
182 for (scatter = wqe + sizeof (struct mthca_next_seg); 190 for (scatter = wqe + sizeof (struct mthca_next_seg);
183 (void *) scatter < wqe + (1 << srq->wqe_shift); 191 (void *) scatter < wqe + (1 << srq->wqe_shift);
@@ -470,16 +478,15 @@ out:
470void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) 478void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
471{ 479{
472 int ind; 480 int ind;
481 struct mthca_next_seg *last_free;
473 482
474 ind = wqe_addr >> srq->wqe_shift; 483 ind = wqe_addr >> srq->wqe_shift;
475 484
476 spin_lock(&srq->lock); 485 spin_lock(&srq->lock);
477 486
478 if (likely(srq->first_free >= 0)) 487 last_free = get_wqe(srq, srq->last_free);
479 *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; 488 *wqe_to_link(last_free) = ind;
480 else 489 last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
481 srq->first_free = ind;
482
483 *wqe_to_link(get_wqe(srq, ind)) = -1; 490 *wqe_to_link(get_wqe(srq, ind)) = -1;
484 srq->last_free = ind; 491 srq->last_free = ind;
485 492
@@ -506,15 +513,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
506 first_ind = srq->first_free; 513 first_ind = srq->first_free;
507 514
508 for (nreq = 0; wr; wr = wr->next) { 515 for (nreq = 0; wr; wr = wr->next) {
509 ind = srq->first_free; 516 ind = srq->first_free;
510
511 if (unlikely(ind < 0)) {
512 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
513 err = -ENOMEM;
514 *bad_wr = wr;
515 break;
516 }
517
518 wqe = get_wqe(srq, ind); 517 wqe = get_wqe(srq, ind);
519 next_ind = *wqe_to_link(wqe); 518 next_ind = *wqe_to_link(wqe);
520 519
@@ -528,7 +527,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
528 prev_wqe = srq->last; 527 prev_wqe = srq->last;
529 srq->last = wqe; 528 srq->last = wqe;
530 529
531 ((struct mthca_next_seg *) wqe)->nda_op = 0;
532 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 530 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
533 /* flags field will always remain 0 */ 531 /* flags field will always remain 0 */
534 532
@@ -549,9 +547,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
549 if (i < srq->max_gs) 547 if (i < srq->max_gs)
550 mthca_set_data_seg_inval(wqe); 548 mthca_set_data_seg_inval(wqe);
551 549
552 ((struct mthca_next_seg *) prev_wqe)->nda_op =
553 cpu_to_be32((ind << srq->wqe_shift) | 1);
554 wmb();
555 ((struct mthca_next_seg *) prev_wqe)->ee_nds = 550 ((struct mthca_next_seg *) prev_wqe)->ee_nds =
556 cpu_to_be32(MTHCA_NEXT_DBD); 551 cpu_to_be32(MTHCA_NEXT_DBD);
557 552
@@ -614,15 +609,7 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
614 spin_lock_irqsave(&srq->lock, flags); 609 spin_lock_irqsave(&srq->lock, flags);
615 610
616 for (nreq = 0; wr; ++nreq, wr = wr->next) { 611 for (nreq = 0; wr; ++nreq, wr = wr->next) {
617 ind = srq->first_free; 612 ind = srq->first_free;
618
619 if (unlikely(ind < 0)) {
620 mthca_err(dev, "SRQ %06x full\n", srq->srqn);
621 err = -ENOMEM;
622 *bad_wr = wr;
623 break;
624 }
625
626 wqe = get_wqe(srq, ind); 613 wqe = get_wqe(srq, ind);
627 next_ind = *wqe_to_link(wqe); 614 next_ind = *wqe_to_link(wqe);
628 615
@@ -633,8 +620,6 @@ int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
633 break; 620 break;
634 } 621 }
635 622
636 ((struct mthca_next_seg *) wqe)->nda_op =
637 cpu_to_be32((next_ind << srq->wqe_shift) | 1);
638 ((struct mthca_next_seg *) wqe)->ee_nds = 0; 623 ((struct mthca_next_seg *) wqe)->ee_nds = 0;
639 /* flags field will always remain 0 */ 624 /* flags field will always remain 0 */
640 625
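
The SRQ hunks apply the same pre-linking idea to the free list threaded through the WQEs themselves: initialization chains every free entry's nda_op to its successor, and mthca_free_srq_wqe() re-links a completed WQE onto the tail while refreshing the old tail's nda_op. With the list kept well-formed this way, the deleted "SRQ full" checks (first_free < 0) no longer have a case to catch. A sketch of the array-backed free list, with indices standing in for WQE addresses and hypothetical take/put helpers:

    #include <stdio.h>

    #define MAX 4

    static int next_free[MAX];   /* *wqe_to_link() stand-in; -1 terminates */
    static int first_free, last_free;

    static void srq_init(void)
    {
        int i;
        for (i = 0; i < MAX; ++i)
            next_free[i] = (i < MAX - 1) ? i + 1 : -1;
        first_free = 0;
        last_free = MAX - 1;
    }

    static int srq_take(void)    /* post_srq_recv() side */
    {
        int ind = first_free;
        first_free = next_free[ind];
        return ind;
    }

    static void srq_put(int ind) /* mthca_free_srq_wqe() side */
    {
        next_free[last_free] = ind;   /* old tail links to the freed WQE */
        next_free[ind] = -1;
        last_free = ind;
    }

    int main(void)
    {
        srq_init();
        int a = srq_take();
        int b = srq_take();
        printf("took %d and %d\n", a, b);
        srq_put(a);
        printf("first_free=%d last_free=%d\n", first_free, last_free);
        return 0;
    }
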
diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig
new file mode 100644
index 000000000000..2aeb7ac972a9
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Kconfig
@@ -0,0 +1,16 @@
1config INFINIBAND_NES
2 tristate "NetEffect RNIC Driver"
3 depends on PCI && INET && INFINIBAND
4 select LIBCRC32C
5 ---help---
 6 This is a low-level driver for NetEffect RDMA-enabled
 7 Network Interface Cards (RNICs).
8
9config INFINIBAND_NES_DEBUG
10 bool "Verbose debugging output"
11 depends on INFINIBAND_NES
12 default n
13 ---help---
14 This option causes the NetEffect RNIC driver to produce debug
15 messages. Select this if you are developing the driver
16 or trying to diagnose a problem.
diff --git a/drivers/infiniband/hw/nes/Makefile b/drivers/infiniband/hw/nes/Makefile
new file mode 100644
index 000000000000..35148513c47e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_INFINIBAND_NES) += iw_nes.o
2
3iw_nes-objs := nes.o nes_hw.o nes_nic.o nes_utils.o nes_verbs.o nes_cm.o
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
new file mode 100644
index 000000000000..7f8853b44ee1
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -0,0 +1,1152 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if_vlan.h>
41#include <linux/crc32.h>
42#include <linux/in.h>
43#include <linux/fs.h>
44#include <linux/init.h>
45#include <linux/if_arp.h>
46#include <linux/highmem.h>
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/byteorder.h>
50#include <rdma/ib_smi.h>
51#include <rdma/ib_verbs.h>
52#include <rdma/ib_pack.h>
53#include <rdma/iw_cm.h>
54
55#include "nes.h"
56
57#include <net/netevent.h>
58#include <net/neighbour.h>
59#include <linux/route.h>
60#include <net/ip_fib.h>
61
62MODULE_AUTHOR("NetEffect");
63MODULE_DESCRIPTION("NetEffect RNIC Low-level iWARP Driver");
64MODULE_LICENSE("Dual BSD/GPL");
65MODULE_VERSION(DRV_VERSION);
66
67int max_mtu = 9000;
68int nics_per_function = 1;
69int interrupt_mod_interval = 0;
70
71
72/* Interoperability */
73int mpa_version = 1;
74module_param(mpa_version, int, 0);
75MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)");
76
77/* Interoperability */
78int disable_mpa_crc = 0;
79module_param(disable_mpa_crc, int, 0);
80MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
81
82unsigned int send_first = 0;
83module_param(send_first, int, 0);
84MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
85
86
87unsigned int nes_drv_opt = 0;
88module_param(nes_drv_opt, int, 0);
89MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
90
91unsigned int nes_debug_level = 0;
92module_param_named(debug_level, nes_debug_level, uint, 0644);
93MODULE_PARM_DESC(debug_level, "Enable debug output level");
94
95LIST_HEAD(nes_adapter_list);
96LIST_HEAD(nes_dev_list);
97
98atomic_t qps_destroyed;
99atomic_t cqp_reqs_allocated;
100atomic_t cqp_reqs_freed;
101atomic_t cqp_reqs_dynallocated;
102atomic_t cqp_reqs_dynfreed;
103atomic_t cqp_reqs_queued;
104atomic_t cqp_reqs_redriven;
105
106static void nes_print_macaddr(struct net_device *netdev);
107static irqreturn_t nes_interrupt(int, void *);
108static int __devinit nes_probe(struct pci_dev *, const struct pci_device_id *);
109static void __devexit nes_remove(struct pci_dev *);
110static int __init nes_init_module(void);
111static void __exit nes_exit_module(void);
112static unsigned int ee_flsh_adapter;
113static unsigned int sysfs_nonidx_addr;
114static unsigned int sysfs_idx_addr;
115
116static struct pci_device_id nes_pci_table[] = {
117 {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
118 {0}
119};
120
121MODULE_DEVICE_TABLE(pci, nes_pci_table);
122
123static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
124static int nes_net_event(struct notifier_block *, unsigned long, void *);
125static int nes_notifiers_registered;
126
127
128static struct notifier_block nes_inetaddr_notifier = {
129 .notifier_call = nes_inetaddr_event
130};
131
132static struct notifier_block nes_net_notifier = {
133 .notifier_call = nes_net_event
134};
135
136
137
138
139/**
140 * nes_inetaddr_event
141 */
142static int nes_inetaddr_event(struct notifier_block *notifier,
143 unsigned long event, void *ptr)
144{
145 struct in_ifaddr *ifa = ptr;
146 struct net_device *event_netdev = ifa->ifa_dev->dev;
147 struct nes_device *nesdev;
148 struct net_device *netdev;
149 struct nes_vnic *nesvnic;
150 unsigned int addr;
151 unsigned int mask;
152
153 addr = ntohl(ifa->ifa_address);
154 mask = ntohl(ifa->ifa_mask);
155 nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
156 addr, mask);
157 list_for_each_entry(nesdev, &nes_dev_list, list) {
158 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
159 nesdev, nesdev->netdev[0]->name);
160 netdev = nesdev->netdev[0];
161 nesvnic = netdev_priv(netdev);
162 if (netdev == event_netdev) {
163 if (nesvnic->rdma_enabled == 0) {
164 nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
165 " RDMA is not enabled.\n",
166 netdev->name);
167 return NOTIFY_OK;
168 }
169 /* we have ifa->ifa_address/mask here if we need it */
170 switch (event) {
171 case NETDEV_DOWN:
172 nes_debug(NES_DBG_NETDEV, "event:DOWN\n");
173 nes_write_indexed(nesdev,
174 NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)), 0);
175
176 nes_manage_arp_cache(netdev, netdev->dev_addr,
177 ntohl(nesvnic->local_ipaddr), NES_ARP_DELETE);
178 nesvnic->local_ipaddr = 0;
179 return NOTIFY_OK;
180 break;
181 case NETDEV_UP:
182 nes_debug(NES_DBG_NETDEV, "event:UP\n");
183
184 if (nesvnic->local_ipaddr != 0) {
185 nes_debug(NES_DBG_NETDEV, "Interface already has local_ipaddr\n");
186 return NOTIFY_OK;
187 }
188 /* Add the address to the IP table */
189 nesvnic->local_ipaddr = ifa->ifa_address;
190
191 nes_write_indexed(nesdev,
192 NES_IDX_DST_IP_ADDR+(0x10*PCI_FUNC(nesdev->pcidev->devfn)),
193 ntohl(ifa->ifa_address));
194 nes_manage_arp_cache(netdev, netdev->dev_addr,
195 ntohl(nesvnic->local_ipaddr), NES_ARP_ADD);
196 return NOTIFY_OK;
197 break;
198 default:
199 break;
200 }
201 }
202 }
203
204 return NOTIFY_DONE;
205}
206
207
208/**
209 * nes_net_event
210 */
211static int nes_net_event(struct notifier_block *notifier,
212 unsigned long event, void *ptr)
213{
214 struct neighbour *neigh = ptr;
215 struct nes_device *nesdev;
216 struct net_device *netdev;
217 struct nes_vnic *nesvnic;
218
219 switch (event) {
220 case NETEVENT_NEIGH_UPDATE:
221 list_for_each_entry(nesdev, &nes_dev_list, list) {
222 /* nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p.\n", nesdev); */
223 netdev = nesdev->netdev[0];
224 nesvnic = netdev_priv(netdev);
225 if (netdev == neigh->dev) {
226 if (nesvnic->rdma_enabled == 0) {
227 nes_debug(NES_DBG_NETDEV, "Skipping device %s since no RDMA\n",
228 netdev->name);
229 } else {
230 if (neigh->nud_state & NUD_VALID) {
231 nes_manage_arp_cache(neigh->dev, neigh->ha,
232 ntohl(*(__be32 *)neigh->primary_key), NES_ARP_ADD);
233 } else {
234 nes_manage_arp_cache(neigh->dev, neigh->ha,
235 ntohl(*(__be32 *)neigh->primary_key), NES_ARP_DELETE);
236 }
237 }
238 return NOTIFY_OK;
239 }
240 }
241 break;
242 default:
243 nes_debug(NES_DBG_NETDEV, "NETEVENT_ %lu undefined\n", event);
244 break;
245 }
246
247 return NOTIFY_DONE;
248}
249
250
251/**
252 * nes_add_ref
253 */
254void nes_add_ref(struct ib_qp *ibqp)
255{
256 struct nes_qp *nesqp;
257
258 nesqp = to_nesqp(ibqp);
259 nes_debug(NES_DBG_QP, "Bumping refcount for QP%u. Pre-inc value = %u\n",
260 ibqp->qp_num, atomic_read(&nesqp->refcount));
261 atomic_inc(&nesqp->refcount);
262}
263
264static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_request *cqp_request)
265{
266 unsigned long flags;
267 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
268 struct nes_adapter *nesadapter = nesdev->nesadapter;
269 u32 qp_id;
270
271 atomic_inc(&qps_destroyed);
272
273 /* Free the control structures */
274
275 qp_id = nesqp->hwqp.qp_id;
276 if (nesqp->pbl_vbase) {
277 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
278 nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
279 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
280 nesadapter->free_256pbl++;
281 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
282 pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
283 nesqp->pbl_vbase = NULL;
284
285 } else {
286 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
287 nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
288 }
289 nes_free_resource(nesadapter, nesadapter->allocated_qps, nesqp->hwqp.qp_id);
290
291 kfree(nesqp->allocated_buffer);
292
293}
294
295/**
296 * nes_rem_ref
297 */
298void nes_rem_ref(struct ib_qp *ibqp)
299{
300 u64 u64temp;
301 struct nes_qp *nesqp;
302 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
303 struct nes_device *nesdev = nesvnic->nesdev;
304 struct nes_adapter *nesadapter = nesdev->nesadapter;
305 struct nes_hw_cqp_wqe *cqp_wqe;
306 struct nes_cqp_request *cqp_request;
307 u32 opcode;
308
309 nesqp = to_nesqp(ibqp);
310
311 if (atomic_read(&nesqp->refcount) == 0) {
312 printk(KERN_INFO PFX "%s: Reference count already 0 for QP%d, last aeq = 0x%04X.\n",
313 __FUNCTION__, ibqp->qp_num, nesqp->last_aeq);
314 BUG();
315 }
316
317 if (atomic_dec_and_test(&nesqp->refcount)) {
318 nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = NULL;
319
320 /* Destroy the QP */
321 cqp_request = nes_get_cqp_request(nesdev);
322 if (cqp_request == NULL) {
323 nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
324 return;
325 }
326 cqp_request->waiting = 0;
327 cqp_request->callback = 1;
328 cqp_request->cqp_callback = nes_cqp_rem_ref_callback;
329 cqp_request->cqp_callback_pointer = nesqp;
330 cqp_wqe = &cqp_request->cqp_wqe;
331
332 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
333 opcode = NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_IWARP;
334
335 if (nesqp->hte_added) {
336 opcode |= NES_CQP_QP_DEL_HTE;
337 nesqp->hte_added = 0;
338 }
339 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
340 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
341 u64temp = (u64)nesqp->nesqp_context_pbase;
342 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
343 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
344 }
345}
346
347
348/**
349 * nes_get_qp
350 */
351struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
352{
353 struct nes_vnic *nesvnic = to_nesvnic(device);
354 struct nes_device *nesdev = nesvnic->nesdev;
355 struct nes_adapter *nesadapter = nesdev->nesadapter;
356
357 if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp)))
358 return NULL;
359
360 return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp;
361}
362
363
364/**
365 * nes_print_macaddr
366 */
367static void nes_print_macaddr(struct net_device *netdev)
368{
369 nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
370 netdev->name,
371 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
372 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
373 netdev->irq);
374}
375
376
377/**
378 * nes_interrupt - handle interrupts
379 */
380static irqreturn_t nes_interrupt(int irq, void *dev_id)
381{
382 struct nes_device *nesdev = (struct nes_device *)dev_id;
383 int handled = 0;
384 u32 int_mask;
385 u32 int_req;
386 u32 int_stat;
387 u32 intf_int_stat;
388 u32 timer_stat;
389
390 if (nesdev->msi_enabled) {
391 /* No need to read the interrupt pending register if msi is enabled */
392 handled = 1;
393 } else {
394 if (unlikely(nesdev->nesadapter->hw_rev == NE020_REV)) {
395 /* Master interrupt enable provides synchronization for kicking off bottom half
396 when interrupt sharing is going on */
397 int_mask = nes_read32(nesdev->regs + NES_INT_MASK);
398 if (int_mask & 0x80000000) {
399 /* Check interrupt status to see if this might be ours */
400 int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
401 int_req = nesdev->int_req;
402 if (int_stat&int_req) {
403 /* if interesting CEQ or AEQ is pending, claim the interrupt */
404 if ((int_stat&int_req) & (~(NES_INT_TIMER|NES_INT_INTF))) {
405 handled = 1;
406 } else {
407 if (((int_stat & int_req) & NES_INT_TIMER) == NES_INT_TIMER) {
408 /* Timer might be running but might be for another function */
409 timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
410 if ((timer_stat & nesdev->timer_int_req) != 0) {
411 handled = 1;
412 }
413 }
414 if ((((int_stat & int_req) & NES_INT_INTF) == NES_INT_INTF) &&
415 (handled == 0)) {
416 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
417 if ((intf_int_stat & nesdev->intf_int_req) != 0) {
418 handled = 1;
419 }
420 }
421 }
422 if (handled) {
423 nes_write32(nesdev->regs+NES_INT_MASK, int_mask & (~0x80000000));
424 int_mask = nes_read32(nesdev->regs+NES_INT_MASK);
425 /* Save off the status to save an additional read */
426 nesdev->int_stat = int_stat;
427 nesdev->napi_isr_ran = 1;
428 }
429 }
430 }
431 } else {
432 handled = nes_read32(nesdev->regs+NES_INT_PENDING);
433 }
434 }
435
436 if (handled) {
437
438 if (nes_napi_isr(nesdev) == 0) {
439 tasklet_schedule(&nesdev->dpc_tasklet);
440
441 }
442 return IRQ_HANDLED;
443 } else {
444 return IRQ_NONE;
445 }
446}
447
448
449/**
450 * nes_probe - Device initialization
451 */
452static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
453{
454 struct net_device *netdev = NULL;
455 struct nes_device *nesdev = NULL;
456 int ret = 0;
457 struct nes_vnic *nesvnic = NULL;
458 void __iomem *mmio_regs = NULL;
459 u8 hw_rev;
460
461 assert(pcidev != NULL);
462 assert(ent != NULL);
463
464 printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
465 DRV_VERSION, pci_name(pcidev));
466
467 ret = pci_enable_device(pcidev);
468 if (ret) {
469 printk(KERN_ERR PFX "Unable to enable PCI device. (%s)\n", pci_name(pcidev));
470 goto bail0;
471 }
472
473 nes_debug(NES_DBG_INIT, "BAR0 (@0x%08lX) size = 0x%lX bytes\n",
474 (long unsigned int)pci_resource_start(pcidev, BAR_0),
475 (long unsigned int)pci_resource_len(pcidev, BAR_0));
476 nes_debug(NES_DBG_INIT, "BAR1 (@0x%08lX) size = 0x%lX bytes\n",
477 (long unsigned int)pci_resource_start(pcidev, BAR_1),
478 (long unsigned int)pci_resource_len(pcidev, BAR_1));
479
480 /* Make sure PCI base addr are MMIO */
481 if (!(pci_resource_flags(pcidev, BAR_0) & IORESOURCE_MEM) ||
482 !(pci_resource_flags(pcidev, BAR_1) & IORESOURCE_MEM)) {
483 printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
484 ret = -ENODEV;
485 goto bail1;
486 }
487
488 /* Reserve PCI I/O and memory resources */
489 ret = pci_request_regions(pcidev, DRV_NAME);
490 if (ret) {
491 printk(KERN_ERR PFX "Unable to request regions. (%s)\n", pci_name(pcidev));
492 goto bail1;
493 }
494
 495 if (sizeof(dma_addr_t) > 4) {
496 ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
497 if (ret < 0) {
498 printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
499 goto bail2;
500 }
501 ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
502 if (ret) {
503 printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
504 goto bail2;
505 }
506 } else {
507 ret = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
508 if (ret < 0) {
509 printk(KERN_ERR PFX "32b DMA mask configuration failed\n");
510 goto bail2;
511 }
512 ret = pci_set_consistent_dma_mask(pcidev, DMA_32BIT_MASK);
513 if (ret) {
514 printk(KERN_ERR PFX "32b DMA consistent mask configuration failed\n");
515 goto bail2;
516 }
517 }
518
519 pci_set_master(pcidev);
520
521 /* Allocate hardware structure */
522 nesdev = kzalloc(sizeof(struct nes_device), GFP_KERNEL);
523 if (!nesdev) {
524 printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n", pci_name(pcidev));
525 ret = -ENOMEM;
526 goto bail2;
527 }
528
529 nes_debug(NES_DBG_INIT, "Allocated nes device at %p\n", nesdev);
530 nesdev->pcidev = pcidev;
531 pci_set_drvdata(pcidev, nesdev);
532
533 pci_read_config_byte(pcidev, 0x0008, &hw_rev);
534 nes_debug(NES_DBG_INIT, "hw_rev=%u\n", hw_rev);
535
536 spin_lock_init(&nesdev->indexed_regs_lock);
537
538 /* Remap the PCI registers in adapter BAR0 to kernel VA space */
 539 mmio_regs = ioremap_nocache(pci_resource_start(pcidev, BAR_0), pci_resource_len(pcidev, BAR_0));
540 if (mmio_regs == NULL) {
541 printk(KERN_ERR PFX "Unable to remap BAR0\n");
542 ret = -EIO;
543 goto bail3;
544 }
545 nesdev->regs = mmio_regs;
546 nesdev->index_reg = 0x50 + (PCI_FUNC(pcidev->devfn)*8) + mmio_regs;
547
548 /* Ensure interrupts are disabled */
549 nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
550
551 if (nes_drv_opt & NES_DRV_OPT_ENABLE_MSI) {
552 if (!pci_enable_msi(nesdev->pcidev)) {
553 nesdev->msi_enabled = 1;
554 nes_debug(NES_DBG_INIT, "MSI is enabled for device %s\n",
555 pci_name(pcidev));
556 } else {
557 nes_debug(NES_DBG_INIT, "MSI is disabled by linux for device %s\n",
558 pci_name(pcidev));
559 }
560 } else {
561 nes_debug(NES_DBG_INIT, "MSI not requested due to driver options for device %s\n",
562 pci_name(pcidev));
563 }
564
565 nesdev->csr_start = pci_resource_start(nesdev->pcidev, BAR_0);
566 nesdev->doorbell_region = pci_resource_start(nesdev->pcidev, BAR_1);
567
568 /* Init the adapter */
 569 nesdev->nesadapter = nes_init_adapter(nesdev, hw_rev);
 570 if (!nesdev->nesadapter) {
 571 printk(KERN_ERR PFX "Unable to initialize adapter.\n");
 572 ret = -ENOMEM;
 573 goto bail5;
 574 }
 575 nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
576
577 /* nesdev->base_doorbell_index =
578 nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
579 nesdev->base_doorbell_index = 1;
580 nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
581 nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
582
583 tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
584
585 /* bring up the Control QP */
586 if (nes_init_cqp(nesdev)) {
587 ret = -ENODEV;
588 goto bail6;
589 }
590
591 /* Arm the CCQ */
592 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
593 PCI_FUNC(nesdev->pcidev->devfn));
594 nes_read32(nesdev->regs+NES_CQE_ALLOC);
595
596 /* Enable the interrupts */
597 nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
598 (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
599 if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
600 nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
601 }
602
603 /* TODO: This really should be the first driver to load, not function 0 */
604 if (PCI_FUNC(nesdev->pcidev->devfn) == 0) {
605 /* pick up PCI and critical errors if the first driver to load */
606 nesdev->intf_int_req = NES_INTF_INT_PCIERR | NES_INTF_INT_CRITERR;
607 nesdev->int_req |= NES_INT_INTF;
608 } else {
609 nesdev->intf_int_req = 0;
610 }
611 nesdev->intf_int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
612 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0, 0);
613 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 0);
614 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS2, 0x00001265);
615 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS4, 0x18021804);
616
617 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS3, 0x17801790);
618
619 /* deal with both periodic and one_shot */
620 nesdev->timer_int_req = 0x101 << PCI_FUNC(nesdev->pcidev->devfn);
621 nesdev->nesadapter->timer_int_req |= nesdev->timer_int_req;
622 nes_debug(NES_DBG_INIT, "setting int_req for function %u, nesdev = 0x%04X, adapter = 0x%04X\n",
623 PCI_FUNC(nesdev->pcidev->devfn),
624 nesdev->timer_int_req, nesdev->nesadapter->timer_int_req);
625
626 nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
627
628 list_add_tail(&nesdev->list, &nes_dev_list);
629
630 /* Request an interrupt line for the driver */
631 ret = request_irq(pcidev->irq, nes_interrupt, IRQF_SHARED, DRV_NAME, nesdev);
632 if (ret) {
633 printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
634 pci_name(pcidev), pcidev->irq);
635 goto bail65;
636 }
637
638 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
639
640 if (nes_notifiers_registered == 0) {
641 register_inetaddr_notifier(&nes_inetaddr_notifier);
642 register_netevent_notifier(&nes_net_notifier);
643 }
644 nes_notifiers_registered++;
645
646 /* Initialize network devices */
647 if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) {
648 goto bail7;
649 }
650
651 /* Register network device */
652 ret = register_netdev(netdev);
653 if (ret) {
654 printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret);
655 nes_netdev_destroy(netdev);
656 goto bail7;
657 }
658
659 nes_print_macaddr(netdev);
660 /* create a CM core for this netdev */
661 nesvnic = netdev_priv(netdev);
662
663 nesdev->netdev_count++;
664 nesdev->nesadapter->netdev_count++;
665
666
667 printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n",
668 pci_name(pcidev));
669 return 0;
670
671 bail7:
672 printk(KERN_ERR PFX "bail7\n");
673 while (nesdev->netdev_count > 0) {
674 nesdev->netdev_count--;
675 nesdev->nesadapter->netdev_count--;
676
677 unregister_netdev(nesdev->netdev[nesdev->netdev_count]);
678 nes_netdev_destroy(nesdev->netdev[nesdev->netdev_count]);
679 }
680
681 nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
682 nesdev->netdev_count, nesdev->nesadapter->netdev_count);
683
684 nes_notifiers_registered--;
685 if (nes_notifiers_registered == 0) {
686 unregister_netevent_notifier(&nes_net_notifier);
687 unregister_inetaddr_notifier(&nes_inetaddr_notifier);
688 }
689
690 list_del(&nesdev->list);
691 nes_destroy_cqp(nesdev);
692
693 bail65:
694 printk(KERN_ERR PFX "bail65\n");
695 free_irq(pcidev->irq, nesdev);
696 if (nesdev->msi_enabled) {
697 pci_disable_msi(pcidev);
698 }
699 bail6:
700 printk(KERN_ERR PFX "bail6\n");
701 tasklet_kill(&nesdev->dpc_tasklet);
702 /* Deallocate the Adapter Structure */
703 nes_destroy_adapter(nesdev->nesadapter);
704
705 bail5:
706 printk(KERN_ERR PFX "bail5\n");
707 iounmap(nesdev->regs);
708
709 bail3:
710 printk(KERN_ERR PFX "bail3\n");
711 kfree(nesdev);
712
713 bail2:
714 pci_release_regions(pcidev);
715
716 bail1:
717 pci_disable_device(pcidev);
718
719 bail0:
720 return ret;
721}
722
723
724/**
725 * nes_remove - unload from kernel
726 */
727static void __devexit nes_remove(struct pci_dev *pcidev)
728{
729 struct nes_device *nesdev = pci_get_drvdata(pcidev);
730 struct net_device *netdev;
731 int netdev_index = 0;
732
733 if (nesdev->netdev_count) {
734 netdev = nesdev->netdev[netdev_index];
735 if (netdev) {
736 netif_stop_queue(netdev);
737 unregister_netdev(netdev);
738 nes_netdev_destroy(netdev);
739
740 nesdev->netdev[netdev_index] = NULL;
741 nesdev->netdev_count--;
742 nesdev->nesadapter->netdev_count--;
743 }
744 }
745
746 nes_notifiers_registered--;
747 if (nes_notifiers_registered == 0) {
748 unregister_netevent_notifier(&nes_net_notifier);
749 unregister_inetaddr_notifier(&nes_inetaddr_notifier);
750 }
751
752 list_del(&nesdev->list);
753 nes_destroy_cqp(nesdev);
754 tasklet_kill(&nesdev->dpc_tasklet);
755
756 /* Deallocate the Adapter Structure */
757 nes_destroy_adapter(nesdev->nesadapter);
758
759 free_irq(pcidev->irq, nesdev);
760
761 if (nesdev->msi_enabled) {
762 pci_disable_msi(pcidev);
763 }
764
765 iounmap(nesdev->regs);
766 kfree(nesdev);
767
768 /* nes_debug(NES_DBG_SHUTDOWN, "calling pci_release_regions.\n"); */
769 pci_release_regions(pcidev);
770 pci_disable_device(pcidev);
771 pci_set_drvdata(pcidev, NULL);
772}
773
774
775static struct pci_driver nes_pci_driver = {
776 .name = DRV_NAME,
777 .id_table = nes_pci_table,
778 .probe = nes_probe,
779 .remove = __devexit_p(nes_remove),
780};
781
782static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
783{
784 unsigned int devfn = 0xffffffff;
785 unsigned char bus_number = 0xff;
786 unsigned int i = 0;
787 struct nes_device *nesdev;
788
789 list_for_each_entry(nesdev, &nes_dev_list, list) {
790 if (i == ee_flsh_adapter) {
791 devfn = nesdev->nesadapter->devfn;
792 bus_number = nesdev->nesadapter->bus_number;
793 break;
794 }
795 i++;
796 }
797
798 return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
799}
800
801static ssize_t nes_store_adapter(struct device_driver *ddp,
802 const char *buf, size_t count)
803{
804 char *p = (char *)buf;
805
806 ee_flsh_adapter = simple_strtoul(p, &p, 10);
807 return strnlen(buf, count);
808}
809
810static ssize_t nes_show_ee_cmd(struct device_driver *ddp, char *buf)
811{
812 u32 eeprom_cmd = 0xdead;
813 u32 i = 0;
814 struct nes_device *nesdev;
815
816 list_for_each_entry(nesdev, &nes_dev_list, list) {
817 if (i == ee_flsh_adapter) {
818 eeprom_cmd = nes_read32(nesdev->regs + NES_EEPROM_COMMAND);
819 break;
820 }
821 i++;
822 }
823 return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_cmd);
824}
825
826static ssize_t nes_store_ee_cmd(struct device_driver *ddp,
827 const char *buf, size_t count)
828{
829 char *p = (char *)buf;
830 u32 val;
831 u32 i = 0;
832 struct nes_device *nesdev;
833
834 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
835 val = simple_strtoul(p, &p, 16);
836 list_for_each_entry(nesdev, &nes_dev_list, list) {
837 if (i == ee_flsh_adapter) {
838 nes_write32(nesdev->regs + NES_EEPROM_COMMAND, val);
839 break;
840 }
841 i++;
842 }
843 }
844 return strnlen(buf, count);
845}
846
847static ssize_t nes_show_ee_data(struct device_driver *ddp, char *buf)
848{
849 u32 eeprom_data = 0xdead;
850 u32 i = 0;
851 struct nes_device *nesdev;
852
853 list_for_each_entry(nesdev, &nes_dev_list, list) {
854 if (i == ee_flsh_adapter) {
855 eeprom_data = nes_read32(nesdev->regs + NES_EEPROM_DATA);
856 break;
857 }
858 i++;
859 }
860
861 return snprintf(buf, PAGE_SIZE, "0x%x\n", eeprom_data);
862}
863
864static ssize_t nes_store_ee_data(struct device_driver *ddp,
865 const char *buf, size_t count)
866{
867 char *p = (char *)buf;
868 u32 val;
869 u32 i = 0;
870 struct nes_device *nesdev;
871
872 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
873 val = simple_strtoul(p, &p, 16);
874 list_for_each_entry(nesdev, &nes_dev_list, list) {
875 if (i == ee_flsh_adapter) {
876 nes_write32(nesdev->regs + NES_EEPROM_DATA, val);
877 break;
878 }
879 i++;
880 }
881 }
882 return strnlen(buf, count);
883}
884
885static ssize_t nes_show_flash_cmd(struct device_driver *ddp, char *buf)
886{
887 u32 flash_cmd = 0xdead;
888 u32 i = 0;
889 struct nes_device *nesdev;
890
891 list_for_each_entry(nesdev, &nes_dev_list, list) {
892 if (i == ee_flsh_adapter) {
893 flash_cmd = nes_read32(nesdev->regs + NES_FLASH_COMMAND);
894 break;
895 }
896 i++;
897 }
898
899 return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_cmd);
900}
901
902static ssize_t nes_store_flash_cmd(struct device_driver *ddp,
903 const char *buf, size_t count)
904{
905 char *p = (char *)buf;
906 u32 val;
907 u32 i = 0;
908 struct nes_device *nesdev;
909
910 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
911 val = simple_strtoul(p, &p, 16);
912 list_for_each_entry(nesdev, &nes_dev_list, list) {
913 if (i == ee_flsh_adapter) {
914 nes_write32(nesdev->regs + NES_FLASH_COMMAND, val);
915 break;
916 }
917 i++;
918 }
919 }
920 return strnlen(buf, count);
921}
922
923static ssize_t nes_show_flash_data(struct device_driver *ddp, char *buf)
924{
925 u32 flash_data = 0xdead;
926 u32 i = 0;
927 struct nes_device *nesdev;
928
929 list_for_each_entry(nesdev, &nes_dev_list, list) {
930 if (i == ee_flsh_adapter) {
931 flash_data = nes_read32(nesdev->regs + NES_FLASH_DATA);
932 break;
933 }
934 i++;
935 }
936
937 return snprintf(buf, PAGE_SIZE, "0x%x\n", flash_data);
938}
939
940static ssize_t nes_store_flash_data(struct device_driver *ddp,
941 const char *buf, size_t count)
942{
943 char *p = (char *)buf;
944 u32 val;
945 u32 i = 0;
946 struct nes_device *nesdev;
947
948 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
949 val = simple_strtoul(p, &p, 16);
950 list_for_each_entry(nesdev, &nes_dev_list, list) {
951 if (i == ee_flsh_adapter) {
952 nes_write32(nesdev->regs + NES_FLASH_DATA, val);
953 break;
954 }
955 i++;
956 }
957 }
958 return strnlen(buf, count);
959}
960
961static ssize_t nes_show_nonidx_addr(struct device_driver *ddp, char *buf)
962{
963 return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_nonidx_addr);
964}
965
966static ssize_t nes_store_nonidx_addr(struct device_driver *ddp,
967 const char *buf, size_t count)
968{
969 char *p = (char *)buf;
970
971 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
972 sysfs_nonidx_addr = simple_strtoul(p, &p, 16);
973
974 return strnlen(buf, count);
975}
976
977static ssize_t nes_show_nonidx_data(struct device_driver *ddp, char *buf)
978{
979 u32 nonidx_data = 0xdead;
980 u32 i = 0;
981 struct nes_device *nesdev;
982
983 list_for_each_entry(nesdev, &nes_dev_list, list) {
984 if (i == ee_flsh_adapter) {
985 nonidx_data = nes_read32(nesdev->regs + sysfs_nonidx_addr);
986 break;
987 }
988 i++;
989 }
990
991 return snprintf(buf, PAGE_SIZE, "0x%x\n", nonidx_data);
992}
993
994static ssize_t nes_store_nonidx_data(struct device_driver *ddp,
995 const char *buf, size_t count)
996{
997 char *p = (char *)buf;
998 u32 val;
999 u32 i = 0;
1000 struct nes_device *nesdev;
1001
1002 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1003 val = simple_strtoul(p, &p, 16);
1004 list_for_each_entry(nesdev, &nes_dev_list, list) {
1005 if (i == ee_flsh_adapter) {
1006 nes_write32(nesdev->regs + sysfs_nonidx_addr, val);
1007 break;
1008 }
1009 i++;
1010 }
1011 }
1012 return strnlen(buf, count);
1013}
1014
1015static ssize_t nes_show_idx_addr(struct device_driver *ddp, char *buf)
1016{
1017 return snprintf(buf, PAGE_SIZE, "0x%x\n", sysfs_idx_addr);
1018}
1019
1020static ssize_t nes_store_idx_addr(struct device_driver *ddp,
1021 const char *buf, size_t count)
1022{
1023 char *p = (char *)buf;
1024
1025 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X')
1026 sysfs_idx_addr = simple_strtoul(p, &p, 16);
1027
1028 return strnlen(buf, count);
1029}
1030
1031static ssize_t nes_show_idx_data(struct device_driver *ddp, char *buf)
1032{
1033 u32 idx_data = 0xdead;
1034 u32 i = 0;
1035 struct nes_device *nesdev;
1036
1037 list_for_each_entry(nesdev, &nes_dev_list, list) {
1038 if (i == ee_flsh_adapter) {
1039 idx_data = nes_read_indexed(nesdev, sysfs_idx_addr);
1040 break;
1041 }
1042 i++;
1043 }
1044
1045 return snprintf(buf, PAGE_SIZE, "0x%x\n", idx_data);
1046}
1047
1048static ssize_t nes_store_idx_data(struct device_driver *ddp,
1049 const char *buf, size_t count)
1050{
1051 char *p = (char *)buf;
1052 u32 val;
1053 u32 i = 0;
1054 struct nes_device *nesdev;
1055
1056 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1057 val = simple_strtoul(p, &p, 16);
1058 list_for_each_entry(nesdev, &nes_dev_list, list) {
1059 if (i == ee_flsh_adapter) {
1060 nes_write_indexed(nesdev, sysfs_idx_addr, val);
1061 break;
1062 }
1063 i++;
1064 }
1065 }
1066 return strnlen(buf, count);
1067}
1068
1069static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
1070 nes_show_adapter, nes_store_adapter);
1071static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
1072 nes_show_ee_cmd, nes_store_ee_cmd);
1073static DRIVER_ATTR(eeprom_data, S_IRUSR | S_IWUSR,
1074 nes_show_ee_data, nes_store_ee_data);
1075static DRIVER_ATTR(flash_cmd, S_IRUSR | S_IWUSR,
1076 nes_show_flash_cmd, nes_store_flash_cmd);
1077static DRIVER_ATTR(flash_data, S_IRUSR | S_IWUSR,
1078 nes_show_flash_data, nes_store_flash_data);
1079static DRIVER_ATTR(nonidx_addr, S_IRUSR | S_IWUSR,
1080 nes_show_nonidx_addr, nes_store_nonidx_addr);
1081static DRIVER_ATTR(nonidx_data, S_IRUSR | S_IWUSR,
1082 nes_show_nonidx_data, nes_store_nonidx_data);
1083static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
1084 nes_show_idx_addr, nes_store_idx_addr);
1085static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
1086 nes_show_idx_data, nes_store_idx_data);
1087
1088static int nes_create_driver_sysfs(struct pci_driver *drv)
1089{
1090 int error;
1091 error = driver_create_file(&drv->driver, &driver_attr_adapter);
1092 error |= driver_create_file(&drv->driver, &driver_attr_eeprom_cmd);
1093 error |= driver_create_file(&drv->driver, &driver_attr_eeprom_data);
1094 error |= driver_create_file(&drv->driver, &driver_attr_flash_cmd);
1095 error |= driver_create_file(&drv->driver, &driver_attr_flash_data);
1096 error |= driver_create_file(&drv->driver, &driver_attr_nonidx_addr);
1097 error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
1098 error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
1099 error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
1100 return error;
1101}
1102
1103static void nes_remove_driver_sysfs(struct pci_driver *drv)
1104{
1105 driver_remove_file(&drv->driver, &driver_attr_adapter);
1106 driver_remove_file(&drv->driver, &driver_attr_eeprom_cmd);
1107 driver_remove_file(&drv->driver, &driver_attr_eeprom_data);
1108 driver_remove_file(&drv->driver, &driver_attr_flash_cmd);
1109 driver_remove_file(&drv->driver, &driver_attr_flash_data);
1110 driver_remove_file(&drv->driver, &driver_attr_nonidx_addr);
1111 driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
1112 driver_remove_file(&drv->driver, &driver_attr_idx_addr);
1113 driver_remove_file(&drv->driver, &driver_attr_idx_data);
1114}
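/*
 * Illustrative userspace sketch (editorial, not part of this patch): the
 * attributes created above should land in the PCI driver's sysfs
 * directory; assuming the conventional path /sys/bus/pci/drivers/iw_nes/,
 * an indexed register could be peeked roughly like this. Note that the
 * store routines above only parse values whose first two characters
 * include an 'x' or 'X', so addresses must be written in hex form.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		FILE *f = fopen("/sys/bus/pci/drivers/iw_nes/idx_addr", "w");
 *
 *		if (!f)
 *			return 1;
 *		fprintf(f, "0x0");
 *		fclose(f);
 *		f = fopen("/sys/bus/pci/drivers/iw_nes/idx_data", "r");
 *		if (!f)
 *			return 1;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("idx 0x0 = %s", buf);
 *		fclose(f);
 *		return 0;
 *	}
 */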
1115
1116/**
1117 * nes_init_module - module initialization entry point
1118 */
1119static int __init nes_init_module(void)
1120{
1121 int retval;
1122 int retval1;
1123
1124 retval = nes_cm_start();
1125 if (retval) {
1126 printk(KERN_ERR PFX "Unable to start NetEffect iWARP CM.\n");
1127 return retval;
1128 }
1129 retval = pci_register_driver(&nes_pci_driver);
1130 if (retval < 0) {
1131 nes_cm_stop();
1132 return retval;
1133 }
1134 retval1 = nes_create_driver_sysfs(&nes_pci_driver);
1135 if (retval1 < 0)
1136 printk(KERN_ERR PFX "Unable to create NetEffect sys files.\n");
1137 return 0;
1138}
1139/**
1140 * nes_exit_module - module unload entry point
1141 */
1142static void __exit nes_exit_module(void)
1143{
1144 nes_cm_stop();
1145 nes_remove_driver_sysfs(&nes_pci_driver);
1146
1147 pci_unregister_driver(&nes_pci_driver);
1148}
1149
1150
1151module_init(nes_init_module);
1152module_exit(nes_exit_module);
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
new file mode 100644
index 000000000000..fd57e8a1582f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -0,0 +1,560 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef __NES_H
35#define __NES_H
36
37#include <linux/netdevice.h>
38#include <linux/inetdevice.h>
39#include <linux/spinlock.h>
40#include <linux/kernel.h>
41#include <linux/delay.h>
42#include <linux/pci.h>
43#include <linux/dma-mapping.h>
44#include <linux/workqueue.h>
45#include <linux/slab.h>
46#include <asm/semaphore.h>
47#include <linux/version.h>
48#include <asm/io.h>
49#include <linux/crc32c.h>
50
51#include <rdma/ib_smi.h>
52#include <rdma/ib_verbs.h>
53#include <rdma/ib_pack.h>
54#include <rdma/rdma_cm.h>
55#include <rdma/iw_cm.h>
56
57#define NES_SEND_FIRST_WRITE
58
59#define QUEUE_DISCONNECTS
60
61#define DRV_BUILD "1"
62
63#define DRV_NAME "iw_nes"
64#define DRV_VERSION "1.0 KO Build " DRV_BUILD
65#define PFX DRV_NAME ": "
66
67/*
68 * NetEffect PCI vendor id and NE010 PCI device id.
69 */
70#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
71#define PCI_VENDOR_ID_NETEFFECT 0x1678
72#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
73#endif
74
75#define NE020_REV 4
76#define NE020_REV1 5
77
78#define BAR_0 0
79#define BAR_1 2
80
81#define RX_BUF_SIZE (1536 + 8)
82#define NES_REG0_SIZE (4 * 1024)
83#define NES_TX_TIMEOUT (6*HZ)
84#define NES_FIRST_QPN 64
85#define NES_SW_CONTEXT_ALIGN 1024
86
87#define NES_NIC_MAX_NICS 16
88#define NES_MAX_ARP_TABLE_SIZE 4096
89
90#define NES_NIC_CEQ_SIZE 8
91/* NICs will be on a separate CQ */
92#define NES_CCEQ_SIZE ((nesadapter->max_cq / nesadapter->port_count) - 32)
93
94#define NES_MAX_PORT_COUNT 4
95
96#define MAX_DPC_ITERATIONS 128
97
98#define NES_CQP_REQUEST_NO_DOORBELL_RING 0
99#define NES_CQP_REQUEST_RING_DOORBELL 1
100
101#define NES_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
102#define NES_DRV_OPT_DISABLE_MPA_CRC 0x00000002
103#define NES_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
104#define NES_DRV_OPT_DISABLE_INTF 0x00000008
105#define NES_DRV_OPT_ENABLE_MSI 0x00000010
106#define NES_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
107#define NES_DRV_OPT_SUPRESS_OPTION_BC 0x00000040
108#define NES_DRV_OPT_NO_INLINE_DATA 0x00000080
109#define NES_DRV_OPT_DISABLE_INT_MOD 0x00000100
110#define NES_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
111
112#define NES_AEQ_EVENT_TIMEOUT 2500
113#define NES_DISCONNECT_EVENT_TIMEOUT 2000
114
115/* debug levels */
116/* must match userspace */
117#define NES_DBG_HW 0x00000001
118#define NES_DBG_INIT 0x00000002
119#define NES_DBG_ISR 0x00000004
120#define NES_DBG_PHY 0x00000008
121#define NES_DBG_NETDEV 0x00000010
122#define NES_DBG_CM 0x00000020
123#define NES_DBG_CM1 0x00000040
124#define NES_DBG_NIC_RX 0x00000080
125#define NES_DBG_NIC_TX 0x00000100
126#define NES_DBG_CQP 0x00000200
127#define NES_DBG_MMAP 0x00000400
128#define NES_DBG_MR 0x00000800
129#define NES_DBG_PD 0x00001000
130#define NES_DBG_CQ 0x00002000
131#define NES_DBG_QP 0x00004000
132#define NES_DBG_MOD_QP 0x00008000
133#define NES_DBG_AEQ 0x00010000
134#define NES_DBG_IW_RX 0x00020000
135#define NES_DBG_IW_TX 0x00040000
136#define NES_DBG_SHUTDOWN 0x00080000
137#define NES_DBG_RSVD1 0x10000000
138#define NES_DBG_RSVD2 0x20000000
139#define NES_DBG_RSVD3 0x40000000
140#define NES_DBG_RSVD4 0x80000000
141#define NES_DBG_ALL 0xffffffff
142
143#ifdef CONFIG_INFINIBAND_NES_DEBUG
144#define nes_debug(level, fmt, args...) \
145 do { if ((level) & nes_debug_level) \
146 printk(KERN_ERR PFX "%s[%u]: " fmt, __FUNCTION__, __LINE__, ##args); } while (0)
147
148#define assert(expr) \
149do { if (!(expr)) { \
150 printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
151 #expr, __FILE__, __FUNCTION__, __LINE__); \
152} } while (0)
153
154#define NES_EVENT_TIMEOUT 1200000
155#else
156#define nes_debug(level, fmt, args...)
157#define assert(expr) do {} while (0)
158
159#define NES_EVENT_TIMEOUT 100000
160#endif
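/*
 * Editorial note: with CONFIG_INFINIBAND_NES_DEBUG enabled, a message is
 * emitted only when its level mask intersects nes_debug_level. For
 * example:
 *
 *	nes_debug(NES_DBG_CM | NES_DBG_CM1, "qp=%u state=%u\n", qp_id, state);
 *
 * fires if either NES_DBG_CM (0x20) or NES_DBG_CM1 (0x40) is set in
 * nes_debug_level. nes_debug_level is declared extern below; how it gets
 * set at runtime (presumably a module parameter) is outside this excerpt.
 */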
161
162#include "nes_hw.h"
163#include "nes_verbs.h"
164#include "nes_context.h"
165#include "nes_user.h"
166#include "nes_cm.h"
167
168extern int max_mtu;
169extern int nics_per_function;
170#define max_frame_len (max_mtu+ETH_HLEN)
171extern int interrupt_mod_interval;
172extern int nes_if_count;
173extern int mpa_version;
174extern int disable_mpa_crc;
175extern unsigned int send_first;
176extern unsigned int nes_drv_opt;
177extern unsigned int nes_debug_level;
178
179extern struct list_head nes_adapter_list;
180extern struct list_head nes_dev_list;
181
182extern struct nes_cm_core *g_cm_core;
183
184extern atomic_t cm_connects;
185extern atomic_t cm_accepts;
186extern atomic_t cm_disconnects;
187extern atomic_t cm_closes;
188extern atomic_t cm_connecteds;
189extern atomic_t cm_connect_reqs;
190extern atomic_t cm_rejects;
191extern atomic_t mod_qp_timouts;
192extern atomic_t qps_created;
193extern atomic_t qps_destroyed;
194extern atomic_t sw_qps_destroyed;
195extern u32 mh_detected;
196extern u32 mh_pauses_sent;
197extern u32 cm_packets_sent;
198extern u32 cm_packets_bounced;
199extern u32 cm_packets_created;
200extern u32 cm_packets_received;
201extern u32 cm_packets_dropped;
202extern u32 cm_packets_retrans;
203extern u32 cm_listens_created;
204extern u32 cm_listens_destroyed;
205extern u32 cm_backlog_drops;
206extern atomic_t cm_loopbacks;
207extern atomic_t cm_nodes_created;
208extern atomic_t cm_nodes_destroyed;
209extern atomic_t cm_accel_dropped_pkts;
210extern atomic_t cm_resets_recvd;
211
212extern u32 crit_err_count;
213extern u32 int_mod_timer_init;
214extern u32 int_mod_cq_depth_256;
215extern u32 int_mod_cq_depth_128;
216extern u32 int_mod_cq_depth_32;
217extern u32 int_mod_cq_depth_24;
218extern u32 int_mod_cq_depth_16;
219extern u32 int_mod_cq_depth_4;
220extern u32 int_mod_cq_depth_1;
221
222extern atomic_t cqp_reqs_allocated;
223extern atomic_t cqp_reqs_freed;
224extern atomic_t cqp_reqs_dynallocated;
225extern atomic_t cqp_reqs_dynfreed;
226extern atomic_t cqp_reqs_queued;
227extern atomic_t cqp_reqs_redriven;
228
229
230struct nes_device {
231 struct nes_adapter *nesadapter;
232 void __iomem *regs;
233 void __iomem *index_reg;
234 struct pci_dev *pcidev;
235 struct net_device *netdev[NES_NIC_MAX_NICS];
236 u64 link_status_interrupts;
237 struct tasklet_struct dpc_tasklet;
238 spinlock_t indexed_regs_lock;
239 unsigned long csr_start;
240 unsigned long doorbell_region;
241 unsigned long doorbell_start;
242 unsigned long mac_tx_errors;
243 unsigned long mac_pause_frames_sent;
244 unsigned long mac_pause_frames_received;
245 unsigned long mac_rx_errors;
246 unsigned long mac_rx_crc_errors;
247 unsigned long mac_rx_symbol_err_frames;
248 unsigned long mac_rx_jabber_frames;
249 unsigned long mac_rx_oversized_frames;
250 unsigned long mac_rx_short_frames;
251 unsigned long port_rx_discards;
252 unsigned long port_tx_discards;
253 unsigned int mac_index;
254 unsigned int nes_stack_start;
255
256 /* Control Structures */
257 void *cqp_vbase;
258 dma_addr_t cqp_pbase;
259 u32 cqp_mem_size;
260 u8 ceq_index;
261 u8 nic_ceq_index;
262 struct nes_hw_cqp cqp;
263 struct nes_hw_cq ccq;
264 struct list_head cqp_avail_reqs;
265 struct list_head cqp_pending_reqs;
266 struct nes_cqp_request *nes_cqp_requests;
267
268 u32 int_req;
269 u32 int_stat;
270 u32 timer_int_req;
271 u32 timer_only_int_count;
272 u32 intf_int_req;
273 u32 last_mac_tx_pauses;
274 u32 last_used_chunks_tx;
275 struct list_head list;
276
277 u16 base_doorbell_index;
278 u16 currcq_count;
279 u16 deepcq_count;
280 u8 msi_enabled;
281 u8 netdev_count;
282 u8 napi_isr_ran;
283 u8 disable_rx_flow_control;
284 u8 disable_tx_flow_control;
285};
286
287
288static inline void
289set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value)
290{
291 wqe_words[index] = cpu_to_le32((u32) value);
292 wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value));
293}
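/*
 * Editorial worked example: set_wqe_64bit_value() stores a 64-bit value
 * as two little-endian 32-bit WQE words, low word first. For
 * value == 0x1122334455667788ULL the effect is:
 *
 *	wqe_words[index]     = cpu_to_le32(0x55667788);
 *	wqe_words[index + 1] = cpu_to_le32(0x11223344);
 */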
294
295static inline void
296set_wqe_32bit_value(__le32 *wqe_words, u32 index, u32 value)
297{
298 wqe_words[index] = cpu_to_le32(value);
299}
300
301static inline void
302nes_fill_init_cqp_wqe(struct nes_hw_cqp_wqe *cqp_wqe, struct nes_device *nesdev)
303{
304 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_CTX_LOW_IDX,
305 (u64)((unsigned long) &nesdev->cqp));
306 cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] = 0;
307 cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] = 0;
308 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] = 0;
309 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_LEN_IDX] = 0;
310 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_LOW_IDX] = 0;
311 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_LOW_IDX] = 0;
312 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PA_HIGH_IDX] = 0;
313}
314
315static inline void
316nes_fill_init_qp_wqe(struct nes_hw_qp_wqe *wqe, struct nes_qp *nesqp, u32 head)
317{
318 u32 value;
319 value = ((u32)((unsigned long) nesqp)) | head;
320 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX,
321 (u32)(upper_32_bits((unsigned long)(nesqp))));
322 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, value);
323}
324
325/* Read from memory-mapped device */
326static inline u32 nes_read_indexed(struct nes_device *nesdev, u32 reg_index)
327{
328 unsigned long flags;
329 void __iomem *addr = nesdev->index_reg;
330 u32 value;
331
332 spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
333
334 writel(reg_index, addr);
335 value = readl(addr + 4);
336
337 spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
338 return value;
339}
340
341static inline u32 nes_read32(const void __iomem *addr)
342{
343 return readl(addr);
344}
345
346static inline u16 nes_read16(const void __iomem *addr)
347{
348 return readw(addr);
349}
350
351static inline u8 nes_read8(const void __iomem *addr)
352{
353 return readb(addr);
354}
355
356/* Write to memory-mapped device */
357static inline void nes_write_indexed(struct nes_device *nesdev, u32 reg_index, u32 val)
358{
359 unsigned long flags;
360 void __iomem *addr = nesdev->index_reg;
361
362 spin_lock_irqsave(&nesdev->indexed_regs_lock, flags);
363
364 writel(reg_index, addr);
365 writel(val, addr + 4);
366
367 spin_unlock_irqrestore(&nesdev->indexed_regs_lock, flags);
368}
369
370static inline void nes_write32(void __iomem *addr, u32 val)
371{
372 writel(val, addr);
373}
374
375static inline void nes_write16(void __iomem *addr, u16 val)
376{
377 writew(val, addr);
378}
379
380static inline void nes_write8(void __iomem *addr, u8 val)
381{
382 writeb(val, addr);
383}
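/*
 * Editorial sketch (not part of this patch): a read-modify-write of an
 * indexed register composes the two primitives above:
 *
 *	static inline void nes_rmw_indexed(struct nes_device *nesdev,
 *			u32 reg_index, u32 clear, u32 set)
 *	{
 *		u32 val = nes_read_indexed(nesdev, reg_index);
 *
 *		nes_write_indexed(nesdev, reg_index, (val & ~clear) | set);
 *	}
 *
 * Note that the read and the write each take indexed_regs_lock on their
 * own, so the sequence as a whole is not atomic against other users of
 * the index window.
 */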
384
385
386
387static inline int nes_alloc_resource(struct nes_adapter *nesadapter,
388 unsigned long *resource_array, u32 max_resources,
389 u32 *req_resource_num, u32 *next)
390{
391 unsigned long flags;
392 u32 resource_num;
393
394 spin_lock_irqsave(&nesadapter->resource_lock, flags);
395
396 resource_num = find_next_zero_bit(resource_array, max_resources, *next);
397 if (resource_num >= max_resources) {
398 resource_num = find_first_zero_bit(resource_array, max_resources);
399 if (resource_num >= max_resources) {
400 printk(KERN_ERR PFX "%s: No available resources.\n", __FUNCTION__);
401 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
402 return -EMFILE;
403 }
404 }
405 set_bit(resource_num, resource_array);
406 *next = resource_num+1;
407 if (*next == max_resources) {
408 *next = 0;
409 }
410 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
411 *req_resource_num = resource_num;
412
413 return 0;
414}
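/*
 * Editorial usage sketch: callers pair nes_alloc_resource() with
 * nes_free_resource() on the same bitmap. The field names below
 * (allocated_qps, max_qp, next_qp) are assumptions for illustration:
 *
 *	u32 qp_num;
 *	int ret;
 *
 *	ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
 *			nesadapter->max_qp, &qp_num, &nesadapter->next_qp);
 *	if (ret)
 *		return ret;
 *	...
 *	nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
 *
 * The *next cursor makes the search round-robin, so a freshly freed
 * number is not immediately handed back out.
 */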
415
416static inline int nes_is_resource_allocated(struct nes_adapter *nesadapter,
417 unsigned long *resource_array, u32 resource_num)
418{
419 unsigned long flags;
420 int bit_is_set;
421
422 spin_lock_irqsave(&nesadapter->resource_lock, flags);
423
424 bit_is_set = test_bit(resource_num, resource_array);
425 nes_debug(NES_DBG_HW, "resource_num %u is%s allocated.\n",
426 resource_num, (bit_is_set ? "": " not"));
427 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
428
429 return bit_is_set;
430}
431
432static inline void nes_free_resource(struct nes_adapter *nesadapter,
433 unsigned long *resource_array, u32 resource_num)
434{
435 unsigned long flags;
436
437 spin_lock_irqsave(&nesadapter->resource_lock, flags);
438 clear_bit(resource_num, resource_array);
439 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
440}
441
442static inline struct nes_vnic *to_nesvnic(struct ib_device *ibdev)
443{
444 return container_of(ibdev, struct nes_ib_device, ibdev)->nesvnic;
445}
446
447static inline struct nes_pd *to_nespd(struct ib_pd *ibpd)
448{
449 return container_of(ibpd, struct nes_pd, ibpd);
450}
451
452static inline struct nes_ucontext *to_nesucontext(struct ib_ucontext *ibucontext)
453{
454 return container_of(ibucontext, struct nes_ucontext, ibucontext);
455}
456
457static inline struct nes_mr *to_nesmr(struct ib_mr *ibmr)
458{
459 return container_of(ibmr, struct nes_mr, ibmr);
460}
461
462static inline struct nes_mr *to_nesmr_from_ibfmr(struct ib_fmr *ibfmr)
463{
464 return container_of(ibfmr, struct nes_mr, ibfmr);
465}
466
467static inline struct nes_mr *to_nesmw(struct ib_mw *ibmw)
468{
469 return container_of(ibmw, struct nes_mr, ibmw);
470}
471
472static inline struct nes_fmr *to_nesfmr(struct nes_mr *nesmr)
473{
474 return container_of(nesmr, struct nes_fmr, nesmr);
475}
476
477static inline struct nes_cq *to_nescq(struct ib_cq *ibcq)
478{
479 return container_of(ibcq, struct nes_cq, ibcq);
480}
481
482static inline struct nes_qp *to_nesqp(struct ib_qp *ibqp)
483{
484 return container_of(ibqp, struct nes_qp, ibqp);
485}
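/*
 * Editorial note: the to_*() helpers above rely on each nes object
 * embedding its ib_* counterpart, so a pointer handed to the IB core can
 * be mapped back to the enclosing structure with container_of(). E.g. in
 * any verbs callback that receives a struct ib_qp *ibqp:
 *
 *	struct nes_qp *nesqp = to_nesqp(ibqp);
 *
 * recovers the driver-private QP without any lookup table.
 */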
486
487
488
489/* nes.c */
490void nes_add_ref(struct ib_qp *);
491void nes_rem_ref(struct ib_qp *);
492struct ib_qp *nes_get_qp(struct ib_device *, int);
493
494
495/* nes_hw.c */
496struct nes_adapter *nes_init_adapter(struct nes_device *, u8);
497void nes_nic_init_timer_defaults(struct nes_device *, u8);
498unsigned int nes_reset_adapter_ne020(struct nes_device *, u8 *);
499int nes_init_serdes(struct nes_device *, u8, u8, u8);
500void nes_init_csr_ne020(struct nes_device *, u8, u8);
501void nes_destroy_adapter(struct nes_adapter *);
502int nes_init_cqp(struct nes_device *);
503int nes_init_phy(struct nes_device *);
504int nes_init_nic_qp(struct nes_device *, struct net_device *);
505void nes_destroy_nic_qp(struct nes_vnic *);
506int nes_napi_isr(struct nes_device *);
507void nes_dpc(unsigned long);
508void nes_process_ceq(struct nes_device *, struct nes_hw_ceq *);
509void nes_process_aeq(struct nes_device *, struct nes_hw_aeq *);
510void nes_process_mac_intr(struct nes_device *, u32);
511void nes_nic_napi_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
512void nes_nic_ce_handler(struct nes_device *, struct nes_hw_nic_cq *);
513void nes_cqp_ce_handler(struct nes_device *, struct nes_hw_cq *);
514void nes_process_iwarp_aeqe(struct nes_device *, struct nes_hw_aeqe *);
515void nes_iwarp_ce_handler(struct nes_device *, struct nes_hw_cq *);
516int nes_destroy_cqp(struct nes_device *);
517int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
518
519/* nes_nic.c */
520void nes_netdev_set_multicast_list(struct net_device *);
521void nes_netdev_exit(struct nes_vnic *);
522struct net_device *nes_netdev_init(struct nes_device *, void __iomem *);
523void nes_netdev_destroy(struct net_device *);
524int nes_nic_cm_xmit(struct sk_buff *, struct net_device *);
525
526/* nes_cm.c */
527void *nes_cm_create(struct net_device *);
528int nes_cm_recv(struct sk_buff *, struct net_device *);
529void nes_update_arp(unsigned char *, u32, u32, u16, u16);
530void nes_manage_arp_cache(struct net_device *, unsigned char *, u32, u32);
531void nes_sock_release(struct nes_qp *, unsigned long *);
532struct nes_cm_core *nes_cm_alloc_core(void);
533void flush_wqes(struct nes_device *nesdev, struct nes_qp *, u32, u32);
534int nes_manage_apbvt(struct nes_vnic *, u32, u32, u32);
535int nes_cm_disconn(struct nes_qp *);
536void nes_cm_disconn_worker(void *);
537
538/* nes_verbs.c */
539int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
540int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
541struct nes_ib_device *nes_init_ofa_device(struct net_device *);
542void nes_destroy_ofa_device(struct nes_ib_device *);
543int nes_register_ofa_device(struct nes_ib_device *);
544void nes_unregister_ofa_device(struct nes_ib_device *);
545
546/* nes_util.c */
547int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
548void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
549void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
550void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16);
551void nes_read_10G_phy_reg(struct nes_device *, u16, u8);
552struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
553void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
554int nes_arp_table(struct nes_device *, u32, u8 *, u32);
555void nes_mh_fix(unsigned long);
556void nes_clc(unsigned long);
557void nes_dump_mem(unsigned int, void *, int);
558u32 nes_crc32(u32, u32, u32, u32, u8 *, u32, u32, u32);
559
560#endif /* __NES_H */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
new file mode 100644
index 000000000000..bd5cfeaac203
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -0,0 +1,3088 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34
35#define TCPOPT_TIMESTAMP 8
36
37#include <asm/atomic.h>
38#include <linux/skbuff.h>
39#include <linux/ip.h>
40#include <linux/tcp.h>
41#include <linux/init.h>
42#include <linux/if_arp.h>
43#include <linux/notifier.h>
44#include <linux/net.h>
45#include <linux/types.h>
46#include <linux/timer.h>
47#include <linux/time.h>
48#include <linux/delay.h>
49#include <linux/etherdevice.h>
50#include <linux/netdevice.h>
51#include <linux/random.h>
52#include <linux/list.h>
53#include <linux/threads.h>
54
55#include <net/neighbour.h>
56#include <net/route.h>
57#include <net/ip_fib.h>
58
59#include "nes.h"
60
61u32 cm_packets_sent;
62u32 cm_packets_bounced;
63u32 cm_packets_dropped;
64u32 cm_packets_retrans;
65u32 cm_packets_created;
66u32 cm_packets_received;
67u32 cm_listens_created;
68u32 cm_listens_destroyed;
69u32 cm_backlog_drops;
70atomic_t cm_loopbacks;
71atomic_t cm_nodes_created;
72atomic_t cm_nodes_destroyed;
73atomic_t cm_accel_dropped_pkts;
74atomic_t cm_resets_recvd;
75
76static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
77static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *,
78 struct nes_vnic *, struct nes_cm_info *);
79static int add_ref_cm_node(struct nes_cm_node *);
80static int rem_ref_cm_node(struct nes_cm_core *, struct nes_cm_node *);
81static int mini_cm_del_listen(struct nes_cm_core *, struct nes_cm_listener *);
82
83
84/* External CM API Interface */
85/* instance of function pointers for client API */
86/* set address of this instance to cm_core->cm_ops at cm_core alloc */
87static struct nes_cm_ops nes_cm_api = {
88 mini_cm_accelerated,
89 mini_cm_listen,
90 mini_cm_del_listen,
91 mini_cm_connect,
92 mini_cm_close,
93 mini_cm_accept,
94 mini_cm_reject,
95 mini_cm_recv_pkt,
96 mini_cm_dealloc_core,
97 mini_cm_get,
98 mini_cm_set
99};
100
101struct nes_cm_core *g_cm_core;
102
103atomic_t cm_connects;
104atomic_t cm_accepts;
105atomic_t cm_disconnects;
106atomic_t cm_closes;
107atomic_t cm_connecteds;
108atomic_t cm_connect_reqs;
109atomic_t cm_rejects;
110
111
112/**
113 * create_event
114 */
115static struct nes_cm_event *create_event(struct nes_cm_node *cm_node,
116 enum nes_cm_event_type type)
117{
118 struct nes_cm_event *event;
119
120 if (!cm_node->cm_id)
121 return NULL;
122
123 /* allocate an empty event */
124 event = kzalloc(sizeof(*event), GFP_ATOMIC);
125
126 if (!event)
127 return NULL;
128
129 event->type = type;
130 event->cm_node = cm_node;
131 event->cm_info.rem_addr = cm_node->rem_addr;
132 event->cm_info.loc_addr = cm_node->loc_addr;
133 event->cm_info.rem_port = cm_node->rem_port;
134 event->cm_info.loc_port = cm_node->loc_port;
135 event->cm_info.cm_id = cm_node->cm_id;
136
137 nes_debug(NES_DBG_CM, "Created event=%p, type=%u, dst_addr=%08x[%x],"
138 " src_addr=%08x[%x]\n",
139 event, type,
140 event->cm_info.loc_addr, event->cm_info.loc_port,
141 event->cm_info.rem_addr, event->cm_info.rem_port);
142
143 nes_cm_post_event(event);
144 return event;
145}
146
147
148/**
149 * send_mpa_request
150 */
151int send_mpa_request(struct nes_cm_node *cm_node)
152{
153 struct sk_buff *skb;
154 int ret;
155
156 skb = get_free_pkt(cm_node);
157 if (!skb) {
158 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
159 return -1;
160 }
161
162 /* send an MPA Request frame */
163 form_cm_frame(skb, cm_node, NULL, 0, &cm_node->mpa_frame,
164 cm_node->mpa_frame_size, SET_ACK);
165
166 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
167 if (ret < 0) {
168 return ret;
169 }
170
171 return 0;
172}
173
174
175/**
176 * recv_mpa - process a received TCP pkt, we are expecting an
177 * IETF MPA frame
178 */
179static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 len)
180{
181 struct ietf_mpa_frame *mpa_frame;
182
183 /* assume req frame is in tcp data payload */
184 if (len < sizeof(struct ietf_mpa_frame)) {
185 nes_debug(NES_DBG_CM, "The received ietf buffer was too small (%x)\n", len);
186 return -1;
187 }
188
189 mpa_frame = (struct ietf_mpa_frame *)buffer;
190 cm_node->mpa_frame_size = ntohs(mpa_frame->priv_data_len);
191
192 if (cm_node->mpa_frame_size + sizeof(struct ietf_mpa_frame) != len) {
193 nes_debug(NES_DBG_CM, "The received ietf buffer was not the"
194 " expected size (%x + %x != %x)\n",
195 cm_node->mpa_frame_size, (u32)sizeof(struct ietf_mpa_frame), len);
196 return -1;
197 }
198
199 /* copy entire MPA frame to our cm_node's frame */
200 memcpy(cm_node->mpa_frame_buf, buffer + sizeof(struct ietf_mpa_frame),
201 cm_node->mpa_frame_size);
202
203 return 0;
204}
205
206
207/**
208 * handle_exception_pkt - process an exception packet.
209 * We have been in a TSA state, and we have now received SW
210 * TCP/IP traffic; it should be a FIN request or an IP pkt with options
211 */
212static int handle_exception_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb)
213{
214 int ret = 0;
215 struct tcphdr *tcph = tcp_hdr(skb);
216
217 /* first check to see if this a FIN pkt */
218 if (tcph->fin) {
219 /* we need to ACK the FIN request */
220 send_ack(cm_node);
221
222 /* check which side we are (client/server) and set next state accordingly */
223 if (cm_node->tcp_cntxt.client)
224 cm_node->state = NES_CM_STATE_CLOSING;
225 else {
226 /* we are the server side */
227 cm_node->state = NES_CM_STATE_CLOSE_WAIT;
228 /* since this is a self contained CM we don't wait for */
229 /* an APP to close us, just send final FIN immediately */
230 ret = send_fin(cm_node, NULL);
231 cm_node->state = NES_CM_STATE_LAST_ACK;
232 }
233 } else {
234 ret = -EINVAL;
235 }
236
237 return ret;
238}
239
240
241/**
242 * form_cm_frame - get a free packet and build an empty frame,
243 * using node info to fill it in.
244 */
245struct sk_buff *form_cm_frame(struct sk_buff *skb, struct nes_cm_node *cm_node,
246 void *options, u32 optionsize, void *data, u32 datasize, u8 flags)
247{
248 struct tcphdr *tcph;
249 struct iphdr *iph;
250 struct ethhdr *ethh;
251 u8 *buf;
252 u16 packetsize = sizeof(*iph);
253
254 packetsize += sizeof(*tcph);
255 packetsize += optionsize + datasize;
256
257 memset(skb->data, 0x00, ETH_HLEN + sizeof(*iph) + sizeof(*tcph));
258
259 skb->len = 0;
260 buf = skb_put(skb, packetsize + ETH_HLEN);
261
262 ethh = (struct ethhdr *) buf;
263 buf += ETH_HLEN;
264
265 iph = (struct iphdr *)buf;
266 buf += sizeof(*iph);
267 tcph = (struct tcphdr *)buf;
268 skb_reset_mac_header(skb);
269 skb_set_network_header(skb, ETH_HLEN);
270 skb_set_transport_header(skb, ETH_HLEN+sizeof(*iph));
271 buf += sizeof(*tcph);
272
273 skb->ip_summed = CHECKSUM_PARTIAL;
274 skb->protocol = htons(ETH_P_IP);
275 skb->data_len = 0;
276 skb->mac_len = ETH_HLEN;
277
278 memcpy(ethh->h_dest, cm_node->rem_mac, ETH_ALEN);
279 memcpy(ethh->h_source, cm_node->loc_mac, ETH_ALEN);
280 ethh->h_proto = htons(ETH_P_IP);
281
282 iph->version = IPVERSION;
283 iph->version = IPVERSION;
284 iph->ihl = 5; /* 5 * 4-byte words, IP header len */
284 iph->tos = 0;
285 iph->tot_len = htons(packetsize);
286 iph->id = htons(++cm_node->tcp_cntxt.loc_id);
287
288 iph->frag_off = htons(0x4000);
289 iph->ttl = 0x40;
290 iph->protocol = IPPROTO_TCP;
291
292 iph->saddr = htonl(cm_node->loc_addr);
293 iph->daddr = htonl(cm_node->rem_addr);
294
295 tcph->source = htons(cm_node->loc_port);
296 tcph->dest = htons(cm_node->rem_port);
297 tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
298
299 if (flags & SET_ACK) {
300 cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
301 tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
302 tcph->ack = 1;
303 } else
304 tcph->ack_seq = 0;
305
306 if (flags & SET_SYN) {
307 cm_node->tcp_cntxt.loc_seq_num++;
308 tcph->syn = 1;
309 } else
310 cm_node->tcp_cntxt.loc_seq_num += datasize; /* data (no headers) */
311
312 if (flags & SET_FIN)
313 tcph->fin = 1;
314
315 if (flags & SET_RST)
316 tcph->rst = 1;
317
318 tcph->doff = (u16)((sizeof(*tcph) + optionsize + 3) >> 2);
319 tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
320 tcph->urg_ptr = 0;
321 if (optionsize)
322 memcpy(buf, options, optionsize);
323 buf += optionsize;
324 if (datasize)
325 memcpy(buf, data, datasize);
326
327 skb_shinfo(skb)->nr_frags = 0;
328 cm_packets_created++;
329
330 return skb;
331}
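/*
 * Editorial worked example: tcph->doff above is the TCP header length in
 * 32-bit words, rounded up. With sizeof(*tcph) == 20 and optionsize == 4
 * (a lone MSS option), (20 + 4 + 3) >> 2 == 6, i.e. a 24-byte header;
 * with no options it is (20 + 3) >> 2 == 5, the bare 20-byte header.
 */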
332
333
334/**
335 * print_core - dump a cm core
336 */
337static void print_core(struct nes_cm_core *core)
338{
339 nes_debug(NES_DBG_CM, "---------------------------------------------\n");
340 nes_debug(NES_DBG_CM, "CM Core -- (core = %p )\n", core);
341 if (!core)
342 return;
343 nes_debug(NES_DBG_CM, "---------------------------------------------\n");
344 nes_debug(NES_DBG_CM, "Session ID : %u \n", atomic_read(&core->session_id));
345
346 nes_debug(NES_DBG_CM, "State : %u \n", core->state);
347
348 nes_debug(NES_DBG_CM, "Tx Free cnt : %u \n", skb_queue_len(&core->tx_free_list));
349 nes_debug(NES_DBG_CM, "Listen Nodes : %u \n", atomic_read(&core->listen_node_cnt));
350 nes_debug(NES_DBG_CM, "Active Nodes : %u \n", atomic_read(&core->node_cnt));
351
352 nes_debug(NES_DBG_CM, "core : %p \n", core);
353
354 nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
355}
356
357
358/**
359 * schedule_nes_timer
360 * note - cm_node needs to be protected before calling this: take a
361 * reference with add_ref_cm_node() first, drop it with rem_ref_cm_node() after
362 */
363int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
364 enum nes_timer_type type, int send_retrans,
365 int close_when_complete)
366{
367 unsigned long flags;
368 struct nes_cm_core *cm_core;
369 struct nes_timer_entry *new_send;
370 int ret = 0;
371 u32 was_timer_set;
372
373 if (!cm_node)
374 return -EINVAL;
375 new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
376 if (!new_send)
377 return -ENOMEM;
378
379 /* fill in the new timer/transmit entry */
380 new_send->retrycount = NES_DEFAULT_RETRYS;
381 new_send->retranscount = NES_DEFAULT_RETRANS;
382 new_send->skb = skb;
383 new_send->timetosend = jiffies;
384 new_send->type = type;
385 new_send->netdev = cm_node->netdev;
386 new_send->send_retrans = send_retrans;
387 new_send->close_when_complete = close_when_complete;
388
389 if (type == NES_TIMER_TYPE_CLOSE) {
390 new_send->timetosend += (HZ/2); /* TODO: decide on the correct value here */
391 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
392 list_add_tail(&new_send->list, &cm_node->recv_list);
393 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
394 }
395
396 if (type == NES_TIMER_TYPE_SEND) {
397 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
398 atomic_inc(&new_send->skb->users);
399
400 ret = nes_nic_cm_xmit(new_send->skb, cm_node->netdev);
401 if (ret != NETDEV_TX_OK) {
402 nes_debug(NES_DBG_CM, "Error sending packet %p (jiffies = %lu)\n",
403 new_send, jiffies);
404 atomic_dec(&new_send->skb->users);
405 new_send->timetosend = jiffies;
406 } else {
407 cm_packets_sent++;
408 if (!send_retrans) {
409 if (close_when_complete)
410 rem_ref_cm_node(cm_node->cm_core, cm_node);
411 dev_kfree_skb_any(new_send->skb);
412 kfree(new_send);
413 return ret;
414 }
415 new_send->timetosend = jiffies + NES_RETRY_TIMEOUT;
416 }
417 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
418 list_add_tail(&new_send->list, &cm_node->retrans_list);
419 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
420 }
421 if (type == NES_TIMER_TYPE_RECV) {
422 new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
423 new_send->timetosend = jiffies;
424 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
425 list_add_tail(&new_send->list, &cm_node->recv_list);
426 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
427 }
428 cm_core = cm_node->cm_core;
429
430 was_timer_set = timer_pending(&cm_core->tcp_timer);
431
432 if (!was_timer_set) {
433 cm_core->tcp_timer.expires = new_send->timetosend;
434 add_timer(&cm_core->tcp_timer);
435 }
436
437 return ret;
438}
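/*
 * Editorial summary of the three nes_timer_type cases above:
 * NES_TIMER_TYPE_SEND transmits immediately and, when send_retrans is
 * set (or the transmit fails), queues the entry on retrans_list for
 * nes_cm_timer_tick() to retry; NES_TIMER_TYPE_RECV and
 * NES_TIMER_TYPE_CLOSE only queue on recv_list (CLOSE with an HZ/2
 * delay). A typical transmit-with-retransmit call, as used by send_syn()
 * below:
 *
 *	ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
 */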
439
440
441/**
442 * nes_cm_timer_tick
443 */
444void nes_cm_timer_tick(unsigned long pass)
445{
446 unsigned long flags, qplockflags;
447 unsigned long nexttimeout = jiffies + NES_LONG_TIME;
448 struct iw_cm_id *cm_id;
449 struct nes_cm_node *cm_node;
450 struct nes_timer_entry *send_entry, *recv_entry;
451 struct list_head *list_core, *list_core_temp;
452 struct list_head *list_node, *list_node_temp;
453 struct nes_cm_core *cm_core = g_cm_core;
454 struct nes_qp *nesqp;
455 struct sk_buff *skb;
456 u32 settimer = 0;
457 int ret = NETDEV_TX_OK;
458 int node_done;
459
460 spin_lock_irqsave(&cm_core->ht_lock, flags);
461
462 list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
463 cm_node = container_of(list_node, struct nes_cm_node, list);
464 add_ref_cm_node(cm_node);
465 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
466 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
467 list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
468 recv_entry = container_of(list_core, struct nes_timer_entry, list);
469 if ((time_after(recv_entry->timetosend, jiffies)) &&
470 (recv_entry->type == NES_TIMER_TYPE_CLOSE)) {
471 if (nexttimeout > recv_entry->timetosend || !settimer) {
472 nexttimeout = recv_entry->timetosend;
473 settimer = 1;
474 }
475 continue;
476 }
477 list_del(&recv_entry->list);
478 cm_id = cm_node->cm_id;
479 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
480 if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
481 nesqp = (struct nes_qp *)recv_entry->skb;
482 spin_lock_irqsave(&nesqp->lock, qplockflags);
483 if (nesqp->cm_id) {
484 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d: "
485 "****** HIT A NES_TIMER_TYPE_CLOSE"
486 " with something to do!!! ******\n",
487 nesqp->hwqp.qp_id, cm_id,
488 atomic_read(&nesqp->refcount));
489 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
490 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
491 nesqp->ibqp_state = IB_QPS_ERR;
492 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
493 nes_cm_disconn(nesqp);
494 } else {
495 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
496 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p, refcount = %d:"
497 " ****** HIT A NES_TIMER_TYPE_CLOSE"
498 " with nothing to do!!! ******\n",
499 nesqp->hwqp.qp_id, cm_id,
500 atomic_read(&nesqp->refcount));
501 nes_rem_ref(&nesqp->ibqp);
502 }
503 if (cm_id)
504 cm_id->rem_ref(cm_id);
505 }
506 kfree(recv_entry);
507 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
508 }
509 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
510
511 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
512 node_done = 0;
513 list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
514 if (node_done) {
515 break;
516 }
517 send_entry = container_of(list_core, struct nes_timer_entry, list);
518 if (time_after(send_entry->timetosend, jiffies)) {
519 if (cm_node->state != NES_CM_STATE_TSA) {
520 if ((nexttimeout > send_entry->timetosend) || !settimer) {
521 nexttimeout = send_entry->timetosend;
522 settimer = 1;
523 }
524 node_done = 1;
525 continue;
526 } else {
527 list_del(&send_entry->list);
528 skb = send_entry->skb;
529 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
530 dev_kfree_skb_any(skb);
531 kfree(send_entry);
532 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
533 continue;
534 }
535 }
536 if (send_entry->type == NES_TIMER_NODE_CLEANUP) {
537 list_del(&send_entry->list);
538 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
539 kfree(send_entry);
540 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
541 continue;
542 }
543 if ((send_entry->seq_num < cm_node->tcp_cntxt.rem_ack_num) ||
544 (cm_node->state == NES_CM_STATE_TSA) ||
545 (cm_node->state == NES_CM_STATE_CLOSED)) {
546 skb = send_entry->skb;
547 list_del(&send_entry->list);
548 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
549 kfree(send_entry);
550 dev_kfree_skb_any(skb);
551 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
552 continue;
553 }
554
555 if (!send_entry->retranscount || !send_entry->retrycount) {
556 cm_packets_dropped++;
557 skb = send_entry->skb;
558 list_del(&send_entry->list);
559 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
560 dev_kfree_skb_any(skb);
561 kfree(send_entry);
562 if (cm_node->state == NES_CM_STATE_SYN_RCVD) {
563 /* this node never even generated an indication up to the cm */
564 rem_ref_cm_node(cm_core, cm_node);
565 } else {
566 cm_node->state = NES_CM_STATE_CLOSED;
567 create_event(cm_node, NES_CM_EVENT_ABORTED);
568 }
569 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
570 continue;
571 }
572 /* this seems like the correct place, but leave send entry unprotected */
573 /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
574 atomic_inc(&send_entry->skb->users);
575 cm_packets_retrans++;
576 nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
577 " jiffies = %lu, time to send = %lu, retranscount = %u, "
578 "send_entry->seq_num = 0x%08X, cm_node->tcp_cntxt.rem_ack_num = 0x%08X\n",
579 send_entry, cm_node, jiffies, send_entry->timetosend, send_entry->retranscount,
580 send_entry->seq_num, cm_node->tcp_cntxt.rem_ack_num);
581
582 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
583 ret = nes_nic_cm_xmit(send_entry->skb, cm_node->netdev);
584 if (ret != NETDEV_TX_OK) {
585 cm_packets_bounced++;
586 atomic_dec(&send_entry->skb->users);
587 send_entry->retrycount--;
588 nexttimeout = jiffies + NES_SHORT_TIME;
589 settimer = 1;
590 node_done = 1;
591 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
592 continue;
593 } else {
594 cm_packets_sent++;
595 }
596 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
597 list_del(&send_entry->list);
598 nes_debug(NES_DBG_CM, "Packet Sent: retrans count = %u, retry count = %u.\n",
599 send_entry->retranscount, send_entry->retrycount);
600 if (send_entry->send_retrans) {
601 send_entry->retranscount--;
602 send_entry->timetosend = jiffies + NES_RETRY_TIMEOUT;
603 if (nexttimeout > send_entry->timetosend || !settimer) {
604 nexttimeout = send_entry->timetosend;
605 settimer = 1;
606 }
607 list_add(&send_entry->list, &cm_node->retrans_list);
608 continue;
609 } else {
610 int close_when_complete;
611 skb = send_entry->skb;
612 close_when_complete = send_entry->close_when_complete;
613 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
614 if (close_when_complete) {
615 BUG_ON(atomic_read(&cm_node->ref_count) == 1);
616 rem_ref_cm_node(cm_core, cm_node);
617 }
618 dev_kfree_skb_any(skb);
619 kfree(send_entry);
620 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
621 continue;
622 }
623 }
624 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
625
626 rem_ref_cm_node(cm_core, cm_node);
627
628 spin_lock_irqsave(&cm_core->ht_lock, flags);
629 if (ret != NETDEV_TX_OK)
630 break;
631 }
632 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
633
634 if (settimer) {
635 if (!timer_pending(&cm_core->tcp_timer)) {
636 cm_core->tcp_timer.expires = nexttimeout;
637 add_timer(&cm_core->tcp_timer);
638 }
639 }
640}
641
642
643/**
644 * send_syn
645 */
646int send_syn(struct nes_cm_node *cm_node, u32 sendack)
647{
648 int ret;
649 int flags = SET_SYN;
650 struct sk_buff *skb;
651 char optionsbuffer[sizeof(struct option_mss) +
652 sizeof(struct option_windowscale) +
653 sizeof(struct option_base) + 1];
654
655 int optionssize = 0;
656 /* Sending MSS option */
657 union all_known_options *options;
658
659 if (!cm_node)
660 return -EINVAL;
661
662 options = (union all_known_options *)&optionsbuffer[optionssize];
663 options->as_mss.optionnum = OPTION_NUMBER_MSS;
664 options->as_mss.length = sizeof(struct option_mss);
665 options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
666 optionssize += sizeof(struct option_mss);
667
668 options = (union all_known_options *)&optionsbuffer[optionssize];
669 options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
670 options->as_windowscale.length = sizeof(struct option_windowscale);
671 options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
672 optionssize += sizeof(struct option_windowscale);
673
674 if (sendack &&
675 !(NES_DRV_OPT_SUPRESS_OPTION_BC & nes_drv_opt)) {
676 options = (union all_known_options *)&optionsbuffer[optionssize];
677 options->as_base.optionnum = OPTION_NUMBER_WRITE0;
678 options->as_base.length = sizeof(struct option_base);
679 optionssize += sizeof(struct option_base);
680 /* we need the size to be a multiple of 4 */
681 options = (union all_known_options *)&optionsbuffer[optionssize];
682 options->as_end = 1;
683 optionssize += 1;
684 options = (union all_known_options *)&optionsbuffer[optionssize];
685 options->as_end = 1;
686 optionssize += 1;
687 }
688
689 options = (union all_known_options *)&optionsbuffer[optionssize];
690 options->as_end = OPTION_NUMBER_END;
691 optionssize += 1;
692
693 skb = get_free_pkt(cm_node);
694 if (!skb) {
695 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
696 return -1;
697 }
698
699 if (sendack)
700 flags |= SET_ACK;
701
702 form_cm_frame(skb, cm_node, optionsbuffer, optionssize, NULL, 0, flags);
703 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
704
705 return ret;
706}
707
708
709/**
710 * send_reset
711 */
712int send_reset(struct nes_cm_node *cm_node)
713{
714 int ret;
715 struct sk_buff *skb = get_free_pkt(cm_node);
716 int flags = SET_RST | SET_ACK;
717
718 if (!skb) {
719 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
720 return -1;
721 }
722
723 add_ref_cm_node(cm_node);
724 form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, flags);
725 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 1);
726
727 return ret;
728}
729
730
731/**
732 * send_ack
733 */
734int send_ack(struct nes_cm_node *cm_node)
735{
736 int ret;
737 struct sk_buff *skb = get_free_pkt(cm_node);
738
739 if (!skb) {
740 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
741 return -1;
742 }
743
744 form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK);
745 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 0, 0);
746
747 return ret;
748}
749
750
751/**
752 * send_fin
753 */
754int send_fin(struct nes_cm_node *cm_node, struct sk_buff *skb)
755{
756 int ret;
757
758 /* if we didn't get a frame get one */
759 if (!skb)
760 skb = get_free_pkt(cm_node);
761
762 if (!skb) {
763 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
764 return -1;
765 }
766
767 form_cm_frame(skb, cm_node, NULL, 0, NULL, 0, SET_ACK | SET_FIN);
768 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
769
770 return ret;
771}
772
773
774/**
775 * get_free_pkt
776 */
777struct sk_buff *get_free_pkt(struct nes_cm_node *cm_node)
778{
779 struct sk_buff *skb, *new_skb;
780
781 /* check to see if we need to repopulate the free tx pkt queue */
782 if (skb_queue_len(&cm_node->cm_core->tx_free_list) < NES_CM_FREE_PKT_LO_WATERMARK) {
783 while (skb_queue_len(&cm_node->cm_core->tx_free_list) <
784 cm_node->cm_core->free_tx_pkt_max) {
785 /* replace the frame we took, we won't get it back */
786 new_skb = dev_alloc_skb(cm_node->cm_core->mtu);
787 BUG_ON(!new_skb);
788 /* add a replacement frame to the free tx list head */
789 skb_queue_head(&cm_node->cm_core->tx_free_list, new_skb);
790 }
791 }
792
793 skb = skb_dequeue(&cm_node->cm_core->tx_free_list);
794
795 return skb;
796}
797
798
799/**
800 * make_hashkey - generate hash key from node tuple
801 */
802static inline int make_hashkey(u16 loc_port, nes_addr_t loc_addr, u16 rem_port,
803 nes_addr_t rem_addr)
804{
805 u32 hashkey = 0;
806
807 hashkey = loc_addr + rem_addr + loc_port + rem_port;
808 hashkey = (hashkey % NES_CM_HASHTABLE_SIZE);
809
810 return hashkey;
811}
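/*
 * Editorial worked example: the key is a plain sum of the four tuple
 * members reduced modulo NES_CM_HASHTABLE_SIZE (defined in nes_cm.h).
 * Since addition commutes, both directions of a connection hash to the
 * same key. Note that in this version connected_nodes is a single list,
 * so the key is computed and stored but lookups in find_node() below
 * still walk the whole list.
 */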
812
813
814/**
815 * find_node - find a cm node that matches the reference cm node
816 */
817static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
818 u16 rem_port, nes_addr_t rem_addr, u16 loc_port, nes_addr_t loc_addr)
819{
820 unsigned long flags;
821 u32 hashkey;
822 struct list_head *list_pos;
823 struct list_head *hte;
824 struct nes_cm_node *cm_node;
825
826 /* make a hash index key for this packet */
827 hashkey = make_hashkey(loc_port, loc_addr, rem_port, rem_addr);
828
829 /* get a handle on the hte */
830 hte = &cm_core->connected_nodes;
831
832 nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
833 loc_addr, loc_port, cm_core, hte);
834
835 /* walk list and find cm_node associated with this session ID */
836 spin_lock_irqsave(&cm_core->ht_lock, flags);
837 list_for_each(list_pos, hte) {
838 cm_node = container_of(list_pos, struct nes_cm_node, list);
839 /* compare quad, return node handle if a match */
840 nes_debug(NES_DBG_CM, "finding node %x:%x =? %x:%x ^ %x:%x =? %x:%x\n",
841 cm_node->loc_addr, cm_node->loc_port,
842 loc_addr, loc_port,
843 cm_node->rem_addr, cm_node->rem_port,
844 rem_addr, rem_port);
845 if ((cm_node->loc_addr == loc_addr) && (cm_node->loc_port == loc_port) &&
846 (cm_node->rem_addr == rem_addr) && (cm_node->rem_port == rem_port)) {
847 add_ref_cm_node(cm_node);
848 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
849 return cm_node;
850 }
851 }
852 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
853
854 /* no owner node */
855 return NULL;
856}
857
858
859/**
860 * find_listener - find a cm node listening on this addr-port pair
861 */
862static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
863 nes_addr_t dst_addr, u16 dst_port, enum nes_cm_listener_state listener_state)
864{
865 unsigned long flags;
866 struct list_head *listen_list;
867 struct nes_cm_listener *listen_node;
868
869 /* walk list and find cm_node associated with this session ID */
870 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
871 list_for_each(listen_list, &cm_core->listen_list.list) {
872 listen_node = container_of(listen_list, struct nes_cm_listener, list);
873 /* compare node pair, return node handle if a match */
874 if (((listen_node->loc_addr == dst_addr) ||
875 listen_node->loc_addr == 0x00000000) &&
876 (listen_node->loc_port == dst_port) &&
877 (listener_state & listen_node->listener_state)) {
878 atomic_inc(&listen_node->ref_count);
879 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
880 return listen_node;
881 }
882 }
883 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
884
885 nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
886 dst_addr, dst_port);
887
888 /* no listener */
889 return NULL;
890}
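
/*
 * Note the INADDR_ANY wildcard in the match above: a listener whose
 * loc_addr is 0x00000000 claims any destination address. For example, a
 * listen on 0.0.0.0:8000 will match an incoming SYN addressed to
 * 10.0.0.1:8000, provided the requested listener state also matches.
 */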
891
892
893/**
894 * add_hte_node - add a cm node to the hash table
895 */
896static int add_hte_node(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
897{
898 unsigned long flags;
899 u32 hashkey;
900 struct list_head *hte;
901
902 if (!cm_node || !cm_core)
903 return -EINVAL;
904
905 nes_debug(NES_DBG_CM, "Adding Node to Active Connection HT\n");
906
907 /* first, make an index into our hash table */
908 hashkey = make_hashkey(cm_node->loc_port, cm_node->loc_addr,
909 cm_node->rem_port, cm_node->rem_addr);
910 cm_node->hashkey = hashkey;
911
912 spin_lock_irqsave(&cm_core->ht_lock, flags);
913
914 /* get a handle on the hash table element (list head for this slot) */
915 hte = &cm_core->connected_nodes;
916 list_add_tail(&cm_node->list, hte);
917 atomic_inc(&cm_core->ht_node_cnt);
918
919 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
920
921 return 0;
922}
923
924
/**
 * mini_cm_dec_refcnt_listen
 */
static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
		struct nes_cm_listener *listener, int free_hanging_nodes)
{
	int ret = 1;
	unsigned long flags;

	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	if (!atomic_dec_return(&listener->ref_count)) {
		list_del(&listener->list);

		/* decrement our listen node count */
		atomic_dec(&cm_core->listen_node_cnt);

		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->nesvnic) {
			nes_manage_apbvt(listener->nesvnic, listener->loc_port,
					PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
					NES_MANAGE_APBVT_DEL);
		}

		/* this must be read before the listener is freed */
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			nes_debug(NES_DBG_CM, "destroying listener (%p)"
					" with non-zero pending accepts=%u\n",
					listener,
					atomic_read(&listener->pend_accepts_cnt));

		nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);

		kfree(listener);
		ret = 0;
		cm_listens_destroyed++;
	} else {
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return ret;
}
964
965
966/**
967 * mini_cm_del_listen
968 */
969static int mini_cm_del_listen(struct nes_cm_core *cm_core,
970 struct nes_cm_listener *listener)
971{
972 listener->listener_state = NES_CM_LISTENER_PASSIVE_STATE;
973 listener->cm_id = NULL; /* going to be destroyed pretty soon */
974 return mini_cm_dec_refcnt_listen(cm_core, listener, 1);
975}
976
977
978/**
979 * mini_cm_accelerated
980 */
981static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
982 struct nes_cm_node *cm_node)
983{
984 u32 was_timer_set;
985 cm_node->accelerated = 1;
986
987 if (cm_node->accept_pend) {
988 BUG_ON(!cm_node->listener);
989 atomic_dec(&cm_node->listener->pend_accepts_cnt);
990 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
991 }
992
993 was_timer_set = timer_pending(&cm_core->tcp_timer);
994 if (!was_timer_set) {
995 cm_core->tcp_timer.expires = jiffies + NES_SHORT_TIME;
996 add_timer(&cm_core->tcp_timer);
997 }
998
999 return 0;
1000}
1001
1002
/**
 * nes_addr_send_arp
 */
static void nes_addr_send_arp(u32 dst_ip)
{
	struct rtable *rt;
	struct flowi fl;

	memset(&fl, 0, sizeof fl);
	fl.nl_u.ip4_u.daddr = htonl(dst_ip);
	if (ip_route_output_key(&init_net, &rt, &fl)) {
		printk(KERN_ERR "%s: ip_route_output_key failed for 0x%08X\n",
				__func__, dst_ip);
		return;
	}

	neigh_event_send(rt->u.dst.neighbour, NULL);
	ip_rt_put(rt);
}
1022
1023
/**
 * make_cm_node - create a new instance of a cm node
 */
static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
		struct nes_vnic *nesvnic, struct nes_cm_info *cm_info,
		struct nes_cm_listener *listener)
{
	struct nes_cm_node *cm_node;
	struct timespec ts;
	int arpindex = 0;
	struct nes_device *nesdev;
	struct nes_adapter *nesadapter;

	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->loc_addr = cm_info->loc_addr;
	cm_node->rem_addr = cm_info->rem_addr;
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;
	cm_node->send_write0 = send_first;
	nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
			cm_node->loc_addr, cm_node->loc_port,
			cm_node->rem_addr, cm_node->rem_port);
	cm_node->listener = listener;
	cm_node->netdev = nesvnic->netdev;
	cm_node->cm_id = cm_info->cm_id;
	memcpy(cm_node->loc_mac, nesvnic->netdev->dev_addr, ETH_ALEN);

	nes_debug(NES_DBG_CM, "listener=%p, cm_id=%p\n",
			cm_node->listener, cm_node->cm_id);

	INIT_LIST_HEAD(&cm_node->retrans_list);
	spin_lock_init(&cm_node->retrans_list_lock);
	INIT_LIST_HEAD(&cm_node->recv_list);
	spin_lock_init(&cm_node->recv_list_lock);

	cm_node->loopbackpartner = NULL;
	atomic_set(&cm_node->ref_count, 1);
	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = NES_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd = NES_CM_DEFAULT_RCV_WND_SCALED >>
			NES_CM_DEFAULT_RCV_WND_SCALE;
	ts = current_kernel_time();
	cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
	cm_node->tcp_cntxt.mss = nesvnic->max_frame_size - sizeof(struct iphdr) -
			sizeof(struct tcphdr) - ETH_HLEN;
	cm_node->tcp_cntxt.rcv_nxt = 0;
	/* get a unique session ID; add thread_id to an upcounter to handle race */
	atomic_inc(&cm_core->node_cnt);
	atomic_inc(&cm_core->session_id);
	cm_node->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
	cm_node->conn_type = cm_info->conn_type;
	cm_node->apbvt_set = 0;
	cm_node->accept_pend = 0;

	cm_node->nesvnic = nesvnic;
	/* get some device handles, for arp lookup */
	nesdev = nesvnic->nesdev;
	nesadapter = nesdev->nesadapter;

	/* get the mac addr for the remote node */
	arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
	if (arpindex < 0) {
		kfree(cm_node);
		nes_addr_send_arp(cm_info->rem_addr);
		return NULL;
	}

	/* copy the mac addr to node context */
	memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
	nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %02x,"
			" %02x, %02x, %02x, %02x, %02x\n",
			cm_node->rem_mac[0], cm_node->rem_mac[1],
			cm_node->rem_mac[2], cm_node->rem_mac[3],
			cm_node->rem_mac[4], cm_node->rem_mac[5]);

	add_hte_node(cm_core, cm_node);
	atomic_inc(&cm_nodes_created);

	return cm_node;
}
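
/*
 * The MSS computed above strips all fixed headers from the frame size.
 * A sketch assuming a max_frame_size of 1514 (a standard Ethernet frame
 * including the 14-byte MAC header, no VLAN tag, no IP or TCP options;
 * illustrative value only):
 *
 *	mss = 1514 - sizeof(struct iphdr)	(20 bytes)
 *		   - sizeof(struct tcphdr)	(20 bytes)
 *		   - ETH_HLEN			(14 bytes)
 *	    = 1460 bytes of TCP payload per segment
 */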
1111
1112
/**
 * add_ref_cm_node - add a reference to a cm node
 */
static int add_ref_cm_node(struct nes_cm_node *cm_node)
{
	atomic_inc(&cm_node->ref_count);
	return 0;
}
1121
1122
/**
 * rem_ref_cm_node - drop a reference; destroys the cm node once the
 * last reference is released
 */
1126static int rem_ref_cm_node(struct nes_cm_core *cm_core,
1127 struct nes_cm_node *cm_node)
1128{
1129 unsigned long flags, qplockflags;
1130 struct nes_timer_entry *send_entry;
1131 struct nes_timer_entry *recv_entry;
1132 struct iw_cm_id *cm_id;
1133 struct list_head *list_core, *list_node_temp;
1134 struct nes_qp *nesqp;
1135
1136 if (!cm_node)
1137 return -EINVAL;
1138
1139 spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
1140 if (atomic_dec_return(&cm_node->ref_count)) {
1141 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
1142 return 0;
1143 }
1144 list_del(&cm_node->list);
1145 atomic_dec(&cm_core->ht_node_cnt);
1146 spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
1147
1148 /* if the node is destroyed before connection was accelerated */
1149 if (!cm_node->accelerated && cm_node->accept_pend) {
1150 BUG_ON(!cm_node->listener);
1151 atomic_dec(&cm_node->listener->pend_accepts_cnt);
1152 BUG_ON(atomic_read(&cm_node->listener->pend_accepts_cnt) < 0);
1153 }
1154
1155 spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	list_for_each_safe(list_core, list_node_temp, &cm_node->retrans_list) {
		send_entry = container_of(list_core, struct nes_timer_entry, list);
		list_del(&send_entry->list);
		/* the entry is already unlinked, safe to drop the lock while freeing */
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		dev_kfree_skb_any(send_entry->skb);
		kfree(send_entry);
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	}
1165 spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
1166
1167 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
1168 list_for_each_safe(list_core, list_node_temp, &cm_node->recv_list) {
1169 recv_entry = container_of(list_core, struct nes_timer_entry, list);
1170 list_del(&recv_entry->list);
1171 cm_id = cm_node->cm_id;
1172 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
		if (recv_entry->type == NES_TIMER_TYPE_CLOSE) {
			/* for CLOSE timers the skb field carries the nesqp pointer */
			nesqp = (struct nes_qp *)recv_entry->skb;
1175 spin_lock_irqsave(&nesqp->lock, qplockflags);
1176 if (nesqp->cm_id) {
1177 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
1178 " with something to do!!! ******\n",
1179 nesqp->hwqp.qp_id, cm_id);
1180 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
1181 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
1182 nesqp->ibqp_state = IB_QPS_ERR;
1183 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
1184 nes_cm_disconn(nesqp);
1185 } else {
1186 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
1187 nes_debug(NES_DBG_CM, "QP%u: cm_id = %p: ****** HIT A NES_TIMER_TYPE_CLOSE"
1188 " with nothing to do!!! ******\n",
1189 nesqp->hwqp.qp_id, cm_id);
1190 nes_rem_ref(&nesqp->ibqp);
1191 }
1192 cm_id->rem_ref(cm_id);
1193 } else if (recv_entry->type == NES_TIMER_TYPE_RECV) {
1194 dev_kfree_skb_any(recv_entry->skb);
1195 }
1196 kfree(recv_entry);
1197 spin_lock_irqsave(&cm_node->recv_list_lock, flags);
1198 }
1199 spin_unlock_irqrestore(&cm_node->recv_list_lock, flags);
1200
1201 if (cm_node->listener) {
1202 mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
1203 } else {
1204 if (cm_node->apbvt_set && cm_node->nesvnic) {
1205 nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
1206 PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
1207 NES_MANAGE_APBVT_DEL);
1208 }
1209 }
1210
1211 kfree(cm_node);
1212 atomic_dec(&cm_core->node_cnt);
1213 atomic_inc(&cm_nodes_destroyed);
1214
1215 return 0;
1216}
1217
1218
1219/**
1220 * process_options
1221 */
1222static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc, u32 optionsize, u32 syn_packet)
1223{
1224 u32 tmp;
1225 u32 offset = 0;
1226 union all_known_options *all_options;
1227 char got_mss_option = 0;
1228
1229 while (offset < optionsize) {
1230 all_options = (union all_known_options *)(optionsloc + offset);
1231 switch (all_options->as_base.optionnum) {
1232 case OPTION_NUMBER_END:
1233 offset = optionsize;
1234 break;
1235 case OPTION_NUMBER_NONE:
1236 offset += 1;
1237 continue;
		case OPTION_NUMBER_MSS:
			nes_debug(NES_DBG_CM, "%s: MSS Length: %d Offset: %d Size: %d\n",
					__func__, all_options->as_mss.length,
					offset, optionsize);
			got_mss_option = 1;
			if (all_options->as_mss.length != 4)
				return 1;
			tmp = ntohs(all_options->as_mss.mss);
			if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
				cm_node->tcp_cntxt.mss = tmp;
			break;
1251 case OPTION_NUMBER_WINDOW_SCALE:
1252 cm_node->tcp_cntxt.snd_wscale = all_options->as_windowscale.shiftcount;
1253 break;
1254 case OPTION_NUMBER_WRITE0:
1255 cm_node->send_write0 = 1;
1256 break;
1257 default:
1258 nes_debug(NES_DBG_CM, "TCP Option not understood: %x\n",
1259 all_options->as_base.optionnum);
1260 break;
		}
		/* guard against a malformed zero-length option, which would
		 * otherwise spin here forever */
		if (!all_options->as_base.length)
			break;
		offset += all_options->as_base.length;
	}
1264 if ((!got_mss_option) && (syn_packet))
1265 cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
1266 return 0;
1267}
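
/*
 * For reference, what process_options() walks over: TCP options are
 * (kind, length, value) triplets, apart from the one-byte END and NOP
 * kinds handled above. An MSS option advertising 1460 looks like
 *
 *	02 04 05 B4	(kind=2, len=4, mss=0x05B4)
 *
 * and a window-scale option with shift count 2 looks like
 *
 *	03 03 02	(kind=3, len=3, shift=2)
 */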
1268
1269
/**
 * process_packet
 */
int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
		struct nes_cm_core *cm_core)
{
	int optionsize;
	int datasize;
	int ret = 0;
	struct tcphdr *tcph = tcp_hdr(skb);
	u32 inc_sequence;

	/* validate the header before the first dereference */
	if ((!tcph) || (cm_node->state == NES_CM_STATE_TSA)) {
		BUG_ON(!tcph);
		atomic_inc(&cm_accel_dropped_pkts);
		return -1;
	}

	if (cm_node->state == NES_CM_STATE_SYN_SENT && tcph->syn) {
		inc_sequence = ntohl(tcph->seq);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence;
	}
1291
1292 if (tcph->rst) {
1293 atomic_inc(&cm_resets_recvd);
1294 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u. refcnt=%d\n",
1295 cm_node, cm_node->state, atomic_read(&cm_node->ref_count));
1296 switch (cm_node->state) {
1297 case NES_CM_STATE_LISTENING:
1298 rem_ref_cm_node(cm_core, cm_node);
1299 break;
1300 case NES_CM_STATE_TSA:
1301 case NES_CM_STATE_CLOSED:
1302 break;
1303 case NES_CM_STATE_SYN_RCVD:
1304 nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
1305 " remote 0x%08X:%04X, node state = %u\n",
1306 cm_node->loc_addr, cm_node->loc_port,
1307 cm_node->rem_addr, cm_node->rem_port,
1308 cm_node->state);
1309 rem_ref_cm_node(cm_core, cm_node);
1310 break;
1311 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1312 case NES_CM_STATE_ESTABLISHED:
1313 case NES_CM_STATE_MPAREQ_SENT:
1314 default:
1315 nes_debug(NES_DBG_CM, "Received a reset for local 0x%08X:%04X,"
1316 " remote 0x%08X:%04X, node state = %u refcnt=%d\n",
1317 cm_node->loc_addr, cm_node->loc_port,
1318 cm_node->rem_addr, cm_node->rem_port,
1319 cm_node->state, atomic_read(&cm_node->ref_count));
			/* create event */
			cm_node->state = NES_CM_STATE_CLOSED;

			create_event(cm_node, NES_CM_EVENT_ABORTED);
1324 break;
1325
1326 }
1327 return -1;
1328 }
1329
1330 optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
1331
1332 skb_pull(skb, ip_hdr(skb)->ihl << 2);
1333 skb_pull(skb, tcph->doff << 2);
1334
1335 datasize = skb->len;
1336 inc_sequence = ntohl(tcph->seq);
1337 nes_debug(NES_DBG_CM, "datasize = %u, sequence = 0x%08X, ack_seq = 0x%08X,"
1338 " rcv_nxt = 0x%08X Flags: %s %s.\n",
1339 datasize, inc_sequence, ntohl(tcph->ack_seq),
1340 cm_node->tcp_cntxt.rcv_nxt, (tcph->syn ? "SYN":""),
1341 (tcph->ack ? "ACK":""));
1342
	if (!tcph->syn && (inc_sequence != cm_node->tcp_cntxt.rcv_nxt)) {
1345 nes_debug(NES_DBG_CM, "dropping packet, datasize = %u, sequence = 0x%08X,"
1346 " ack_seq = 0x%08X, rcv_nxt = 0x%08X Flags: %s.\n",
1347 datasize, inc_sequence, ntohl(tcph->ack_seq),
1348 cm_node->tcp_cntxt.rcv_nxt, (tcph->ack ? "ACK":""));
1349 if (cm_node->state == NES_CM_STATE_LISTENING) {
1350 rem_ref_cm_node(cm_core, cm_node);
1351 }
1352 return -1;
1353 }
1354
	cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;

1358 if (optionsize) {
1359 u8 *optionsloc = (u8 *)&tcph[1];
		if (process_options(cm_node, optionsloc, optionsize, (u32)tcph->syn)) {
			nes_debug(NES_DBG_CM, "%s: Node %p, Sending RESET\n",
					__func__, cm_node);
1362 send_reset(cm_node);
1363 if (cm_node->state != NES_CM_STATE_SYN_SENT)
1364 rem_ref_cm_node(cm_core, cm_node);
1365 return 0;
1366 }
1367 } else if (tcph->syn)
1368 cm_node->tcp_cntxt.mss = NES_CM_DEFAULT_MSS;
1369
1370 cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
1371 cm_node->tcp_cntxt.snd_wscale;
1372
1373 if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) {
1374 cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
1375 }
1376
1377 if (tcph->ack) {
1378 cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
1379 switch (cm_node->state) {
1380 case NES_CM_STATE_SYN_RCVD:
1381 case NES_CM_STATE_SYN_SENT:
1382 /* read and stash current sequence number */
1383 if (cm_node->tcp_cntxt.rem_ack_num != cm_node->tcp_cntxt.loc_seq_num) {
1384 nes_debug(NES_DBG_CM, "ERROR - cm_node->tcp_cntxt.rem_ack_num !="
1385 " cm_node->tcp_cntxt.loc_seq_num\n");
1386 send_reset(cm_node);
1387 return 0;
1388 }
1389 if (cm_node->state == NES_CM_STATE_SYN_SENT)
1390 cm_node->state = NES_CM_STATE_ONE_SIDE_ESTABLISHED;
1391 else {
1392 cm_node->state = NES_CM_STATE_ESTABLISHED;
1393 }
1394 break;
1395 case NES_CM_STATE_LAST_ACK:
1396 cm_node->state = NES_CM_STATE_CLOSED;
1397 break;
1398 case NES_CM_STATE_FIN_WAIT1:
1399 cm_node->state = NES_CM_STATE_FIN_WAIT2;
1400 break;
1401 case NES_CM_STATE_CLOSING:
1402 cm_node->state = NES_CM_STATE_TIME_WAIT;
1403 /* need to schedule this to happen in 2MSL timeouts */
1404 cm_node->state = NES_CM_STATE_CLOSED;
1405 break;
1406 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1407 case NES_CM_STATE_ESTABLISHED:
1408 case NES_CM_STATE_MPAREQ_SENT:
1409 case NES_CM_STATE_CLOSE_WAIT:
1410 case NES_CM_STATE_TIME_WAIT:
1411 case NES_CM_STATE_CLOSED:
1412 break;
1413 case NES_CM_STATE_LISTENING:
1414 nes_debug(NES_DBG_CM, "Received an ACK on a listening port (SYN %d)\n", tcph->syn);
1415 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1416 send_reset(cm_node);
			/* send_reset bumps refcount, this should have been a new node */
			rem_ref_cm_node(cm_core, cm_node);
			return -1;
1421 case NES_CM_STATE_TSA:
1422 nes_debug(NES_DBG_CM, "Received a packet with the ack bit set while in TSA state\n");
1423 break;
1424 case NES_CM_STATE_UNKNOWN:
1425 case NES_CM_STATE_INITED:
1426 case NES_CM_STATE_ACCEPTING:
1427 case NES_CM_STATE_FIN_WAIT2:
1428 default:
1429 nes_debug(NES_DBG_CM, "Received ack from unknown state: %x\n",
1430 cm_node->state);
1431 send_reset(cm_node);
1432 break;
1433 }
1434 }
1435
1436 if (tcph->syn) {
1437 if (cm_node->state == NES_CM_STATE_LISTENING) {
1438 /* do not exceed backlog */
1439 atomic_inc(&cm_node->listener->pend_accepts_cnt);
1440 if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
1441 cm_node->listener->backlog) {
				nes_debug(NES_DBG_CM, "drop syn due to backlog pressure\n");
1443 cm_backlog_drops++;
1444 atomic_dec(&cm_node->listener->pend_accepts_cnt);
1445 rem_ref_cm_node(cm_core, cm_node);
1446 return 0;
1447 }
1448 cm_node->accept_pend = 1;
1449
1450 }
		if (datasize == 0)
			cm_node->tcp_cntxt.rcv_nxt++;
1453
1454 if (cm_node->state == NES_CM_STATE_LISTENING) {
1455 cm_node->state = NES_CM_STATE_SYN_RCVD;
1456 send_syn(cm_node, 1);
1457 }
1458 if (cm_node->state == NES_CM_STATE_ONE_SIDE_ESTABLISHED) {
1459 cm_node->state = NES_CM_STATE_ESTABLISHED;
1460 /* send final handshake ACK */
1461 ret = send_ack(cm_node);
1462 if (ret < 0)
1463 return ret;
1464
1465 cm_node->state = NES_CM_STATE_MPAREQ_SENT;
1466 ret = send_mpa_request(cm_node);
1467 if (ret < 0)
1468 return ret;
1469 }
1470 }
1471
1472 if (tcph->fin) {
1473 cm_node->tcp_cntxt.rcv_nxt++;
1474 switch (cm_node->state) {
1475 case NES_CM_STATE_SYN_RCVD:
1476 case NES_CM_STATE_SYN_SENT:
1477 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1478 case NES_CM_STATE_ESTABLISHED:
1479 case NES_CM_STATE_ACCEPTING:
1480 case NES_CM_STATE_MPAREQ_SENT:
			/* passive close: our FIN goes out immediately below, so
			 * skip CLOSE_WAIT and go straight to LAST_ACK */
			cm_node->state = NES_CM_STATE_LAST_ACK;
1483 ret = send_fin(cm_node, NULL);
1484 break;
1485 case NES_CM_STATE_FIN_WAIT1:
1486 cm_node->state = NES_CM_STATE_CLOSING;
1487 ret = send_ack(cm_node);
1488 break;
1489 case NES_CM_STATE_FIN_WAIT2:
1490 cm_node->state = NES_CM_STATE_TIME_WAIT;
			cm_node->tcp_cntxt.loc_seq_num++;
1492 ret = send_ack(cm_node);
1493 /* need to schedule this to happen in 2MSL timeouts */
1494 cm_node->state = NES_CM_STATE_CLOSED;
1495 break;
1496 case NES_CM_STATE_CLOSE_WAIT:
1497 case NES_CM_STATE_LAST_ACK:
1498 case NES_CM_STATE_CLOSING:
1499 case NES_CM_STATE_TSA:
1500 default:
1501 nes_debug(NES_DBG_CM, "Received a fin while in %x state\n",
1502 cm_node->state);
1503 ret = -EINVAL;
1504 break;
1505 }
1506 }
1507
1508 if (datasize) {
1509 u8 *dataloc = skb->data;
1510 /* figure out what state we are in and handle transition to next state */
1511 switch (cm_node->state) {
1512 case NES_CM_STATE_LISTENING:
1513 case NES_CM_STATE_SYN_RCVD:
1514 case NES_CM_STATE_SYN_SENT:
1515 case NES_CM_STATE_FIN_WAIT1:
1516 case NES_CM_STATE_FIN_WAIT2:
1517 case NES_CM_STATE_CLOSE_WAIT:
1518 case NES_CM_STATE_LAST_ACK:
1519 case NES_CM_STATE_CLOSING:
1520 break;
1521 case NES_CM_STATE_MPAREQ_SENT:
1522 /* recv the mpa res frame, ret=frame len (incl priv data) */
1523 ret = parse_mpa(cm_node, dataloc, datasize);
1524 if (ret < 0)
1525 break;
1526 /* set the req frame payload len in skb */
1527 /* we are done handling this state, set node to a TSA state */
1528 cm_node->state = NES_CM_STATE_TSA;
1529 send_ack(cm_node);
1530 create_event(cm_node, NES_CM_EVENT_CONNECTED);
1531 break;
1532
1533 case NES_CM_STATE_ESTABLISHED:
1534 /* we are expecting an MPA req frame */
1535 ret = parse_mpa(cm_node, dataloc, datasize);
1536 if (ret < 0) {
1537 break;
1538 }
1539 cm_node->state = NES_CM_STATE_TSA;
1540 send_ack(cm_node);
1541 /* we got a valid MPA request, create an event */
1542 create_event(cm_node, NES_CM_EVENT_MPA_REQ);
1543 break;
1544 case NES_CM_STATE_TSA:
1545 handle_exception_pkt(cm_node, skb);
1546 break;
1547 case NES_CM_STATE_UNKNOWN:
1548 case NES_CM_STATE_INITED:
1549 default:
1550 ret = -1;
1551 }
1552 }
1553
1554 return ret;
1555}
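
/*
 * A note on the window arithmetic above: the advertised 16-bit window is
 * shifted left by the peer's window-scale option. For example, with a
 * raw window of 0xFFFF (65535) and snd_wscale = 2:
 *
 *	snd_wnd = 65535 << 2 = 262140 bytes
 *
 * max_snd_wnd only ratchets upward and is later programmed into the QP
 * context by nes_cm_init_tsa_conn().
 */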
1556
1557
/**
 * mini_cm_listen - create a listen node with params
 */
static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
		struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
{
	struct nes_cm_listener *listener;
	unsigned long flags;

	nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
			cm_info->loc_addr, cm_info->loc_port);

	/* cannot have multiple matching listeners */
	listener = find_listener(cm_core, htonl(cm_info->loc_addr),
			htons(cm_info->loc_port), NES_CM_LISTENER_EITHER_STATE);
	if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
		/* find_listener incremented the ref count; drop it again */
		atomic_dec(&listener->ref_count);
		nes_debug(NES_DBG_CM, "Not creating listener since it already exists\n");
		return NULL;
	}

	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
		if (!listener) {
			nes_debug(NES_DBG_CM, "Not creating listener, memory allocation failed\n");
			return NULL;
		}

		listener->loc_addr = htonl(cm_info->loc_addr);
		listener->loc_port = htons(cm_info->loc_port);
		listener->reused_node = 0;

		atomic_set(&listener->ref_count, 1);
	} else {
		/* passive case: find_listener already inc'ed the ref count */
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->nesvnic = nesvnic;
	atomic_inc(&cm_core->node_cnt);
	atomic_inc(&cm_core->session_id);

	listener->session_id = (u32)(atomic_read(&cm_core->session_id) + current->tgid);
	listener->conn_type = cm_info->conn_type;
	listener->backlog = cm_info->backlog;
	listener->listener_state = NES_CM_LISTENER_ACTIVE_STATE;

	if (!listener->reused_node) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_list.list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
		atomic_inc(&cm_core->listen_node_cnt);
	}

	nes_debug(NES_DBG_CM, "Api - listen(): addr=0x%08X, port=0x%04x,"
			" listener = %p, backlog = %d, cm_id = %p.\n",
			cm_info->loc_addr, cm_info->loc_port,
			listener, listener->backlog, listener->cm_id);

	return listener;
}
1627
1628
/**
 * mini_cm_connect - make a connection node with params
 */
struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
		struct nes_vnic *nesvnic, struct ietf_mpa_frame *mpa_frame,
		struct nes_cm_info *cm_info)
{
	int ret = 0;
	struct nes_cm_node *cm_node;
	struct nes_cm_listener *loopbackremotelistener;
	struct nes_cm_node *loopbackremotenode;
	struct nes_cm_info loopback_cm_info;

	u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
			ntohs(mpa_frame->priv_data_len);

	cm_info->loc_addr = htonl(cm_info->loc_addr);
	cm_info->rem_addr = htonl(cm_info->rem_addr);
	cm_info->loc_port = htons(cm_info->loc_port);
	cm_info->rem_port = htons(cm_info->rem_port);

	/* create a CM connection node */
	cm_node = make_cm_node(cm_core, nesvnic, cm_info, NULL);
	if (!cm_node)
		return NULL;

	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;

	if (cm_info->loc_addr == cm_info->rem_addr) {
		loopbackremotelistener = find_listener(cm_core, cm_node->rem_addr,
				cm_node->rem_port, NES_CM_LISTENER_ACTIVE_STATE);
		if (loopbackremotelistener == NULL) {
			create_event(cm_node, NES_CM_EVENT_ABORTED);
		} else {
			atomic_inc(&cm_loopbacks);
			loopback_cm_info = *cm_info;
			loopback_cm_info.loc_port = cm_info->rem_port;
			loopback_cm_info.rem_port = cm_info->loc_port;
			loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
			loopbackremotenode = make_cm_node(cm_core, nesvnic,
					&loopback_cm_info, loopbackremotelistener);
			if (!loopbackremotenode) {
				rem_ref_cm_node(cm_core, cm_node);
				return NULL;
			}
			loopbackremotenode->loopbackpartner = cm_node;
			loopbackremotenode->tcp_cntxt.rcv_wscale =
					NES_CM_DEFAULT_RCV_WND_SCALE;
			cm_node->loopbackpartner = loopbackremotenode;
			/* copy just the private data; mpa_frame_size also
			 * counts the MPA header */
			memcpy(loopbackremotenode->mpa_frame_buf,
					&mpa_frame->priv_data,
					mpa_frame_size - sizeof(struct ietf_mpa_frame));
			loopbackremotenode->mpa_frame_size = mpa_frame_size -
					sizeof(struct ietf_mpa_frame);

			/* we are done handling this state, set node to a TSA state */
			cm_node->state = NES_CM_STATE_TSA;
			cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
			loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
			cm_node->tcp_cntxt.max_snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
			loopbackremotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wnd = loopbackremotenode->tcp_cntxt.rcv_wnd;
			loopbackremotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
			cm_node->tcp_cntxt.snd_wscale = loopbackremotenode->tcp_cntxt.rcv_wscale;
			loopbackremotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;

			create_event(loopbackremotenode, NES_CM_EVENT_MPA_REQ);
		}
		return cm_node;
	}

	/* init our MPA frame ptr */
	memcpy(&cm_node->mpa_frame, mpa_frame, mpa_frame_size);
	cm_node->mpa_frame_size = mpa_frame_size;

	/* send a syn and goto syn sent state */
	cm_node->state = NES_CM_STATE_SYN_SENT;
	ret = send_syn(cm_node, 0);

	nes_debug(NES_DBG_CM, "Api - connect(): dest addr=0x%08X, port=0x%04x,"
			" cm_node=%p, cm_id = %p.\n",
			cm_node->rem_addr, cm_node->rem_port, cm_node, cm_node->cm_id);

	return cm_node;
}
1712
1713
1714/**
1715 * mini_cm_accept - accept a connection
1716 * This function is never called
1717 */
1718int mini_cm_accept(struct nes_cm_core *cm_core, struct ietf_mpa_frame *mpa_frame,
1719 struct nes_cm_node *cm_node)
1720{
1721 return 0;
1722}
1723
1724
1725/**
1726 * mini_cm_reject - reject and teardown a connection
1727 */
1728int mini_cm_reject(struct nes_cm_core *cm_core,
1729 struct ietf_mpa_frame *mpa_frame,
1730 struct nes_cm_node *cm_node)
1731{
1732 int ret = 0;
1733 struct sk_buff *skb;
1734 u16 mpa_frame_size = sizeof(struct ietf_mpa_frame) +
1735 ntohs(mpa_frame->priv_data_len);
1736
1737 skb = get_free_pkt(cm_node);
1738 if (!skb) {
1739 nes_debug(NES_DBG_CM, "Failed to get a Free pkt\n");
1740 return -1;
1741 }
1742
	/* send the MPA reject reply frame */
	form_cm_frame(skb, cm_node, NULL, 0, mpa_frame, mpa_frame_size,
			SET_ACK | SET_FIN);
1745 ret = schedule_nes_timer(cm_node, skb, NES_TIMER_TYPE_SEND, 1, 0);
1746
1747 cm_node->state = NES_CM_STATE_CLOSED;
1748 ret = send_fin(cm_node, NULL);
1749
1750 if (ret < 0) {
1751 printk(KERN_INFO PFX "failed to send MPA Reply (reject)\n");
1752 return ret;
1753 }
1754
1755 return ret;
1756}
1757
1758
1759/**
1760 * mini_cm_close
1761 */
1762int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_node)
1763{
1764 int ret = 0;
1765
1766 if (!cm_core || !cm_node)
1767 return -EINVAL;
1768
	switch (cm_node->state) {
	/* states with a live TCP connection: start an orderly close */
	case NES_CM_STATE_SYN_RCVD:
1773 case NES_CM_STATE_SYN_SENT:
1774 case NES_CM_STATE_ONE_SIDE_ESTABLISHED:
1775 case NES_CM_STATE_ESTABLISHED:
1776 case NES_CM_STATE_ACCEPTING:
1777 case NES_CM_STATE_MPAREQ_SENT:
1778 cm_node->state = NES_CM_STATE_FIN_WAIT1;
1779 send_fin(cm_node, NULL);
1780 break;
1781 case NES_CM_STATE_CLOSE_WAIT:
1782 cm_node->state = NES_CM_STATE_LAST_ACK;
1783 send_fin(cm_node, NULL);
1784 break;
1785 case NES_CM_STATE_FIN_WAIT1:
1786 case NES_CM_STATE_FIN_WAIT2:
1787 case NES_CM_STATE_LAST_ACK:
1788 case NES_CM_STATE_TIME_WAIT:
1789 case NES_CM_STATE_CLOSING:
1790 ret = -1;
1791 break;
1792 case NES_CM_STATE_LISTENING:
1793 case NES_CM_STATE_UNKNOWN:
1794 case NES_CM_STATE_INITED:
1795 case NES_CM_STATE_CLOSED:
1796 case NES_CM_STATE_TSA:
1797 ret = rem_ref_cm_node(cm_core, cm_node);
1798 break;
1799 }
1800 cm_node->cm_id = NULL;
1801 return ret;
1802}
1803
1804
1805/**
1806 * recv_pkt - recv an ETHERNET packet, and process it through CM
1807 * node state machine
1808 */
1809int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic,
1810 struct sk_buff *skb)
1811{
1812 struct nes_cm_node *cm_node = NULL;
1813 struct nes_cm_listener *listener = NULL;
1814 struct iphdr *iph;
1815 struct tcphdr *tcph;
1816 struct nes_cm_info nfo;
1817 int ret = 0;
1818
1819 if (!skb || skb->len < sizeof(struct iphdr) + sizeof(struct tcphdr)) {
1820 ret = -EINVAL;
1821 goto out;
1822 }
1823
	iph = (struct iphdr *)skb->data;
	tcph = (struct tcphdr *)(skb->data + sizeof(struct iphdr));
	skb_reset_network_header(skb);
	/* the transport header sits one IP header past the network header
	 * (IP options are not expected on these frames) */
	skb_set_transport_header(skb, sizeof(*iph));
	skb->len = ntohs(iph->tot_len);
1829
1830 nfo.loc_addr = ntohl(iph->daddr);
1831 nfo.loc_port = ntohs(tcph->dest);
1832 nfo.rem_addr = ntohl(iph->saddr);
1833 nfo.rem_port = ntohs(tcph->source);
1834
1835 nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
1836 iph->daddr, tcph->dest, iph->saddr, tcph->source);
1837
1838 /* note: this call is going to increment cm_node ref count */
1839 cm_node = find_node(cm_core,
1840 nfo.rem_port, nfo.rem_addr,
1841 nfo.loc_port, nfo.loc_addr);
1842
1843 if (!cm_node) {
1844 listener = find_listener(cm_core, nfo.loc_addr, nfo.loc_port,
1845 NES_CM_LISTENER_ACTIVE_STATE);
1846 if (listener) {
1847 nfo.cm_id = listener->cm_id;
1848 nfo.conn_type = listener->conn_type;
1849 } else {
1850 nfo.cm_id = NULL;
1851 nfo.conn_type = 0;
1852 }
1853
1854 cm_node = make_cm_node(cm_core, nesvnic, &nfo, listener);
1855 if (!cm_node) {
1856 nes_debug(NES_DBG_CM, "Unable to allocate node\n");
1857 if (listener) {
1858 nes_debug(NES_DBG_CM, "unable to allocate node and decrementing listener refcount\n");
1859 atomic_dec(&listener->ref_count);
1860 }
1861 ret = -1;
1862 goto out;
1863 }
		if (!listener) {
			nes_debug(NES_DBG_CM, "Packet found for unknown port=%d"
					" rem_port=%d refcnt=%d\n",
					nfo.loc_port, nfo.rem_port,
					atomic_read(&cm_node->ref_count));
			if (!tcph->rst) {
1871
1872 cm_node->tcp_cntxt.rcv_nxt = ntohl(tcph->seq);
1873 cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1874 send_reset(cm_node);
1875 }
1876 rem_ref_cm_node(cm_core, cm_node);
1877 ret = -1;
1878 goto out;
1879 }
1880 add_ref_cm_node(cm_node);
1881 cm_node->state = NES_CM_STATE_LISTENING;
1882 }
1883
1884 nes_debug(NES_DBG_CM, "Processing Packet for node %p, data = (%p):\n",
1885 cm_node, skb->data);
1886 process_packet(cm_node, skb, cm_core);
1887
1888 rem_ref_cm_node(cm_core, cm_node);
1889 out:
1890 if (skb)
1891 dev_kfree_skb_any(skb);
1892 return ret;
1893}
1894
1895
1896/**
1897 * nes_cm_alloc_core - allocate a top level instance of a cm core
1898 */
1899struct nes_cm_core *nes_cm_alloc_core(void)
1900{
1901 int i;
1902
1903 struct nes_cm_core *cm_core;
1904 struct sk_buff *skb = NULL;
1905
1906 /* setup the CM core */
1907 /* alloc top level core control structure */
1908 cm_core = kzalloc(sizeof(*cm_core), GFP_KERNEL);
1909 if (!cm_core)
1910 return NULL;
1911
1912 INIT_LIST_HEAD(&cm_core->connected_nodes);
1913 init_timer(&cm_core->tcp_timer);
1914 cm_core->tcp_timer.function = nes_cm_timer_tick;
1915
1916 cm_core->mtu = NES_CM_DEFAULT_MTU;
1917 cm_core->state = NES_CM_STATE_INITED;
1918 cm_core->free_tx_pkt_max = NES_CM_DEFAULT_FREE_PKTS;
1919
1920 atomic_set(&cm_core->session_id, 0);
1921 atomic_set(&cm_core->events_posted, 0);
1922
1923 /* init the packet lists */
1924 skb_queue_head_init(&cm_core->tx_free_list);
1925
	for (i = 0; i < NES_CM_DEFAULT_FRAME_CNT; i++) {
		skb = dev_alloc_skb(cm_core->mtu);
		if (!skb) {
			/* free any frames already queued before bailing out */
			skb_queue_purge(&cm_core->tx_free_list);
			kfree(cm_core);
			return NULL;
		}
		/* add 'raw' skb to free frame list */
		skb_queue_head(&cm_core->tx_free_list, skb);
	}
1935
1936 cm_core->api = &nes_cm_api;
1937
1938 spin_lock_init(&cm_core->ht_lock);
1939 spin_lock_init(&cm_core->listen_list_lock);
1940
1941 INIT_LIST_HEAD(&cm_core->listen_list.list);
1942
1943 nes_debug(NES_DBG_CM, "Init CM Core completed -- cm_core=%p\n", cm_core);
1944
1945 nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
1946 cm_core->event_wq = create_singlethread_workqueue("nesewq");
1947 cm_core->post_event = nes_cm_post_event;
1948 nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
1949 cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
1950
1951 print_core(cm_core);
1952 return cm_core;
1953}
1954
1955
1956/**
1957 * mini_cm_dealloc_core - deallocate a top level instance of a cm core
1958 */
1959int mini_cm_dealloc_core(struct nes_cm_core *cm_core)
1960{
1961 nes_debug(NES_DBG_CM, "De-Alloc CM Core (%p)\n", cm_core);
1962
1963 if (!cm_core)
1964 return -EINVAL;
1965
	barrier();

	if (timer_pending(&cm_core->tcp_timer))
		del_timer(&cm_core->tcp_timer);

	destroy_workqueue(cm_core->event_wq);
	destroy_workqueue(cm_core->disconn_wq);
	kfree(cm_core);
1976
1977 return 0;
1978}
1979
1980
1981/**
1982 * mini_cm_get
1983 */
1984int mini_cm_get(struct nes_cm_core *cm_core)
1985{
1986 return cm_core->state;
1987}
1988
1989
1990/**
1991 * mini_cm_set
1992 */
1993int mini_cm_set(struct nes_cm_core *cm_core, u32 type, u32 value)
1994{
1995 int ret = 0;
1996
1997 switch (type) {
1998 case NES_CM_SET_PKT_SIZE:
1999 cm_core->mtu = value;
2000 break;
2001 case NES_CM_SET_FREE_PKT_Q_SIZE:
2002 cm_core->free_tx_pkt_max = value;
2003 break;
2004 default:
2005 /* unknown set option */
2006 ret = -EINVAL;
2007 }
2008
2009 return ret;
2010}
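
/*
 * Usage sketch, assuming the core's api table routes set() to
 * mini_cm_set() (nes_cm_alloc_core() installs nes_cm_api as the table):
 *
 *	g_cm_core->api->set(g_cm_core, NES_CM_SET_PKT_SIZE, 9000);
 *	g_cm_core->api->set(g_cm_core, NES_CM_SET_FREE_PKT_Q_SIZE, 16);
 *
 * Neither value is validated here, so callers must pass sane sizes.
 */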
2011
2012
/**
 * nes_cm_init_tsa_conn - set up the HW QP context; the MPA frames must
 * already have been successfully exchanged when this is called
 */
2017static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_node)
2018{
2019 int ret = 0;
2020
2021 if (!nesqp)
2022 return -EINVAL;
2023
2024 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_IPV4 |
2025 NES_QPCONTEXT_MISC_NO_NAGLE | NES_QPCONTEXT_MISC_DO_NOT_FRAG |
2026 NES_QPCONTEXT_MISC_DROS);
2027
2028 if (cm_node->tcp_cntxt.snd_wscale || cm_node->tcp_cntxt.rcv_wscale)
2029 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WSCALE);
2030
2031 nesqp->nesqp_context->misc2 |= cpu_to_le32(64 << NES_QPCONTEXT_MISC2_TTL_SHIFT);
2032
2033 nesqp->nesqp_context->mss |= cpu_to_le32(((u32)cm_node->tcp_cntxt.mss) << 16);
2034
2035 nesqp->nesqp_context->tcp_state_flow_label |= cpu_to_le32(
2036 (u32)NES_QPCONTEXT_TCPSTATE_EST << NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT);
2037
2038 nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
2039 (cm_node->tcp_cntxt.snd_wscale << NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT) &
2040 NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK);
2041
2042 nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32(
2043 (cm_node->tcp_cntxt.rcv_wscale << NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT) &
2044 NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK);
2045
2046 nesqp->nesqp_context->keepalive = cpu_to_le32(0x80);
2047 nesqp->nesqp_context->ts_recent = 0;
2048 nesqp->nesqp_context->ts_age = 0;
2049 nesqp->nesqp_context->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
2050 nesqp->nesqp_context->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
2051 nesqp->nesqp_context->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
2052 nesqp->nesqp_context->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
2053 cm_node->tcp_cntxt.rcv_wscale);
2054 nesqp->nesqp_context->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
2055 nesqp->nesqp_context->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
2056 nesqp->nesqp_context->srtt = 0;
2057 nesqp->nesqp_context->rttvar = cpu_to_le32(0x6);
2058 nesqp->nesqp_context->ssthresh = cpu_to_le32(0x3FFFC000);
2059 nesqp->nesqp_context->cwnd = cpu_to_le32(2*cm_node->tcp_cntxt.mss);
2060 nesqp->nesqp_context->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
2061 nesqp->nesqp_context->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
2062 nesqp->nesqp_context->max_snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
2063
2064 nes_debug(NES_DBG_CM, "QP%u: rcv_nxt = 0x%08X, snd_nxt = 0x%08X,"
2065 " Setting MSS to %u, PDWscale = 0x%08X, rcv_wnd = %u, context misc = 0x%08X.\n",
2066 nesqp->hwqp.qp_id, le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
2067 le32_to_cpu(nesqp->nesqp_context->snd_nxt),
2068 cm_node->tcp_cntxt.mss, le32_to_cpu(nesqp->nesqp_context->pd_index_wscale),
2069 le32_to_cpu(nesqp->nesqp_context->rcv_wnd),
2070 le32_to_cpu(nesqp->nesqp_context->misc));
2071 nes_debug(NES_DBG_CM, " snd_wnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->snd_wnd));
2072 nes_debug(NES_DBG_CM, " snd_cwnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->cwnd));
2073 nes_debug(NES_DBG_CM, " max_swnd = 0x%08X.\n", le32_to_cpu(nesqp->nesqp_context->max_snd_wnd));
2074
2075 nes_debug(NES_DBG_CM, "Change cm_node state to TSA\n");
2076 cm_node->state = NES_CM_STATE_TSA;
2077
2078 return ret;
2079}
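
/*
 * A worked example of the context arithmetic above, assuming an mss of
 * 1460, an rcv_wnd of 0xFFFF and an rcv_wscale of 1 (illustrative
 * values only):
 *
 *	cwnd    = 2 * 1460    = 2920 bytes (two segments)
 *	rcv_wnd = 0xFFFF << 1 = 131070 bytes advertised to the peer
 *
 * snd_wl1/snd_wl2 seed the window-update tracking with the current
 * receive and send sequence numbers.
 */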
2080
2081
2082/**
2083 * nes_cm_disconn
2084 */
2085int nes_cm_disconn(struct nes_qp *nesqp)
2086{
2087 unsigned long flags;
2088
2089 spin_lock_irqsave(&nesqp->lock, flags);
2090 if (nesqp->disconn_pending == 0) {
2091 nesqp->disconn_pending++;
2092 spin_unlock_irqrestore(&nesqp->lock, flags);
2093 /* nes_add_ref(&nesqp->ibqp); */
		/* init our disconnect work element */
		INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
2096
2097 queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
2098 } else {
2099 spin_unlock_irqrestore(&nesqp->lock, flags);
2100 nes_rem_ref(&nesqp->ibqp);
2101 }
2102
2103 return 0;
2104}
2105
2106
2107/**
2108 * nes_disconnect_worker
2109 */
2110void nes_disconnect_worker(struct work_struct *work)
2111{
2112 struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
2113
2114 nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
2115 nesqp->last_aeq, nesqp->hwqp.qp_id);
2116 nes_cm_disconn_true(nesqp);
2117}
2118
2119
2120/**
2121 * nes_cm_disconn_true
2122 */
2123int nes_cm_disconn_true(struct nes_qp *nesqp)
2124{
2125 unsigned long flags;
2126 int ret = 0;
2127 struct iw_cm_id *cm_id;
2128 struct iw_cm_event cm_event;
2129 struct nes_vnic *nesvnic;
2130 u16 last_ae;
2131 u8 original_hw_tcp_state;
2132 u8 original_ibqp_state;
2133 u8 issued_disconnect_reset = 0;
2134
2135 if (!nesqp) {
2136 nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
2137 return -1;
2138 }
2139
2140 spin_lock_irqsave(&nesqp->lock, flags);
2141 cm_id = nesqp->cm_id;
	/* make sure we haven't already closed this connection */
2143 if (!cm_id) {
2144 nes_debug(NES_DBG_CM, "QP%u disconnect_worker cmid is NULL\n",
2145 nesqp->hwqp.qp_id);
2146 spin_unlock_irqrestore(&nesqp->lock, flags);
2147 nes_rem_ref(&nesqp->ibqp);
2148 return -1;
2149 }
2150
2151 nesvnic = to_nesvnic(nesqp->ibqp.device);
2152 nes_debug(NES_DBG_CM, "Disconnecting QP%u\n", nesqp->hwqp.qp_id);
2153
2154 original_hw_tcp_state = nesqp->hw_tcp_state;
2155 original_ibqp_state = nesqp->ibqp_state;
	last_ae = nesqp->last_aeq;

2159 nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
2160
2161 if ((nesqp->cm_id) && (cm_id->event_handler)) {
2162 if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2163 ((original_ibqp_state == IB_QPS_RTS) &&
2164 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2165 atomic_inc(&cm_disconnects);
2166 cm_event.event = IW_CM_EVENT_DISCONNECT;
2167 if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
2168 issued_disconnect_reset = 1;
2169 cm_event.status = IW_CM_EVENT_STATUS_RESET;
				nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event"
						" (status reset) for QP%u, cm_id = %p.\n",
						nesqp->hwqp.qp_id, cm_id);
2173 } else {
2174 cm_event.status = IW_CM_EVENT_STATUS_OK;
2175 }
2176
2177 cm_event.local_addr = cm_id->local_addr;
2178 cm_event.remote_addr = cm_id->remote_addr;
2179 cm_event.private_data = NULL;
2180 cm_event.private_data_len = 0;
2181
2182 nes_debug(NES_DBG_CM, "Generating a CM Disconnect Event for "
2183 " QP%u, SQ Head = %u, SQ Tail = %u. cm_id = %p, refcount = %u.\n",
2184 nesqp->hwqp.qp_id,
2185 nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail, cm_id,
2186 atomic_read(&nesqp->refcount));
2187
2188 spin_unlock_irqrestore(&nesqp->lock, flags);
2189 ret = cm_id->event_handler(cm_id, &cm_event);
2190 if (ret)
2191 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2192 spin_lock_irqsave(&nesqp->lock, flags);
2193 }
2194
2195 nesqp->disconn_pending = 0;
2196 /* There might have been another AE while the lock was released */
2197 original_hw_tcp_state = nesqp->hw_tcp_state;
2198 original_ibqp_state = nesqp->ibqp_state;
2199 last_ae = nesqp->last_aeq;
2200
2201 if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
2202 ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2203 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
2204 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
2205 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2206 atomic_inc(&cm_closes);
2207 nesqp->cm_id = NULL;
2208 nesqp->in_disconnect = 0;
2209 spin_unlock_irqrestore(&nesqp->lock, flags);
2210 nes_disconnect(nesqp, 1);
2211
2212 cm_id->provider_data = nesqp;
2213 /* Send up the close complete event */
2214 cm_event.event = IW_CM_EVENT_CLOSE;
2215 cm_event.status = IW_CM_EVENT_STATUS_OK;
2216 cm_event.provider_data = cm_id->provider_data;
2217 cm_event.local_addr = cm_id->local_addr;
2218 cm_event.remote_addr = cm_id->remote_addr;
2219 cm_event.private_data = NULL;
2220 cm_event.private_data_len = 0;
2221
2222 ret = cm_id->event_handler(cm_id, &cm_event);
2223 if (ret) {
2224 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2225 }
2226
2227 cm_id->rem_ref(cm_id);
2228
2229 spin_lock_irqsave(&nesqp->lock, flags);
2230 if (nesqp->flush_issued == 0) {
2231 nesqp->flush_issued = 1;
2232 spin_unlock_irqrestore(&nesqp->lock, flags);
2233 flush_wqes(nesvnic->nesdev, nesqp, NES_CQP_FLUSH_RQ, 1);
2234 } else {
2235 spin_unlock_irqrestore(&nesqp->lock, flags);
2236 }
2237
2238 /* This reference is from either ModifyQP or the AE processing,
2239 there is still a race here with modifyqp */
2240 nes_rem_ref(&nesqp->ibqp);
2241
2242 } else {
2243 cm_id = nesqp->cm_id;
2244 spin_unlock_irqrestore(&nesqp->lock, flags);
2245 /* check to see if the inbound reset beat the outbound reset */
2246 if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
			nes_debug(NES_DBG_CM, "QP%u: Decrementing refcount due to inbound reset"
					" beating the outbound reset.\n",
					nesqp->hwqp.qp_id);
2250 nes_rem_ref(&nesqp->ibqp);
2251 }
2252 }
2253 } else {
2254 nesqp->disconn_pending = 0;
2255 spin_unlock_irqrestore(&nesqp->lock, flags);
2256 }
2257 nes_rem_ref(&nesqp->ibqp);
2258
2259 return 0;
2260}
2261
2262
2263/**
2264 * nes_disconnect
2265 */
2266int nes_disconnect(struct nes_qp *nesqp, int abrupt)
2267{
2268 int ret = 0;
2269 struct nes_vnic *nesvnic;
2270 struct nes_device *nesdev;
2271
2272 nesvnic = to_nesvnic(nesqp->ibqp.device);
2273 if (!nesvnic)
2274 return -EINVAL;
2275
2276 nesdev = nesvnic->nesdev;
2277
2278 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2279 atomic_read(&nesvnic->netdev->refcnt));
2280
	if (nesqp->active_conn) {
		/* indicate this connection is NOT active */
		nesqp->active_conn = 0;
	} else {
2286 /* Need to free the Last Streaming Mode Message */
2287 if (nesqp->ietf_frame) {
2288 pci_free_consistent(nesdev->pcidev,
2289 nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
2290 nesqp->ietf_frame, nesqp->ietf_frame_pbase);
2291 }
2292 }
2293
2294 /* close the CM node down if it is still active */
2295 if (nesqp->cm_node) {
2296 nes_debug(NES_DBG_CM, "Call close API\n");
2297
2298 g_cm_core->api->close(g_cm_core, nesqp->cm_node);
2299 nesqp->cm_node = NULL;
2300 }
2301
2302 return ret;
2303}
2304
2305
2306/**
2307 * nes_accept
2308 */
2309int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2310{
2311 u64 u64temp;
2312 struct ib_qp *ibqp;
2313 struct nes_qp *nesqp;
2314 struct nes_vnic *nesvnic;
2315 struct nes_device *nesdev;
2316 struct nes_cm_node *cm_node;
2317 struct nes_adapter *adapter;
2318 struct ib_qp_attr attr;
2319 struct iw_cm_event cm_event;
2320 struct nes_hw_qp_wqe *wqe;
2321 struct nes_v4_quad nes_quad;
2322 int ret;
2323
2324 ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
2325 if (!ibqp)
2326 return -EINVAL;
2327
2328 /* get all our handles */
2329 nesqp = to_nesqp(ibqp);
2330 nesvnic = to_nesvnic(nesqp->ibqp.device);
2331 nesdev = nesvnic->nesdev;
2332 adapter = nesdev->nesadapter;
2333
2334 nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
2335 nesvnic, nesvnic->netdev, nesvnic->netdev->name);
2336
2337 /* since this is from a listen, we were able to put node handle into cm_id */
2338 cm_node = (struct nes_cm_node *)cm_id->provider_data;
2339
2340 /* associate the node with the QP */
2341 nesqp->cm_node = (void *)cm_node;
2342
2343 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu\n",
2344 nesqp->hwqp.qp_id, cm_node, jiffies);
2345 atomic_inc(&cm_accepts);
2346
2347 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
2348 atomic_read(&nesvnic->netdev->refcnt));
2349
2350 /* allocate the ietf frame and space for private data */
2351 nesqp->ietf_frame = pci_alloc_consistent(nesdev->pcidev,
2352 sizeof(struct ietf_mpa_frame) + conn_param->private_data_len,
2353 &nesqp->ietf_frame_pbase);
2354
2355 if (!nesqp->ietf_frame) {
2356 nes_debug(NES_DBG_CM, "Unable to allocate memory for private data\n");
2357 return -ENOMEM;
	}

	/* setup the MPA frame */
2362 nesqp->private_data_len = conn_param->private_data_len;
2363 memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
2364
2365 memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
2366 conn_param->private_data_len);
2367
2368 nesqp->ietf_frame->priv_data_len = cpu_to_be16(conn_param->private_data_len);
2369 nesqp->ietf_frame->rev = mpa_version;
2370 nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
2371
2372 /* setup our first outgoing iWarp send WQE (the IETF frame response) */
2373 wqe = &nesqp->hwqp.sq_vbase[0];
2374
2375 if (cm_id->remote_addr.sin_addr.s_addr != cm_id->local_addr.sin_addr.s_addr) {
2376 u64temp = (unsigned long)nesqp;
2377 u64temp |= NES_SW_CONTEXT_ALIGN>>1;
2378 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
2379 u64temp);
2380 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
2381 cpu_to_le32(NES_IWARP_SQ_WQE_STREAMING | NES_IWARP_SQ_WQE_WRPDU);
2382 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
2383 cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
2384 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
2385 cpu_to_le32((u32)nesqp->ietf_frame_pbase);
2386 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
2387 cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
2388 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
2389 cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame));
2390 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
2391
2392 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
2393 NES_QPCONTEXT_ORDIRD_LSMM_PRESENT | NES_QPCONTEXT_ORDIRD_WRPDU);
2394 } else {
2395 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
2396 NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
2397 }
	nesqp->skip_lsmm = 1;

2401 /* Cache the cm_id in the qp */
2402 nesqp->cm_id = cm_id;
2403 cm_node->cm_id = cm_id;
2404
2405 /* nesqp->cm_node = (void *)cm_id->provider_data; */
2406 cm_id->provider_data = nesqp;
2407 nesqp->active_conn = 0;
2408
2409 nes_cm_init_tsa_conn(nesqp, cm_node);
2410
2411 nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
2412 nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
2413 nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
2414
2415 nesqp->nesqp_context->misc2 |= cpu_to_le32(
2416 (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
2417
2418 nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
2419 nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0), NULL,
2420 NES_ARP_RESOLVE) << 16);
2421
2422 nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
2423 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
2424
2425 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
2426
2427 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
2428 ((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
2429 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
2430
2431 memset(&nes_quad, 0, sizeof(nes_quad));
2432 nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
2433 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
2434 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
2435 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2436
2437 /* Produce hash key */
2438 nesqp->hte_index = cpu_to_be32(
2439 crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
2440 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, CRC = 0x%08X\n",
2441 nesqp->hte_index, nesqp->hte_index & adapter->hte_index_mask);
2442
2443 nesqp->hte_index &= adapter->hte_index_mask;
2444 nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
2445
2446 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
2447
2448 nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X,"
2449 " rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + private data length=%zu.\n",
2450 nesqp->hwqp.qp_id,
2451 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2452 ntohs(cm_id->remote_addr.sin_port),
2453 ntohl(cm_id->local_addr.sin_addr.s_addr),
2454 ntohs(cm_id->local_addr.sin_port),
2455 le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
2456 le32_to_cpu(nesqp->nesqp_context->snd_nxt),
2457 conn_param->private_data_len+sizeof(struct ietf_mpa_frame));
2458
2459 attr.qp_state = IB_QPS_RTS;
2460 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
2461
	/* notify OF layer that accept event was successful */
2463 cm_id->add_ref(cm_id);
2464
2465 cm_event.event = IW_CM_EVENT_ESTABLISHED;
2466 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
2467 cm_event.provider_data = (void *)nesqp;
2468 cm_event.local_addr = cm_id->local_addr;
2469 cm_event.remote_addr = cm_id->remote_addr;
2470 cm_event.private_data = NULL;
2471 cm_event.private_data_len = 0;
2472 ret = cm_id->event_handler(cm_id, &cm_event);
2473 if (cm_node->loopbackpartner) {
2474 cm_node->loopbackpartner->mpa_frame_size = nesqp->private_data_len;
2475 /* copy entire MPA frame to our cm_node's frame */
2476 memcpy(cm_node->loopbackpartner->mpa_frame_buf, nesqp->ietf_frame->priv_data,
2477 nesqp->private_data_len);
2478 create_event(cm_node->loopbackpartner, NES_CM_EVENT_CONNECTED);
2479 }
	if (ret)
		printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
				__func__, __LINE__, ret);
2483
2484 return 0;
2485}
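
/*
 * A sketch of the hash index produced in nes_accept(), with an assumed
 * hte_index_mask of 0x3FF (the real mask comes from the adapter): the
 * connection quad is CRC'd, inverted, and truncated to the table size:
 *
 *	crc32c(~0, &nes_quad, sizeof(nes_quad)) ^ 0xffffffff -> e.g. 0x12345678
 *	0x12345678 & 0x3FF                                   ->      0x278
 *
 * so the hardware and the driver agree on the connection's HTE slot.
 */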
2486
2487
2488/**
2489 * nes_reject
2490 */
2491int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2492{
2493 struct nes_cm_node *cm_node;
2494 struct nes_cm_core *cm_core;
2495
2496 atomic_inc(&cm_rejects);
2497 cm_node = (struct nes_cm_node *) cm_id->provider_data;
2498 cm_core = cm_node->cm_core;
2499 cm_node->mpa_frame_size = sizeof(struct ietf_mpa_frame) + pdata_len;
2500
	/* memcpy: the 16-byte key is not NUL-terminated, strcpy would overrun it */
	memcpy(cm_node->mpa_frame.key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
	memcpy(&cm_node->mpa_frame.priv_data, pdata, pdata_len);
2503
2504 cm_node->mpa_frame.priv_data_len = cpu_to_be16(pdata_len);
2505 cm_node->mpa_frame.rev = mpa_version;
2506 cm_node->mpa_frame.flags = IETF_MPA_FLAGS_CRC | IETF_MPA_FLAGS_REJECT;
2507
2508 cm_core->api->reject(cm_core, &cm_node->mpa_frame, cm_node);
2509
2510 return 0;
2511}
2512
2513
2514/**
2515 * nes_connect
2516 * setup and launch cm connect node
2517 */
2518int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2519{
2520 struct ib_qp *ibqp;
2521 struct nes_qp *nesqp;
2522 struct nes_vnic *nesvnic;
2523 struct nes_device *nesdev;
2524 struct nes_cm_node *cm_node;
2525 struct nes_cm_info cm_info;
2526
2527 ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
2528 if (!ibqp)
2529 return -EINVAL;
2530 nesqp = to_nesqp(ibqp);
2531 if (!nesqp)
2532 return -EINVAL;
2533 nesvnic = to_nesvnic(nesqp->ibqp.device);
2534 if (!nesvnic)
2535 return -EINVAL;
2536 nesdev = nesvnic->nesdev;
2537 if (!nesdev)
2538 return -EINVAL;
2539
2540 atomic_inc(&cm_connects);
2541
2542 nesqp->ietf_frame = kzalloc(sizeof(struct ietf_mpa_frame) +
2543 conn_param->private_data_len, GFP_KERNEL);
2544 if (!nesqp->ietf_frame)
2545 return -ENOMEM;
2546
2547 /* set qp as having an active connection */
2548 nesqp->active_conn = 1;
2549
2550 nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = 0x%08X:0x%04X.\n",
2551 nesqp->hwqp.qp_id,
2552 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2553 ntohs(cm_id->remote_addr.sin_port),
2554 ntohl(cm_id->local_addr.sin_addr.s_addr),
2555 ntohs(cm_id->local_addr.sin_port));
2556
2557 /* cache the cm_id in the qp */
2558 nesqp->cm_id = cm_id;
2559
2560 cm_id->provider_data = nesqp;
2561
2562 /* copy the private data */
2563 if (conn_param->private_data_len) {
2564 memcpy(nesqp->ietf_frame->priv_data, conn_param->private_data,
2565 conn_param->private_data_len);
2566 }
2567
2568 nesqp->private_data_len = conn_param->private_data_len;
2569 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
2570 nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
2571 nes_debug(NES_DBG_CM, "mpa private data len =%u\n", conn_param->private_data_len);
2572
	memcpy(nesqp->ietf_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
2574 nesqp->ietf_frame->flags = IETF_MPA_FLAGS_CRC;
2575 nesqp->ietf_frame->rev = IETF_MPA_VERSION;
2576 nesqp->ietf_frame->priv_data_len = htons(conn_param->private_data_len);
2577
2578 if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
2579 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
2580 PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
2581
2582 /* set up the connection params for the node */
2583 cm_info.loc_addr = (cm_id->local_addr.sin_addr.s_addr);
2584 cm_info.loc_port = (cm_id->local_addr.sin_port);
2585 cm_info.rem_addr = (cm_id->remote_addr.sin_addr.s_addr);
2586 cm_info.rem_port = (cm_id->remote_addr.sin_port);
2587 cm_info.cm_id = cm_id;
2588 cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
2589
2590 cm_id->add_ref(cm_id);
2591 nes_add_ref(&nesqp->ibqp);
2592
2593 /* create a connect CM node connection */
2594 cm_node = g_cm_core->api->connect(g_cm_core, nesvnic, nesqp->ietf_frame, &cm_info);
2595 if (!cm_node) {
2596 if (cm_id->local_addr.sin_addr.s_addr != cm_id->remote_addr.sin_addr.s_addr)
2597 nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
2598 PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL);
2599 nes_rem_ref(&nesqp->ibqp);
2600 kfree(nesqp->ietf_frame);
2601 nesqp->ietf_frame = NULL;
2602 cm_id->rem_ref(cm_id);
2603 return -ENOMEM;
2604 }
2605
2606 cm_node->apbvt_set = 1;
2607 nesqp->cm_node = cm_node;
2608
2609 return 0;
2610}
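/*
 * NOTE (editorial sketch, not driver code): nes_connect() consumes only a
 * handful of iw_cm_conn_param fields -- qpn, ord, private_data and
 * private_data_len above -- and is assumed to be published to the OFA core
 * through the device's iw_cm_verbs table (iwcm->connect = nes_connect;),
 * with the remaining verbs (accept, reject, create_listen) wired up the
 * same way elsewhere in the driver.
 */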
2611
2612
2613/**
2614 * nes_create_listen
2615 */
2616int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
2617{
2618 struct nes_vnic *nesvnic;
2619 struct nes_cm_listener *cm_node;
2620 struct nes_cm_info cm_info;
2621 struct nes_adapter *adapter;
2622 int err;
2623
2624
2625 nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
2626 cm_id, ntohs(cm_id->local_addr.sin_port));
2627
2628 nesvnic = to_nesvnic(cm_id->device);
2629 if (!nesvnic)
2630 return -EINVAL;
2631 adapter = nesvnic->nesdev->nesadapter;
2632 nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n",
2633 nesvnic, nesvnic->netdev, nesvnic->netdev->name);
2634
2635 nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
2636 nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
2637
2638 /* setup listen params in our api call struct */
2639 cm_info.loc_addr = nesvnic->local_ipaddr;
2640 cm_info.loc_port = cm_id->local_addr.sin_port;
2641 cm_info.backlog = backlog;
2642 cm_info.cm_id = cm_id;
2643
2644 cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
2645
2646
2647 cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
2648 if (!cm_node) {
2649 printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
2650 __FUNCTION__, __LINE__);
2651 return -ENOMEM;
2652 }
2653
2656 if (!cm_node->reused_node) {
2657 err = nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
2658 PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
2659 if (err) {
2660 printk(KERN_ERR "nes_manage_apbvt call returned %d.\n", err);
2661 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
2662 return err;
2663 }
2664 cm_listens_created++;
2665 }
2666
2667 cm_id->add_ref(cm_id);
2668 cm_id->provider_data = (void *)cm_node;
2669
2670
2671 return 0;
2672}
2673
2674
2675/**
2676 * nes_destroy_listen
2677 */
2678int nes_destroy_listen(struct iw_cm_id *cm_id)
2679{
2680 if (cm_id->provider_data)
2681 g_cm_core->api->stop_listener(g_cm_core, cm_id->provider_data);
2682 else
2683 nes_debug(NES_DBG_CM, "cm_id->provider_data was NULL\n");
2684
2685 cm_id->rem_ref(cm_id);
2686
2687 return 0;
2688}
2689
2690
2691/**
2692 * nes_cm_recv
2693 */
2694int nes_cm_recv(struct sk_buff *skb, struct net_device *netdevice)
2695{
2696 cm_packets_received++;
2697 if ((g_cm_core) && (g_cm_core->api)) {
2698 g_cm_core->api->recv_pkt(g_cm_core, netdev_priv(netdevice), skb);
2699 } else {
2700 nes_debug(NES_DBG_CM, "Unable to process packet for CM,"
2701 " cm is not setup properly.\n");
2702 }
2703
2704 return 0;
2705}
2706
2707
2708/**
2709 * nes_cm_start
2710 * Start and init a cm core module
2711 */
2712int nes_cm_start(void)
2713{
2714 nes_debug(NES_DBG_CM, "\n");
2715 /* create the primary CM core, pass this handle to subsequent core inits */
2716 g_cm_core = nes_cm_alloc_core();
2717 if (g_cm_core) {
2718 return 0;
2719 } else {
2720 return -ENOMEM;
2721 }
2722}
2723
2724
2725/**
2726 * nes_cm_stop
2727 * stop and dealloc all cm core instances
2728 */
2729int nes_cm_stop(void)
2730{
2731 g_cm_core->api->destroy_cm_core(g_cm_core);
2732 return 0;
2733}
2734
2735
2736/**
2737 * cm_event_connected
2738 * handle a connected event, setup QPs and HW
2739 */
2740void cm_event_connected(struct nes_cm_event *event)
2741{
2742 u64 u64temp;
2743 struct nes_qp *nesqp;
2744 struct nes_vnic *nesvnic;
2745 struct nes_device *nesdev;
2746 struct nes_cm_node *cm_node;
2747 struct nes_adapter *nesadapter;
2748 struct ib_qp_attr attr;
2749 struct iw_cm_id *cm_id;
2750 struct iw_cm_event cm_event;
2751 struct nes_hw_qp_wqe *wqe;
2752 struct nes_v4_quad nes_quad;
2753 int ret;
2754
2755 /* get all our handles */
2756 cm_node = event->cm_node;
2757 cm_id = cm_node->cm_id;
2758 nes_debug(NES_DBG_CM, "cm_event_connected - %p - cm_id = %p\n", cm_node, cm_id);
2759 nesqp = (struct nes_qp *)cm_id->provider_data;
2760 nesvnic = to_nesvnic(nesqp->ibqp.device);
2761 nesdev = nesvnic->nesdev;
2762 nesadapter = nesdev->nesadapter;
2763
2764 if (nesqp->destroyed) {
2765 return;
2766 }
2767 atomic_inc(&cm_connecteds);
2768 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
2769 " local port 0x%04X. jiffies = %lu.\n",
2770 nesqp->hwqp.qp_id,
2771 ntohl(cm_id->remote_addr.sin_addr.s_addr),
2772 ntohs(cm_id->remote_addr.sin_port),
2773 ntohs(cm_id->local_addr.sin_port),
2774 jiffies);
2775
2776 nes_cm_init_tsa_conn(nesqp, cm_node);
2777
2778 /* set the QP tsa context */
2779 nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
2780 nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
2781 nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
2782
2783 nesqp->nesqp_context->misc2 |= cpu_to_le32(
2784 (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_QPCONTEXT_MISC2_SRC_IP_SHIFT);
2785 nesqp->nesqp_context->arp_index_vlan |= cpu_to_le32(
2786 nes_arp_table(nesdev, le32_to_cpu(nesqp->nesqp_context->ip0),
2787 NULL, NES_ARP_RESOLVE) << 16);
2788 nesqp->nesqp_context->ts_val_delta = cpu_to_le32(
2789 jiffies - nes_read_indexed(nesdev, NES_IDX_TCP_NOW));
2790 nesqp->nesqp_context->ird_index = cpu_to_le32(nesqp->hwqp.qp_id);
2791 nesqp->nesqp_context->ird_ord_sizes |=
2792 cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
2793
2794 /* Adjust tail for not having an LSMM */
2795 nesqp->hwqp.sq_tail = 1;
2796
2797#if defined(NES_SEND_FIRST_WRITE)
2798 if (cm_node->send_write0) {
2799 nes_debug(NES_DBG_CM, "Sending first write.\n");
2800 wqe = &nesqp->hwqp.sq_vbase[0];
2801 u64temp = (unsigned long)nesqp;
2802 u64temp |= NES_SW_CONTEXT_ALIGN>>1;
2803 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
2804 u64temp);
2805 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(NES_IWARP_SQ_OP_RDMAW);
2806 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] = 0;
2807 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] = 0;
2808 wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] = 0;
2809 wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = 0;
2810 wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
2811
2812 /* use the reserved spot on the WQ for the extra first WQE */
2813 nesqp->nesqp_context->ird_ord_sizes &= cpu_to_le32(~(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
2814 NES_QPCONTEXT_ORDIRD_WRPDU | NES_QPCONTEXT_ORDIRD_ALSMM));
2815 nesqp->skip_lsmm = 1;
2816 nesqp->hwqp.sq_tail = 0;
2817 nes_write32(nesdev->regs + NES_WQE_ALLOC,
2818 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
2819 }
2820#endif
2821
2822 memset(&nes_quad, 0, sizeof(nes_quad));
2823
2824 nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
2825 nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
2826 nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
2827 nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
2828
2829 /* Produce hash key */
2830 nesqp->hte_index = cpu_to_be32(
2831 crc32c(~0, (void *)&nes_quad, sizeof(nes_quad)) ^ 0xffffffff);
2832 nes_debug(NES_DBG_CM, "HTE Index = 0x%08X, After CRC = 0x%08X\n",
2833 nesqp->hte_index, nesqp->hte_index & nesadapter->hte_index_mask);
2834
2835 nesqp->hte_index &= nesadapter->hte_index_mask;
2836 nesqp->nesqp_context->hte_index = cpu_to_le32(nesqp->hte_index);
2837
2838 nesqp->ietf_frame = &cm_node->mpa_frame;
2839 nesqp->private_data_len = (u8) cm_node->mpa_frame_size;
2840 cm_node->cm_core->api->accelerated(cm_node->cm_core, cm_node);
2841
2842 /* modify QP state to rts */
2843 attr.qp_state = IB_QPS_RTS;
2844 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
2845
2846 /* notify OF layer we successfully created the requested connection */
2847 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
2848 cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
2849 cm_event.provider_data = cm_id->provider_data;
2850 cm_event.local_addr.sin_family = AF_INET;
2851 cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
2852 cm_event.remote_addr = cm_id->remote_addr;
2853
2854 cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
2855 cm_event.private_data_len = (u8) event->cm_node->mpa_frame_size;
2856
2857 cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
2858 ret = cm_id->event_handler(cm_id, &cm_event);
2859 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2860
2861 if (ret)
2862 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
2863 __FUNCTION__, __LINE__, ret);
2864 nes_debug(NES_DBG_CM, "Exiting connect thread for QP%u. jiffies = %lu\n",
2865 nesqp->hwqp.qp_id, jiffies);
2866
2867 nes_rem_ref(&nesqp->ibqp);
2868
2869 return;
2870}
2871
2872
2873/**
2874 * cm_event_connect_error
2875 */
2876void cm_event_connect_error(struct nes_cm_event *event)
2877{
2878 struct nes_qp *nesqp;
2879 struct iw_cm_id *cm_id;
2880 struct iw_cm_event cm_event;
2881 /* struct nes_cm_info cm_info; */
2882 int ret;
2883
2884 if (!event->cm_node)
2885 return;
2886
2887 cm_id = event->cm_node->cm_id;
2888 if (!cm_id) {
2889 return;
2890 }
2891
2892 nes_debug(NES_DBG_CM, "cm_node=%p, cm_id=%p\n", event->cm_node, cm_id);
2893 nesqp = cm_id->provider_data;
2894
2895 if (!nesqp) {
2896 return;
2897 }
2898
2899 /* notify OF layer about this connection error event */
2900 /* cm_id->rem_ref(cm_id); */
2901 nesqp->cm_id = NULL;
2902 cm_id->provider_data = NULL;
2903 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
2904 cm_event.status = IW_CM_EVENT_STATUS_REJECTED;
2905 cm_event.provider_data = cm_id->provider_data;
2906 cm_event.local_addr = cm_id->local_addr;
2907 cm_event.remote_addr = cm_id->remote_addr;
2908 cm_event.private_data = NULL;
2909 cm_event.private_data_len = 0;
2910
2911 nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remote_addr=%08x\n",
2912 cm_event.local_addr.sin_addr.s_addr, cm_event.remote_addr.sin_addr.s_addr);
2913
2914 ret = cm_id->event_handler(cm_id, &cm_event);
2915 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2916 if (ret)
2917 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
2918 __FUNCTION__, __LINE__, ret);
2919 nes_rem_ref(&nesqp->ibqp);
2920 cm_id->rem_ref(cm_id);
2921
2922 return;
2923}
2924
2925
2926/**
2927 * cm_event_reset
2928 */
2929void cm_event_reset(struct nes_cm_event *event)
2930{
2931 struct nes_qp *nesqp;
2932 struct iw_cm_id *cm_id;
2933 struct iw_cm_event cm_event;
2934 /* struct nes_cm_info cm_info; */
2935 int ret;
2936
2937 if (!event->cm_node)
2938 return;
2939
2940 if (!event->cm_node->cm_id)
2941 return;
2942
2943 cm_id = event->cm_node->cm_id;
2944
2945 nes_debug(NES_DBG_CM, "%p - cm_id = %p\n", event->cm_node, cm_id);
2946 nesqp = cm_id->provider_data;
2947
2948 nesqp->cm_id = NULL;
2949 /* cm_id->provider_data = NULL; */
2950 cm_event.event = IW_CM_EVENT_DISCONNECT;
2951 cm_event.status = IW_CM_EVENT_STATUS_RESET;
2952 cm_event.provider_data = cm_id->provider_data;
2953 cm_event.local_addr = cm_id->local_addr;
2954 cm_event.remote_addr = cm_id->remote_addr;
2955 cm_event.private_data = NULL;
2956 cm_event.private_data_len = 0;
2957
2958 ret = cm_id->event_handler(cm_id, &cm_event);
2959 nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
2960
2961
2962 /* notify OF layer about this connection error event */
2963 cm_id->rem_ref(cm_id);
2964
2965 return;
2966}
2967
2968
2969/**
2970 * cm_event_mpa_req
2971 */
2972void cm_event_mpa_req(struct nes_cm_event *event)
2973{
2974 struct iw_cm_id *cm_id;
2975 struct iw_cm_event cm_event;
2976 int ret;
2977 struct nes_cm_node *cm_node;
2978
2979 cm_node = event->cm_node;
2980 if (!cm_node)
2981 return;
2982 cm_id = cm_node->cm_id;
2983
2984 atomic_inc(&cm_connect_reqs);
2985 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
2986 cm_node, cm_id, jiffies);
2987
2988 cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
2989 cm_event.status = IW_CM_EVENT_STATUS_OK;
2990 cm_event.provider_data = (void *)cm_node;
2991
2992 cm_event.local_addr.sin_family = AF_INET;
2993 cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
2994 cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
2995
2996 cm_event.remote_addr.sin_family = AF_INET;
2997 cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
2998 cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
2999
3000 cm_event.private_data = cm_node->mpa_frame_buf;
3001 cm_event.private_data_len = (u8) cm_node->mpa_frame_size;
3002
3003 ret = cm_id->event_handler(cm_id, &cm_event);
3004 if (ret)
3005 printk(KERN_ERR "%s[%u] OFA CM event_handler returned, ret=%d\n",
3006 __FUNCTION__, __LINE__, ret);
3007
3008 return;
3009}
3010
3011
3012static void nes_cm_event_handler(struct work_struct *);
3013
3014/**
3015 * nes_cm_post_event
3016 * post an event to the cm event handler
3017 */
3018int nes_cm_post_event(struct nes_cm_event *event)
3019{
3020 atomic_inc(&event->cm_node->cm_core->events_posted);
3021 add_ref_cm_node(event->cm_node);
3022 event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
3023 INIT_WORK(&event->event_work, nes_cm_event_handler);
3024 nes_debug(NES_DBG_CM, "queue_work, event=%p\n", event);
3025
3026 queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
3027
3028 nes_debug(NES_DBG_CM, "Exit\n");
3029 return 0;
3030}
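/*
 * NOTE (editorial sketch, assumptions flagged): producers such as
 * create_event() are expected to package an event along these lines before
 * calling nes_cm_post_event() -- GFP_ATOMIC is assumed because callers may
 * run in softirq context:
 *
 *	event = kzalloc(sizeof(*event), GFP_ATOMIC);
 *	if (!event)
 *		return NULL;
 *	event->type = type;		(e.g. NES_CM_EVENT_CONNECTED)
 *	event->cm_node = cm_node;
 *	event->cm_info.cm_id = cm_node->cm_id;
 *	nes_cm_post_event(event);
 */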
3031
3032
3033/**
3034 * nes_cm_event_handler
3035 * worker function to handle cm events
3036 * will free instance of nes_cm_event
3037 */
3038static void nes_cm_event_handler(struct work_struct *work)
3039{
3040 struct nes_cm_event *event = container_of(work, struct nes_cm_event, event_work);
3041 struct nes_cm_core *cm_core;
3042
3043 if ((!event) || (!event->cm_node) || (!event->cm_node->cm_core)) {
3044 return;
3045 }
3046 cm_core = event->cm_node->cm_core;
3047 nes_debug(NES_DBG_CM, "event=%p, event->type=%u, events posted=%u\n",
3048 event, event->type, atomic_read(&cm_core->events_posted));
3049
3050 switch (event->type) {
3051 case NES_CM_EVENT_MPA_REQ:
3052 cm_event_mpa_req(event);
3053 nes_debug(NES_DBG_CM, "CM Event: MPA REQUEST\n");
3054 break;
3055 case NES_CM_EVENT_RESET:
3056 nes_debug(NES_DBG_CM, "CM Event: RESET\n");
3057 cm_event_reset(event);
3058 break;
3059 case NES_CM_EVENT_CONNECTED:
3060 if ((!event->cm_node->cm_id) ||
3061 (event->cm_node->state != NES_CM_STATE_TSA)) {
3062 break;
3063 }
3064 cm_event_connected(event);
3065 nes_debug(NES_DBG_CM, "CM Event: CONNECTED\n");
3066 break;
3067 case NES_CM_EVENT_ABORTED:
3068 if ((!event->cm_node->cm_id) || (event->cm_node->state == NES_CM_STATE_TSA)) {
3069 break;
3070 }
3071 cm_event_connect_error(event);
3072 nes_debug(NES_DBG_CM, "CM Event: ABORTED\n");
3073 break;
3074 case NES_CM_EVENT_DROPPED_PKT:
3075 nes_debug(NES_DBG_CM, "CM Event: DROPPED PKT\n");
3076 break;
3077 default:
3078 nes_debug(NES_DBG_CM, "CM Event: UNKNOWN EVENT TYPE\n");
3079 break;
3080 }
3081
3082 atomic_dec(&cm_core->events_posted);
3083 event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
3084 rem_ref_cm_node(cm_core, event->cm_node);
3085 kfree(event);
3086
3087 return;
3088}
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
new file mode 100644
index 000000000000..a59f0a7fb278
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -0,0 +1,433 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef NES_CM_H
35#define NES_CM_H
36
37#define QUEUE_EVENTS
38
39#define NES_MANAGE_APBVT_DEL 0
40#define NES_MANAGE_APBVT_ADD 1
41
42/* IETF MPA -- defines, enums, structs */
43#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
44#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
45#define IETF_MPA_KEY_SIZE 16
46#define IETF_MPA_VERSION 1
47
48enum ietf_mpa_flags {
49 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
50 IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
51 IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */
52};
53
54struct ietf_mpa_frame {
55 u8 key[IETF_MPA_KEY_SIZE];
56 u8 flags;
57 u8 rev;
58 __be16 priv_data_len;
59 u8 priv_data[0];
60};
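/*
 * NOTE (editorial sketch): nes_connect() fills a request frame of this
 * shape roughly as follows; an illustration of the layout, not additional
 * driver code:
 *
 *	frame = kzalloc(sizeof(*frame) + priv_len, GFP_KERNEL);
 *	memcpy(frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
 *	frame->flags = IETF_MPA_FLAGS_CRC;
 *	frame->rev = IETF_MPA_VERSION;
 *	frame->priv_data_len = htons(priv_len);
 *	memcpy(frame->priv_data, priv, priv_len);
 */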
61
62#define ietf_mpa_req_resp_frame ietf_mpa_frame
63
64struct nes_v4_quad {
65 u32 rsvd0;
66 __le32 DstIpAdrIndex; /* Only most significant 5 bits are valid */
67 __be32 SrcIpadr;
68 __be16 TcpPorts[2]; /* src is low, dest is high */
69};
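/*
 * NOTE (editorial sketch): cm_event_connected() derives the connection
 * hash table index from this quad, roughly:
 *
 *	hte_index = cpu_to_be32(crc32c(~0, &quad, sizeof(quad)) ^ 0xffffffff);
 *	hte_index &= nesadapter->hte_index_mask;
 */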
70
71struct nes_cm_node;
72enum nes_timer_type {
73 NES_TIMER_TYPE_SEND,
74 NES_TIMER_TYPE_RECV,
75 NES_TIMER_NODE_CLEANUP,
76 NES_TIMER_TYPE_CLOSE,
77};
78
79#define MAX_NES_IFS 4
80
81#define SET_ACK 1
82#define SET_SYN 2
83#define SET_FIN 4
84#define SET_RST 8
85
86struct option_base {
87 u8 optionnum;
88 u8 length;
89};
90
91enum option_numbers {
92 OPTION_NUMBER_END,
93 OPTION_NUMBER_NONE,
94 OPTION_NUMBER_MSS,
95 OPTION_NUMBER_WINDOW_SCALE,
96 OPTION_NUMBER_SACK_PERM,
97 OPTION_NUMBER_SACK,
98 OPTION_NUMBER_WRITE0 = 0xbc
99};
100
101struct option_mss {
102 u8 optionnum;
103 u8 length;
104 __be16 mss;
105};
106
107struct option_windowscale {
108 u8 optionnum;
109 u8 length;
110 u8 shiftcount;
111};
112
113union all_known_options {
114 char as_end;
115 struct option_base as_base;
116 struct option_mss as_mss;
117 struct option_windowscale as_windowscale;
118};
119
120struct nes_timer_entry {
121 struct list_head list;
122 unsigned long timetosend; /* jiffies */
123 struct sk_buff *skb;
124 u32 type;
125 u32 retrycount;
126 u32 retranscount;
127 u32 context;
128 u32 seq_num;
129 u32 send_retrans;
130 int close_when_complete;
131 struct net_device *netdev;
132};
133
134#define NES_DEFAULT_RETRYS 64
135#define NES_DEFAULT_RETRANS 8
136#ifdef CONFIG_INFINIBAND_NES_DEBUG
137#define NES_RETRY_TIMEOUT (1000*HZ/1000)
138#else
139#define NES_RETRY_TIMEOUT (3000*HZ/1000)
140#endif
141#define NES_SHORT_TIME (10)
142#define NES_LONG_TIME (2000*HZ/1000)
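/* the (msec*HZ/1000) expressions above convert milliseconds to jiffies */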
143
144#define NES_CM_HASHTABLE_SIZE 1024
145#define NES_CM_TCP_TIMER_INTERVAL 3000
146#define NES_CM_DEFAULT_MTU 1540
147#define NES_CM_DEFAULT_FRAME_CNT 10
148#define NES_CM_THREAD_STACK_SIZE 256
149#define NES_CM_DEFAULT_RCV_WND 64240 /* before we know that window scaling is allowed */
150#define NES_CM_DEFAULT_RCV_WND_SCALED 256960 /* after we know that window scaling is allowed */
151#define NES_CM_DEFAULT_RCV_WND_SCALE 2
152#define NES_CM_DEFAULT_FREE_PKTS 0x000A
153#define NES_CM_FREE_PKT_LO_WATERMARK 2
154
155#define NES_CM_DEFAULT_MSS 536
156
157#define NES_CM_DEF_SEQ 0x159bf75f
158#define NES_CM_DEF_LOCAL_ID 0x3b47
159
160#define NES_CM_DEF_SEQ2 0x18ed5740
161#define NES_CM_DEF_LOCAL_ID2 0xb807
162
163typedef u32 nes_addr_t;
164
165#define nes_cm_tsa_context nes_qp_context
166
167struct nes_qp;
168
169/* cm node transition states */
170enum nes_cm_node_state {
171 NES_CM_STATE_UNKNOWN,
172 NES_CM_STATE_INITED,
173 NES_CM_STATE_LISTENING,
174 NES_CM_STATE_SYN_RCVD,
175 NES_CM_STATE_SYN_SENT,
176 NES_CM_STATE_ONE_SIDE_ESTABLISHED,
177 NES_CM_STATE_ESTABLISHED,
178 NES_CM_STATE_ACCEPTING,
179 NES_CM_STATE_MPAREQ_SENT,
180 NES_CM_STATE_TSA,
181 NES_CM_STATE_FIN_WAIT1,
182 NES_CM_STATE_FIN_WAIT2,
183 NES_CM_STATE_CLOSE_WAIT,
184 NES_CM_STATE_TIME_WAIT,
185 NES_CM_STATE_LAST_ACK,
186 NES_CM_STATE_CLOSING,
187 NES_CM_STATE_CLOSED
188};
189
190/* type of nes connection */
191enum nes_cm_conn_type {
192 NES_CM_IWARP_CONN_TYPE,
193};
194
195/* CM context params */
196struct nes_cm_tcp_context {
197 u8 client;
198
199 u32 loc_seq_num;
200 u32 loc_ack_num;
201 u32 rem_ack_num;
202 u32 rcv_nxt;
203
204 u32 loc_id;
205 u32 rem_id;
206
207 u32 snd_wnd;
208 u32 max_snd_wnd;
209
210 u32 rcv_wnd;
211 u32 mss;
212 u8 snd_wscale;
213 u8 rcv_wscale;
214
215 struct nes_cm_tsa_context tsa_cntxt;
216 struct timeval sent_ts;
217};
218
219
220enum nes_cm_listener_state {
221 NES_CM_LISTENER_PASSIVE_STATE = 1,
222 NES_CM_LISTENER_ACTIVE_STATE = 2,
223 NES_CM_LISTENER_EITHER_STATE = 3
224};
225
226struct nes_cm_listener {
227 struct list_head list;
228 u64 session_id;
229 struct nes_cm_core *cm_core;
230 u8 loc_mac[ETH_ALEN];
231 nes_addr_t loc_addr;
232 u16 loc_port;
233 struct iw_cm_id *cm_id;
234 enum nes_cm_conn_type conn_type;
235 atomic_t ref_count;
236 struct nes_vnic *nesvnic;
237 atomic_t pend_accepts_cnt;
238 int backlog;
239 enum nes_cm_listener_state listener_state;
240 u32 reused_node;
241};
242
243/* per connection node and node state information */
244struct nes_cm_node {
245 u64 session_id;
246 u32 hashkey;
247
248 nes_addr_t loc_addr, rem_addr;
249 u16 loc_port, rem_port;
250
251 u8 loc_mac[ETH_ALEN];
252 u8 rem_mac[ETH_ALEN];
253
254 enum nes_cm_node_state state;
255 struct nes_cm_tcp_context tcp_cntxt;
256 struct nes_cm_core *cm_core;
257 struct sk_buff_head resend_list;
258 atomic_t ref_count;
259 struct net_device *netdev;
260
261 struct nes_cm_node *loopbackpartner;
262 struct list_head retrans_list;
263 spinlock_t retrans_list_lock;
264 struct list_head recv_list;
265 spinlock_t recv_list_lock;
266
267 int send_write0;
268 union {
269 struct ietf_mpa_frame mpa_frame;
270 u8 mpa_frame_buf[NES_CM_DEFAULT_MTU];
271 };
272 u16 mpa_frame_size;
273 struct iw_cm_id *cm_id;
274 struct list_head list;
275 int accelerated;
276 struct nes_cm_listener *listener;
277 enum nes_cm_conn_type conn_type;
278 struct nes_vnic *nesvnic;
279 int apbvt_set;
280 int accept_pend;
281};
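/*
 * NOTE (editorial): the mpa_frame/mpa_frame_buf union above lets the same
 * NES_CM_DEFAULT_MTU bytes of storage be read either as a parsed MPA
 * header or as the raw receive buffer, which is why event handlers copy
 * at most mpa_frame_size bytes out of mpa_frame_buf.
 */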
282
283/* structure for client or CM to fill when making CM api calls. */
284/* - only need to set relevant data, based on op. */
285struct nes_cm_info {
286 union {
287 struct iw_cm_id *cm_id;
288 struct net_device *netdev;
289 };
290
291 u16 loc_port;
292 u16 rem_port;
293 nes_addr_t loc_addr;
294 nes_addr_t rem_addr;
295
296 enum nes_cm_conn_type conn_type;
297 int backlog;
298};
299
300/* CM event codes */
301enum nes_cm_event_type {
302 NES_CM_EVENT_UNKNOWN,
303 NES_CM_EVENT_ESTABLISHED,
304 NES_CM_EVENT_MPA_REQ,
305 NES_CM_EVENT_MPA_CONNECT,
306 NES_CM_EVENT_MPA_ACCEPT,
307 NES_CM_EVENT_MPA_ESTABLISHED,
308 NES_CM_EVENT_CONNECTED,
309 NES_CM_EVENT_CLOSED,
310 NES_CM_EVENT_RESET,
311 NES_CM_EVENT_DROPPED_PKT,
312 NES_CM_EVENT_CLOSE_IMMED,
313 NES_CM_EVENT_CLOSE_HARD,
314 NES_CM_EVENT_CLOSE_CLEAN,
315 NES_CM_EVENT_ABORTED,
316 NES_CM_EVENT_SEND_FIRST
317};
318
319/* event to post to CM event handler */
320struct nes_cm_event {
321 enum nes_cm_event_type type;
322
323 struct nes_cm_info cm_info;
324 struct work_struct event_work;
325 struct nes_cm_node *cm_node;
326};
327
328struct nes_cm_core {
329 enum nes_cm_node_state state;
330 atomic_t session_id;
331
332 atomic_t listen_node_cnt;
333 struct nes_cm_node listen_list;
334 spinlock_t listen_list_lock;
335
336 u32 mtu;
337 u32 free_tx_pkt_max;
338 u32 rx_pkt_posted;
339 struct sk_buff_head tx_free_list;
340 atomic_t ht_node_cnt;
341 struct list_head connected_nodes;
342 /* struct list_head hashtable[NES_CM_HASHTABLE_SIZE]; */
343 spinlock_t ht_lock;
344
345 struct timer_list tcp_timer;
346
347 struct nes_cm_ops *api;
348
349 int (*post_event)(struct nes_cm_event *event);
350 atomic_t events_posted;
351 struct workqueue_struct *event_wq;
352 struct workqueue_struct *disconn_wq;
353
354 atomic_t node_cnt;
355 u64 aborted_connects;
356 u32 options;
357
358 struct nes_cm_node *current_listen_node;
359};
360
361
362#define NES_CM_SET_PKT_SIZE (1 << 1)
363#define NES_CM_SET_FREE_PKT_Q_SIZE (1 << 2)
364
365/* CM ops/API for client interface */
366struct nes_cm_ops {
367 int (*accelerated)(struct nes_cm_core *, struct nes_cm_node *);
368 struct nes_cm_listener * (*listen)(struct nes_cm_core *, struct nes_vnic *,
369 struct nes_cm_info *);
370 int (*stop_listener)(struct nes_cm_core *, struct nes_cm_listener *);
371 struct nes_cm_node * (*connect)(struct nes_cm_core *,
372 struct nes_vnic *, struct ietf_mpa_frame *,
373 struct nes_cm_info *);
374 int (*close)(struct nes_cm_core *, struct nes_cm_node *);
375 int (*accept)(struct nes_cm_core *, struct ietf_mpa_frame *,
376 struct nes_cm_node *);
377 int (*reject)(struct nes_cm_core *, struct ietf_mpa_frame *,
378 struct nes_cm_node *);
379 int (*recv_pkt)(struct nes_cm_core *, struct nes_vnic *,
380 struct sk_buff *);
381 int (*destroy_cm_core)(struct nes_cm_core *);
382 int (*get)(struct nes_cm_core *);
383 int (*set)(struct nes_cm_core *, u32, u32);
384};
385
386
387int send_mpa_request(struct nes_cm_node *);
388struct sk_buff *form_cm_frame(struct sk_buff *, struct nes_cm_node *,
389 void *, u32, void *, u32, u8);
390int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
391 enum nes_timer_type, int, int);
392void nes_cm_timer_tick(unsigned long);
393int send_syn(struct nes_cm_node *, u32);
394int send_reset(struct nes_cm_node *);
395int send_ack(struct nes_cm_node *);
396int send_fin(struct nes_cm_node *, struct sk_buff *);
397struct sk_buff *get_free_pkt(struct nes_cm_node *);
398int process_packet(struct nes_cm_node *, struct sk_buff *, struct nes_cm_core *);
399
400struct nes_cm_node *mini_cm_connect(struct nes_cm_core *,
401 struct nes_vnic *, struct ietf_mpa_frame *, struct nes_cm_info *);
402int mini_cm_accept(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
403int mini_cm_reject(struct nes_cm_core *, struct ietf_mpa_frame *, struct nes_cm_node *);
404int mini_cm_close(struct nes_cm_core *, struct nes_cm_node *);
405int mini_cm_recv_pkt(struct nes_cm_core *, struct nes_vnic *, struct sk_buff *);
406struct nes_cm_core *mini_cm_alloc_core(struct nes_cm_info *);
407int mini_cm_dealloc_core(struct nes_cm_core *);
408int mini_cm_get(struct nes_cm_core *);
409int mini_cm_set(struct nes_cm_core *, u32, u32);
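/*
 * NOTE (editorial sketch): the cm core is assumed to publish the mini_cm_*
 * handlers above through a nes_cm_ops table in nes_cm.c, roughly:
 *
 *	static struct nes_cm_ops nes_cm_api = {
 *		.connect	= mini_cm_connect,
 *		.accept		= mini_cm_accept,
 *		.reject		= mini_cm_reject,
 *		.close		= mini_cm_close,
 *		.recv_pkt	= mini_cm_recv_pkt,
 *		.get		= mini_cm_get,
 *		.set		= mini_cm_set,
 *	};
 *
 * (the remaining ops -- accelerated, listen, stop_listener,
 * destroy_cm_core -- are assumed to be wired to their counterparts the
 * same way.)
 */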
410
411int nes_cm_disconn(struct nes_qp *);
412void nes_disconnect_worker(struct work_struct *);
413int nes_cm_disconn_true(struct nes_qp *);
414int nes_disconnect(struct nes_qp *, int);
415
416int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
417int nes_reject(struct iw_cm_id *, const void *, u8);
418int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
419int nes_create_listen(struct iw_cm_id *, int);
420int nes_destroy_listen(struct iw_cm_id *);
421
422int nes_cm_recv(struct sk_buff *, struct net_device *);
423int nes_cm_start(void);
424int nes_cm_stop(void);
425
426/* CM event handler functions */
427void cm_event_connected(struct nes_cm_event *);
428void cm_event_connect_error(struct nes_cm_event *);
429void cm_event_reset(struct nes_cm_event *);
430void cm_event_mpa_req(struct nes_cm_event *);
431int nes_cm_post_event(struct nes_cm_event *);
432
433#endif /* NES_CM_H */
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
new file mode 100644
index 000000000000..da9daba8e668
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -0,0 +1,193 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef NES_CONTEXT_H
34#define NES_CONTEXT_H
35
36struct nes_qp_context {
37 __le32 misc;
38 __le32 cqs;
39 __le32 sq_addr_low;
40 __le32 sq_addr_high;
41 __le32 rq_addr_low;
42 __le32 rq_addr_high;
43 __le32 misc2;
44 __le16 tcpPorts[2];
45 __le32 ip0;
46 __le32 ip1;
47 __le32 ip2;
48 __le32 ip3;
49 __le32 mss;
50 __le32 arp_index_vlan;
51 __le32 tcp_state_flow_label;
52 __le32 pd_index_wscale;
53 __le32 keepalive;
54 u32 ts_recent;
55 u32 ts_age;
56 __le32 snd_nxt;
57 __le32 snd_wnd;
58 __le32 rcv_nxt;
59 __le32 rcv_wnd;
60 __le32 snd_max;
61 __le32 snd_una;
62 u32 srtt;
63 __le32 rttvar;
64 __le32 ssthresh;
65 __le32 cwnd;
66 __le32 snd_wl1;
67 __le32 snd_wl2;
68 __le32 max_snd_wnd;
69 __le32 ts_val_delta;
70 u32 retransmit;
71 u32 probe_cnt;
72 u32 hte_index;
73 __le32 q2_addr_low;
74 __le32 q2_addr_high;
75 __le32 ird_index;
76 u32 Rsvd3;
77 __le32 ird_ord_sizes;
78 u32 mrkr_offset;
79 __le32 aeq_token_low;
80 __le32 aeq_token_high;
81};
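/*
 * NOTE (editorial): fields declared __le16/__le32 above are programmed
 * with cpu_to_le*() on host-order values, e.g. in cm_event_connected()
 * (illustrative variable names):
 *
 *	qp_context->tcpPorts[0] = cpu_to_le16(ntohs(local_port));
 *	qp_context->ip0 = cpu_to_le32(ntohl(remote_ipaddr));
 */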
82
83/* QP Context Misc Field */
84
85#define NES_QPCONTEXT_MISC_IWARP_VER_MASK 0x00000003
86#define NES_QPCONTEXT_MISC_IWARP_VER_SHIFT 0
87#define NES_QPCONTEXT_MISC_EFB_SIZE_MASK 0x000000C0
88#define NES_QPCONTEXT_MISC_EFB_SIZE_SHIFT 6
89#define NES_QPCONTEXT_MISC_RQ_SIZE_MASK 0x00000300
90#define NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT 8
91#define NES_QPCONTEXT_MISC_SQ_SIZE_MASK 0x00000c00
92#define NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT 10
93#define NES_QPCONTEXT_MISC_PCI_FCN_MASK 0x00007000
94#define NES_QPCONTEXT_MISC_PCI_FCN_SHIFT 12
95#define NES_QPCONTEXT_MISC_DUP_ACKS_MASK 0x00070000
96#define NES_QPCONTEXT_MISC_DUP_ACKS_SHIFT 16
97
98enum nes_qp_context_misc_bits {
99 NES_QPCONTEXT_MISC_RX_WQE_SIZE = 0x00000004,
100 NES_QPCONTEXT_MISC_IPV4 = 0x00000008,
101 NES_QPCONTEXT_MISC_DO_NOT_FRAG = 0x00000010,
102 NES_QPCONTEXT_MISC_INSERT_VLAN = 0x00000020,
103 NES_QPCONTEXT_MISC_DROS = 0x00008000,
104 NES_QPCONTEXT_MISC_WSCALE = 0x00080000,
105 NES_QPCONTEXT_MISC_KEEPALIVE = 0x00100000,
106 NES_QPCONTEXT_MISC_TIMESTAMP = 0x00200000,
107 NES_QPCONTEXT_MISC_SACK = 0x00400000,
108 NES_QPCONTEXT_MISC_RDMA_WRITE_EN = 0x00800000,
109 NES_QPCONTEXT_MISC_RDMA_READ_EN = 0x01000000,
110 NES_QPCONTEXT_MISC_WBIND_EN = 0x10000000,
111 NES_QPCONTEXT_MISC_FAST_REGISTER_EN = 0x20000000,
112 NES_QPCONTEXT_MISC_PRIV_EN = 0x40000000,
113 NES_QPCONTEXT_MISC_NO_NAGLE = 0x80000000
114};
115
116enum nes_qp_acc_wq_sizes {
117 HCONTEXT_TSA_WQ_SIZE_4 = 0,
118 HCONTEXT_TSA_WQ_SIZE_32 = 1,
119 HCONTEXT_TSA_WQ_SIZE_128 = 2,
120 HCONTEXT_TSA_WQ_SIZE_512 = 3
121};
122
123/* QP Context Misc2 Fields */
124#define NES_QPCONTEXT_MISC2_TTL_MASK 0x000000ff
125#define NES_QPCONTEXT_MISC2_TTL_SHIFT 0
126#define NES_QPCONTEXT_MISC2_HOP_LIMIT_MASK 0x000000ff
127#define NES_QPCONTEXT_MISC2_HOP_LIMIT_SHIFT 0
128#define NES_QPCONTEXT_MISC2_LIMIT_MASK 0x00000300
129#define NES_QPCONTEXT_MISC2_LIMIT_SHIFT 8
130#define NES_QPCONTEXT_MISC2_NIC_INDEX_MASK 0x0000fc00
131#define NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT 10
132#define NES_QPCONTEXT_MISC2_SRC_IP_MASK 0x001f0000
133#define NES_QPCONTEXT_MISC2_SRC_IP_SHIFT 16
134#define NES_QPCONTEXT_MISC2_TOS_MASK 0xff000000
135#define NES_QPCONTEXT_MISC2_TOS_SHIFT 24
136#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_MASK 0xff000000
137#define NES_QPCONTEXT_MISC2_TRAFFIC_CLASS_SHIFT 24
138
139/* QP Context Tcp State/Flow Label Fields */
140#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_MASK 0x000fffff
141#define NES_QPCONTEXT_TCPFLOW_FLOW_LABEL_SHIFT 0
142#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_MASK 0xf0000000
143#define NES_QPCONTEXT_TCPFLOW_TCP_STATE_SHIFT 28
144
145enum nes_qp_tcp_state {
146 NES_QPCONTEXT_TCPSTATE_CLOSED = 1,
147 NES_QPCONTEXT_TCPSTATE_EST = 5,
148 NES_QPCONTEXT_TCPSTATE_TIME_WAIT = 11,
149};
150
151/* QP Context PD Index/wscale Fields */
152#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_MASK 0x0000000f
153#define NES_QPCONTEXT_PDWSCALE_RCV_WSCALE_SHIFT 0
154#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_MASK 0x00000f00
155#define NES_QPCONTEXT_PDWSCALE_SND_WSCALE_SHIFT 8
156#define NES_QPCONTEXT_PDWSCALE_PDINDEX_MASK 0xffff0000
157#define NES_QPCONTEXT_PDWSCALE_PDINDEX_SHIFT 16
158
159/* QP Context Keepalive Fields */
160#define NES_QPCONTEXT_KEEPALIVE_DELTA_MASK 0x0000ffff
161#define NES_QPCONTEXT_KEEPALIVE_DELTA_SHIFT 0
162#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_MASK 0x00ff0000
163#define NES_QPCONTEXT_KEEPALIVE_PROBE_CNT_SHIFT 16
164#define NES_QPCONTEXT_KEEPALIVE_INTV_MASK 0xff000000
165#define NES_QPCONTEXT_KEEPALIVE_INTV_SHIFT 24
166
167/* QP Context ORD/IRD Fields */
168#define NES_QPCONTEXT_ORDIRD_ORDSIZE_MASK 0x0000007f
169#define NES_QPCONTEXT_ORDIRD_ORDSIZE_SHIFT 0
170#define NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK 0x00030000
171#define NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT 16
172#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_MASK 0x30000000
173#define NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT 28
174
175enum nes_ord_ird_bits {
176 NES_QPCONTEXT_ORDIRD_WRPDU = 0x02000000,
177 NES_QPCONTEXT_ORDIRD_LSMM_PRESENT = 0x04000000,
178 NES_QPCONTEXT_ORDIRD_ALSMM = 0x08000000,
179 NES_QPCONTEXT_ORDIRD_AAH = 0x40000000,
180 NES_QPCONTEXT_ORDIRD_RNMC = 0x80000000
181};
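/*
 * NOTE (editorial sketch): ird_ord_sizes is composed by OR-ing the
 * requested ORD into the low bits and the iWARP mode into bits 28-29,
 * as nes_connect() and cm_event_connected() do:
 *
 *	ctx->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
 *	ctx->ird_ord_sizes |= cpu_to_le32((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
 */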
182
183enum nes_iwarp_qp_state {
184 NES_QPCONTEXT_IWARP_STATE_NONEXIST = 0,
185 NES_QPCONTEXT_IWARP_STATE_IDLE = 1,
186 NES_QPCONTEXT_IWARP_STATE_RTS = 2,
187 NES_QPCONTEXT_IWARP_STATE_CLOSING = 3,
188 NES_QPCONTEXT_IWARP_STATE_TERMINATE = 5,
189 NES_QPCONTEXT_IWARP_STATE_ERROR = 6
190};
191
192
193#endif /* NES_CONTEXT_H */
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
new file mode 100644
index 000000000000..7c4c0fbf0abd
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -0,0 +1,3080 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/if_vlan.h>
41
42#include "nes.h"
43
44u32 crit_err_count = 0;
45u32 int_mod_timer_init;
46u32 int_mod_cq_depth_256;
47u32 int_mod_cq_depth_128;
48u32 int_mod_cq_depth_32;
49u32 int_mod_cq_depth_24;
50u32 int_mod_cq_depth_16;
51u32 int_mod_cq_depth_4;
52u32 int_mod_cq_depth_1;
53
54#include "nes_cm.h"
55
56
57#ifdef CONFIG_INFINIBAND_NES_DEBUG
58static unsigned char *nes_iwarp_state_str[] = {
59 "Non-Existant",
60 "Idle",
61 "RTS",
62 "Closing",
63 "RSVD1",
64 "Terminate",
65 "Error",
66 "RSVD2",
67};
68
69static unsigned char *nes_tcp_state_str[] = {
70 "Non-Existant",
71 "Closed",
72 "Listen",
73 "SYN Sent",
74 "SYN Rcvd",
75 "Established",
76 "Close Wait",
77 "FIN Wait 1",
78 "Closing",
79 "Last Ack",
80 "FIN Wait 2",
81 "Time Wait",
82 "RSVD1",
83 "RSVD2",
84 "RSVD3",
85 "RSVD4",
86};
87#endif
88
89
90/**
91 * nes_nic_init_timer_defaults
92 */
93void nes_nic_init_timer_defaults(struct nes_device *nesdev, u8 jumbomode)
94{
95 unsigned long flags;
96 struct nes_adapter *nesadapter = nesdev->nesadapter;
97 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
98
99 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
100
101 shared_timer->timer_in_use_min = NES_NIC_FAST_TIMER_LOW;
102 shared_timer->timer_in_use_max = NES_NIC_FAST_TIMER_HIGH;
103 if (jumbomode) {
104 shared_timer->threshold_low = DEFAULT_JUMBO_NES_QL_LOW;
105 shared_timer->threshold_target = DEFAULT_JUMBO_NES_QL_TARGET;
106 shared_timer->threshold_high = DEFAULT_JUMBO_NES_QL_HIGH;
107 } else {
108 shared_timer->threshold_low = DEFAULT_NES_QL_LOW;
109 shared_timer->threshold_target = DEFAULT_NES_QL_TARGET;
110 shared_timer->threshold_high = DEFAULT_NES_QL_HIGH;
111 }
112
113 /* todo use netdev->mtu to set thresholds */
114 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
115}
116
117
118/**
119 * nes_nic_init_timer
120 */
121static void nes_nic_init_timer(struct nes_device *nesdev)
122{
123 unsigned long flags;
124 struct nes_adapter *nesadapter = nesdev->nesadapter;
125 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
126
127 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
128
129 if (shared_timer->timer_in_use_old == 0) {
130 nesdev->deepcq_count = 0;
131 shared_timer->timer_direction_upward = 0;
132 shared_timer->timer_direction_downward = 0;
133 shared_timer->timer_in_use = NES_NIC_FAST_TIMER;
134 shared_timer->timer_in_use_old = 0;
135
136 }
137 if (shared_timer->timer_in_use != shared_timer->timer_in_use_old) {
138 shared_timer->timer_in_use_old = shared_timer->timer_in_use;
139 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
140 0x80000000 | ((u32)(shared_timer->timer_in_use*8)));
141 }
142 /* todo use netdev->mtu to set thresholds */
143 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
144}
145
146
147/**
148 * nes_nic_tune_timer
149 */
150static void nes_nic_tune_timer(struct nes_device *nesdev)
151{
152 unsigned long flags;
153 struct nes_adapter *nesadapter = nesdev->nesadapter;
154 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
155 u16 cq_count = nesdev->currcq_count;
156
157 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
158
159 if (shared_timer->cq_count_old < cq_count) {
160 if (cq_count > shared_timer->threshold_low)
161 shared_timer->cq_direction_downward = 0;
162 }
163 if (shared_timer->cq_count_old >= cq_count)
164 shared_timer->cq_direction_downward++;
165 shared_timer->cq_count_old = cq_count;
166 if (shared_timer->cq_direction_downward > NES_NIC_CQ_DOWNWARD_TREND) {
167 if (cq_count <= shared_timer->threshold_low) {
168 shared_timer->threshold_low = shared_timer->threshold_low / 2;
169 shared_timer->cq_direction_downward = 0;
170 nesdev->currcq_count = 0;
171 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
172 return;
173 }
174 }
175
176 if (cq_count > 1) {
177 nesdev->deepcq_count += cq_count;
178 if (cq_count <= shared_timer->threshold_low) { /* increase timer gently */
179 shared_timer->timer_direction_upward++;
180 shared_timer->timer_direction_downward = 0;
181 } else if (cq_count <= shared_timer->threshold_target) { /* balanced */
182 shared_timer->timer_direction_upward = 0;
183 shared_timer->timer_direction_downward = 0;
184 } else if (cq_count <= shared_timer->threshold_high) { /* decrease timer gently */
185 shared_timer->timer_direction_downward++;
186 shared_timer->timer_direction_upward = 0;
187 } else if (cq_count <= (shared_timer->threshold_high) * 2) {
188 shared_timer->timer_in_use -= 2;
189 shared_timer->timer_direction_upward = 0;
190 shared_timer->timer_direction_downward++;
191 } else {
192 shared_timer->timer_in_use -= 4;
193 shared_timer->timer_direction_upward = 0;
194 shared_timer->timer_direction_downward++;
195 }
196
197 if (shared_timer->timer_direction_upward > 3) { /* using history */
198 shared_timer->timer_in_use += 3;
199 shared_timer->timer_direction_upward = 0;
200 shared_timer->timer_direction_downward = 0;
201 }
202 if (shared_timer->timer_direction_downward > 5) { /* using history */
203 shared_timer->timer_in_use -= 4;
204 shared_timer->timer_direction_downward = 0;
205 shared_timer->timer_direction_upward = 0;
206 }
207 }
208
209 /* boundary checking */
210 if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH)
211 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH;
212 else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) {
213 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW;
214 }
215
216 nesdev->currcq_count = 0;
217
218 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
219}
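/*
 * NOTE (editorial, worked example): with the non-jumbo defaults, a CQ
 * depth persistently above threshold_high shrinks timer_in_use by 2 per
 * poll (by 4 beyond 2*threshold_high) until it clamps at
 * NES_NIC_FAST_TIMER_LOW; depths at or below threshold_low accumulate
 * timer_direction_upward history and grow it by 3 once more than three
 * such polls build up, clamped at NES_NIC_FAST_TIMER_HIGH.
 */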
220
221
222/**
223 * nes_init_adapter - initialize adapter
224 */
225struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev)
{
226 struct nes_adapter *nesadapter = NULL;
227 unsigned long num_pds;
228 u32 u32temp;
229 u32 port_count;
230 u16 max_rq_wrs;
231 u16 max_sq_wrs;
232 u32 max_mr;
233 u32 max_256pbl;
234 u32 max_4kpbl;
235 u32 max_qp;
236 u32 max_irrq;
237 u32 max_cq;
238 u32 hte_index_mask;
239 u32 adapter_size;
240 u32 arp_table_size;
241 u16 vendor_id;
242 u8 OneG_Mode;
243 u8 func_index;
244
245 /* search the list of existing adapters */
246 list_for_each_entry(nesadapter, &nes_adapter_list, list) {
247 nes_debug(NES_DBG_INIT, "Searching Adapter list for PCI devfn = 0x%X,"
248 " adapter PCI slot/bus = %u/%u, pci devices PCI slot/bus = %u/%u, .\n",
249 nesdev->pcidev->devfn,
250 PCI_SLOT(nesadapter->devfn),
251 nesadapter->bus_number,
252 PCI_SLOT(nesdev->pcidev->devfn),
253 nesdev->pcidev->bus->number);
254 if ((PCI_SLOT(nesadapter->devfn) == PCI_SLOT(nesdev->pcidev->devfn)) &&
255 (nesadapter->bus_number == nesdev->pcidev->bus->number)) {
256 nesadapter->ref_count++;
257 return nesadapter;
258 }
259 }
260
261 /* no adapter found */
262 num_pds = pci_resource_len(nesdev->pcidev, BAR_1) >> PAGE_SHIFT;
263 if ((hw_rev != NE020_REV) && (hw_rev != NE020_REV1)) {
264 nes_debug(NES_DBG_INIT, "NE020 driver detected unknown hardware revision 0x%x\n",
265 hw_rev);
266 return NULL;
267 }
268
269 nes_debug(NES_DBG_INIT, "Determine Soft Reset, QP_control=0x%x, CPU0=0x%x, CPU1=0x%x, CPU2=0x%x\n",
270 nes_read_indexed(nesdev, NES_IDX_QP_CONTROL + PCI_FUNC(nesdev->pcidev->devfn) * 8),
271 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS),
272 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 4),
273 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS + 8));
274
275 nes_debug(NES_DBG_INIT, "Reset and init NE020\n");
276
277
278 if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
279 return NULL;
280 if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
281 return NULL;
282 nes_init_csr_ne020(nesdev, hw_rev, port_count);
283
284 max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
285 nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
286
287 u32temp = nes_read_indexed(nesdev, NES_IDX_QUAD_HASH_TABLE_SIZE);
288 if (max_qp > ((u32)1 << (u32temp & 0x001f))) {
289 nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to hash table size = 0x%08X\n",
290 max_qp, u32temp);
291 max_qp = (u32)1 << (u32temp & 0x001f);
292 }
293
294 hte_index_mask = ((u32)1 << ((u32temp & 0x001f)+1))-1;
295 nes_debug(NES_DBG_INIT, "Max QP = %u, hte_index_mask = 0x%08X.\n",
296 max_qp, hte_index_mask);
297
298 u32temp = nes_read_indexed(nesdev, NES_IDX_IRRQ_COUNT);
299
300 max_irrq = 1 << (u32temp & 0x001f);
301
302 if (max_qp > max_irrq) {
303 max_qp = max_irrq;
304 nes_debug(NES_DBG_INIT, "Reducing Max QPs to %u due to Available Q1s.\n",
305 max_qp);
306 }
307
308 /* there should be no reason to allocate more pds than qps */
309 if (num_pds > max_qp)
310 num_pds = max_qp;
311
312 u32temp = nes_read_indexed(nesdev, NES_IDX_MRT_SIZE);
313 max_mr = (u32)8192 << (u32temp & 0x7);
314
315 u32temp = nes_read_indexed(nesdev, NES_IDX_PBL_REGION_SIZE);
316 max_256pbl = (u32)1 << (u32temp & 0x0000001f);
317 max_4kpbl = (u32)1 << ((u32temp >> 16) & 0x0000001f);
318 max_cq = nes_read_indexed(nesdev, NES_IDX_CQ_CTX_SIZE);
319
320 u32temp = nes_read_indexed(nesdev, NES_IDX_ARP_CACHE_SIZE);
321 arp_table_size = 1 << u32temp;
322
323 adapter_size = (sizeof(struct nes_adapter) +
324 (sizeof(unsigned long)-1)) & (~(sizeof(unsigned long)-1));
325 adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
326 adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
327 adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
328 adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
329 adapter_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
330 adapter_size += sizeof(struct nes_qp **) * max_qp;
331
332 /* allocate a new adapter struct */
333 nesadapter = kzalloc(adapter_size, GFP_KERNEL);
334 if (nesadapter == NULL) {
335 return NULL;
336 }
337
338 nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
339 nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
340
341 /* populate the new nesadapter */
342 nesadapter->devfn = nesdev->pcidev->devfn;
343 nesadapter->bus_number = nesdev->pcidev->bus->number;
344 nesadapter->ref_count = 1;
345 nesadapter->timer_int_req = 0xffff0000;
346 nesadapter->OneG_Mode = OneG_Mode;
347 nesadapter->doorbell_start = nesdev->doorbell_region;
348
349 /* nesadapter->tick_delta = clk_divisor; */
350 nesadapter->hw_rev = hw_rev;
351 nesadapter->port_count = port_count;
352
353 nesadapter->max_qp = max_qp;
354 nesadapter->hte_index_mask = hte_index_mask;
355 nesadapter->max_irrq = max_irrq;
356 nesadapter->max_mr = max_mr;
357 nesadapter->max_256pbl = max_256pbl - 1;
358 nesadapter->max_4kpbl = max_4kpbl - 1;
359 nesadapter->max_cq = max_cq;
360 nesadapter->free_256pbl = max_256pbl - 1;
361 nesadapter->free_4kpbl = max_4kpbl - 1;
362 nesadapter->max_pd = num_pds;
363 nesadapter->arp_table_size = arp_table_size;
364
365 nesadapter->et_pkt_rate_low = NES_TIMER_ENABLE_LIMIT;
366 if (nes_drv_opt & NES_DRV_OPT_DISABLE_INT_MOD) {
367 nesadapter->et_use_adaptive_rx_coalesce = 0;
368 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
369 nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
370 } else {
371 nesadapter->et_use_adaptive_rx_coalesce = 1;
372 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
373 nesadapter->et_rx_coalesce_usecs_irq = 0;
374 printk(PFX "%s: Using Adaptive Interrupt Moderation\n", __FUNCTION__);
375 }
376 /* Setup and enable the periodic timer */
377 if (nesadapter->et_rx_coalesce_usecs_irq)
378 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x80000000 |
379 ((u32)(nesadapter->et_rx_coalesce_usecs_irq * 8)));
380 else
381 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL, 0x00000000);
382
383 nesadapter->base_pd = 1;
384
385 nesadapter->device_cap_flags =
386 IB_DEVICE_ZERO_STAG | IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW;
387
388 nesadapter->allocated_qps = (unsigned long *)&(((unsigned char *)nesadapter)
389 [(sizeof(struct nes_adapter)+(sizeof(unsigned long)-1))&(~(sizeof(unsigned long)-1))]);
390 nesadapter->allocated_cqs = &nesadapter->allocated_qps[BITS_TO_LONGS(max_qp)];
391 nesadapter->allocated_mrs = &nesadapter->allocated_cqs[BITS_TO_LONGS(max_cq)];
392 nesadapter->allocated_pds = &nesadapter->allocated_mrs[BITS_TO_LONGS(max_mr)];
393 nesadapter->allocated_arps = &nesadapter->allocated_pds[BITS_TO_LONGS(num_pds)];
394 nesadapter->qp_table = (struct nes_qp **)(&nesadapter->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
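/*
 * NOTE (editorial): everything carved out above lives in the single
 * kzalloc'd block sized by adapter_size -- the nes_adapter struct
 * (rounded up to a long boundary) followed by the QP, CQ, MR, PD and ARP
 * allocation bitmaps and finally the qp_table pointer array.
 */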
395
396
397 /* mark the usual suspect QPs and CQs as in use */
398 for (u32temp = 0; u32temp < NES_FIRST_QPN; u32temp++) {
399 set_bit(u32temp, nesadapter->allocated_qps);
400 set_bit(u32temp, nesadapter->allocated_cqs);
401 }
402
403 for (u32temp = 0; u32temp < 20; u32temp++)
404 set_bit(u32temp, nesadapter->allocated_pds);
405 u32temp = nes_read_indexed(nesdev, NES_IDX_QP_MAX_CFG_SIZES);
406
407 max_rq_wrs = ((u32temp >> 8) & 3);
408 switch (max_rq_wrs) {
409 case 0:
410 max_rq_wrs = 4;
411 break;
412 case 1:
413 max_rq_wrs = 16;
414 break;
415 case 2:
416 max_rq_wrs = 32;
417 break;
418 case 3:
419 max_rq_wrs = 512;
420 break;
421 }
422
423 max_sq_wrs = (u32temp & 3);
424 switch (max_sq_wrs) {
425 case 0:
426 max_sq_wrs = 4;
427 break;
428 case 1:
429 max_sq_wrs = 16;
430 break;
431 case 2:
432 max_sq_wrs = 32;
433 break;
434 case 3:
435 max_sq_wrs = 512;
436 break;
437 }
438 nesadapter->max_qp_wr = min(max_rq_wrs, max_sq_wrs);
439 nesadapter->max_irrq_wr = (u32temp >> 16) & 3;
440
441 nesadapter->max_sge = 4;
442 nesadapter->max_cqe = 32767;
443
444 if (nes_read_eeprom_values(nesdev, nesadapter)) {
445 printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
446 kfree(nesadapter);
447 return NULL;
448 }
449
450 u32temp = nes_read_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG);
451 nes_write_indexed(nesdev, NES_IDX_TCP_TIMER_CONFIG,
452 (u32temp & 0xff000000) | (nesadapter->tcp_timer_core_clk_divisor & 0x00ffffff));
453
454 /* setup port configuration */
455 if (nesadapter->port_count == 1) {
456 u32temp = 0x00000000;
457 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
458 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
459 else
460 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
461 } else {
462 if (nesadapter->port_count == 2)
463 u32temp = 0x00000044;
464 else
465 u32temp = 0x000000e4;
466 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
467 }
468
469 nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp);
470 nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
471 nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
472
473 spin_lock_init(&nesadapter->resource_lock);
474 spin_lock_init(&nesadapter->phy_lock);
475 spin_lock_init(&nesadapter->pbl_lock);
476 spin_lock_init(&nesadapter->periodic_timer_lock);
477
478 INIT_LIST_HEAD(&nesadapter->nesvnic_list[0]);
479 INIT_LIST_HEAD(&nesadapter->nesvnic_list[1]);
480 INIT_LIST_HEAD(&nesadapter->nesvnic_list[2]);
481 INIT_LIST_HEAD(&nesadapter->nesvnic_list[3]);
482
483 if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
484 u32 pcs_control_status0, pcs_control_status1;
485 u32 reset_value;
486 u32 i = 0;
487 u32 int_cnt = 0;
488 u32 ext_cnt = 0;
489 unsigned long flags;
490 u32 j = 0;
491
492 pcs_control_status0 = nes_read_indexed(nesdev,
493 NES_IDX_PHY_PCS_CONTROL_STATUS0);
494 pcs_control_status1 = nes_read_indexed(nesdev,
495 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
496
497 for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
498 pcs_control_status0 = nes_read_indexed(nesdev,
499 NES_IDX_PHY_PCS_CONTROL_STATUS0);
500 pcs_control_status1 = nes_read_indexed(nesdev,
501 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
502 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
503 || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
504 int_cnt++;
505 msleep(1);
506 }
507 if (int_cnt > 1) {
508 spin_lock_irqsave(&nesadapter->phy_lock, flags);
509 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
510 mh_detected++;
511 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
512 reset_value |= 0x0000003d;
513 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
514
515 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
516 & 0x00000040) != 0x00000040) && (j++ < 5000));
517 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
518
519 pcs_control_status0 = nes_read_indexed(nesdev,
520 NES_IDX_PHY_PCS_CONTROL_STATUS0);
521 pcs_control_status1 = nes_read_indexed(nesdev,
522 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
523
524 for (i = 0; i < NES_MAX_LINK_CHECK; i++) {
525 pcs_control_status0 = nes_read_indexed(nesdev,
526 NES_IDX_PHY_PCS_CONTROL_STATUS0);
527 pcs_control_status1 = nes_read_indexed(nesdev,
528 NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
529 if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
530 || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) {
531 if (++ext_cnt > int_cnt) {
532 spin_lock_irqsave(&nesadapter->phy_lock, flags);
533 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1,
534 0x0000F0C8);
535 mh_detected++;
536 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
537 reset_value |= 0x0000003d;
538 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
539
540 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
541 & 0x00000040) != 0x00000040) && (j++ < 5000));
542 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
543 break;
544 }
545 }
546 msleep(1);
547 }
548 }
549 }
550
551 if (nesadapter->hw_rev == NE020_REV) {
552 init_timer(&nesadapter->mh_timer);
553 nesadapter->mh_timer.function = nes_mh_fix;
554 nesadapter->mh_timer.expires = jiffies + (HZ/5); /* 200 ms */
555 nesadapter->mh_timer.data = (unsigned long)nesdev;
556 add_timer(&nesadapter->mh_timer);
557 } else {
558 nes_write32(nesdev->regs+NES_INTF_INT_STAT, 0x0f000000);
559 }
560
561 init_timer(&nesadapter->lc_timer);
562 nesadapter->lc_timer.function = nes_clc;
563 nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
564 nesadapter->lc_timer.data = (unsigned long)nesdev;
565 add_timer(&nesadapter->lc_timer);
566
567 list_add_tail(&nesadapter->list, &nes_adapter_list);
568
569 for (func_index = 0; func_index < 8; func_index++) {
570 pci_bus_read_config_word(nesdev->pcidev->bus,
571 PCI_DEVFN(PCI_SLOT(nesdev->pcidev->devfn),
572 func_index), 0, &vendor_id);
573 if (vendor_id == 0xffff)
574 break;
575 }
576 nes_debug(NES_DBG_INIT, "%s %d functions found for %s.\n", __FUNCTION__,
577 func_index, pci_name(nesdev->pcidev));
578 nesadapter->adapter_fcn_count = func_index;
579
580 return nesadapter;
581}
582
583
584/**
585 * nes_reset_adapter_ne020
586 */
587unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode)
588{
589 u32 port_count;
590 u32 u32temp;
591 u32 i;
592
593 u32temp = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
594 port_count = ((u32temp & 0x00000300) >> 8) + 1;
595 /* TODO: assuming that both SERDES are set the same for now */
596 *OneG_Mode = (u32temp & 0x00003c00) ? 0 : 1;
597 nes_debug(NES_DBG_INIT, "Initial Software Reset = 0x%08X, port_count=%u\n",
598 u32temp, port_count);
599 if (*OneG_Mode)
600 nes_debug(NES_DBG_INIT, "Running in 1G mode.\n");
601 u32temp &= 0xff00ffc0;
602 switch (port_count) {
603 case 1:
604 u32temp |= 0x00ee0000;
605 break;
606 case 2:
607 u32temp |= 0x00cc0000;
608 break;
609 case 4:
610 u32temp |= 0x00000000;
611 break;
612 default:
613 return 0;
614 break;
615 }
616
617 /* check and do full reset if needed */
618 if (nes_read_indexed(nesdev, NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8))) {
619 nes_debug(NES_DBG_INIT, "Issuing Full Soft reset = 0x%08X\n", u32temp | 0xd);
620 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
621
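	/* Poll the reset-done bit (0x40) for up to 10 seconds. */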
622 i = 0;
623 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
624 mdelay(1);
625 if (i >= 10000) {
626 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
627 return 0;
628 }
629 }
630
631 /* port reset */
632 switch (port_count) {
633 case 1:
634 u32temp |= 0x00ee0010;
635 break;
636 case 2:
637 u32temp |= 0x00cc0030;
638 break;
639 case 4:
640 u32temp |= 0x00000030;
641 break;
642 }
643
644 nes_debug(NES_DBG_INIT, "Issuing Port Soft reset = 0x%08X\n", u32temp | 0xd);
645 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, u32temp | 0xd);
646
647 i = 0;
648 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
649 mdelay(1);
650 if (i >= 10000) {
651 nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
652 return 0;
653 }
654
655 /* serdes 0 */
656 i = 0;
657 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
658 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
659 mdelay(1);
660 if (i >= 5000) {
661 nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
662 return 0;
663 }
664
665 /* serdes 1 */
666 if (port_count > 1) {
667 i = 0;
668 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
669 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
670 mdelay(1);
671 if (i >= 5000) {
672 nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
673 return 0;
674 }
675 }
678
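	/* Wait for the embedded CPU to report ready (status 0x80). */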
679 i = 0;
680 while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
681 mdelay(1);
682 if (i >= 10000) {
683 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
684 nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
685 return 0;
686 }
687
688 return port_count;
689}
690
691
692/**
693 * nes_init_serdes
694 */
695int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, u8 OneG_Mode)
696{
697 int i;
698 u32 u32temp;
699
700 if (hw_rev != NE020_REV) {
701 /* init serdes 0 */
702
703 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
704 if (!OneG_Mode)
705 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
706 if (port_count > 1) {
707 /* init serdes 1 */
708 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
709 if (!OneG_Mode)
710 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
711 }
712 } else {
713 /* init serdes 0 */
714 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
715 i = 0;
716 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
717 & 0x0000000f)) != 0x0000000f) && i++ < 5000)
718 mdelay(1);
719 if (i >= 5000) {
720 nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
721 return 1;
722 }
723 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
724 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
725 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
726 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
727 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
728 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
729 if (OneG_Mode)
730 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
731 else
732 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
733
734 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
735 if (port_count > 1) {
736 /* init serdes 1 */
737 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x00000048);
738 i = 0;
739 while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
740 & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
741 mdelay(1);
742 if (i >= 5000) {
743 			printk(KERN_ERR PFX "%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
744 /* return 1; */
745 }
746 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x000bdef7);
747 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE1, 0x9ce73000);
748 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE1, 0x0ff00000);
749 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET1, 0x00000000);
750 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS1, 0x00000000);
751 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1, 0x00000000);
752 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL1, 0xf0002222);
753 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000ff);
754 }
755 }
756 return 0;
757}
758
759
760/**
761 * nes_init_csr_ne020
762 * Initialize registers for ne020 hardware
763 */
764void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count)
765{
766 u32 u32temp;
767
768 nes_debug(NES_DBG_INIT, "port_count=%d\n", port_count);
769
770 nes_write_indexed(nesdev, 0x000001E4, 0x00000007);
771 /* nes_write_indexed(nesdev, 0x000001E8, 0x000208C4); */
772 nes_write_indexed(nesdev, 0x000001E8, 0x00020874);
773 nes_write_indexed(nesdev, 0x000001D8, 0x00048002);
774 /* nes_write_indexed(nesdev, 0x000001D8, 0x0004B002); */
775 nes_write_indexed(nesdev, 0x000001FC, 0x00050005);
776 nes_write_indexed(nesdev, 0x00000600, 0x55555555);
777 nes_write_indexed(nesdev, 0x00000604, 0x55555555);
778
779 /* TODO: move these MAC register settings to NIC bringup */
780 nes_write_indexed(nesdev, 0x00002000, 0x00000001);
781 nes_write_indexed(nesdev, 0x00002004, 0x00000001);
782 nes_write_indexed(nesdev, 0x00002008, 0x0000FFFF);
783 nes_write_indexed(nesdev, 0x0000200C, 0x00000001);
784 nes_write_indexed(nesdev, 0x00002010, 0x000003c1);
785 nes_write_indexed(nesdev, 0x0000201C, 0x75345678);
786 if (port_count > 1) {
787 nes_write_indexed(nesdev, 0x00002200, 0x00000001);
788 nes_write_indexed(nesdev, 0x00002204, 0x00000001);
789 nes_write_indexed(nesdev, 0x00002208, 0x0000FFFF);
790 nes_write_indexed(nesdev, 0x0000220C, 0x00000001);
791 nes_write_indexed(nesdev, 0x00002210, 0x000003c1);
792 nes_write_indexed(nesdev, 0x0000221C, 0x75345678);
793 nes_write_indexed(nesdev, 0x00000908, 0x20000001);
794 }
795 if (port_count > 2) {
796 nes_write_indexed(nesdev, 0x00002400, 0x00000001);
797 nes_write_indexed(nesdev, 0x00002404, 0x00000001);
798 nes_write_indexed(nesdev, 0x00002408, 0x0000FFFF);
799 nes_write_indexed(nesdev, 0x0000240C, 0x00000001);
800 nes_write_indexed(nesdev, 0x00002410, 0x000003c1);
801 nes_write_indexed(nesdev, 0x0000241C, 0x75345678);
802 nes_write_indexed(nesdev, 0x00000910, 0x20000001);
803
804 nes_write_indexed(nesdev, 0x00002600, 0x00000001);
805 nes_write_indexed(nesdev, 0x00002604, 0x00000001);
806 nes_write_indexed(nesdev, 0x00002608, 0x0000FFFF);
807 nes_write_indexed(nesdev, 0x0000260C, 0x00000001);
808 nes_write_indexed(nesdev, 0x00002610, 0x000003c1);
809 nes_write_indexed(nesdev, 0x0000261C, 0x75345678);
810 nes_write_indexed(nesdev, 0x00000918, 0x20000001);
811 }
812
813 nes_write_indexed(nesdev, 0x00005000, 0x00018000);
814 /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
815 nes_write_indexed(nesdev, 0x00005004, 0x00020001);
816 nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
817 nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
818 nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
819 nes_write_indexed(nesdev, 0x00005020, 0x1F1F1F1F);
820 nes_write_indexed(nesdev, 0x00006090, 0xFFFFFFFF);
821
822 /* TODO: move this to code, get from EEPROM */
823 nes_write_indexed(nesdev, 0x00000900, 0x20000001);
824 nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
825 nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
827 nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
828 /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
829
830 if (hw_rev != NE020_REV) {
831 u32temp = nes_read_indexed(nesdev, 0x000008e8);
832 u32temp |= 0x80000000;
833 nes_write_indexed(nesdev, 0x000008e8, u32temp);
834 u32temp = nes_read_indexed(nesdev, 0x000021f8);
835 u32temp &= 0x7fffffff;
836 u32temp |= 0x7fff0010;
837 nes_write_indexed(nesdev, 0x000021f8, u32temp);
838 }
839}
840
841
842/**
843 * nes_destroy_adapter - destroy the adapter structure
844 */
845void nes_destroy_adapter(struct nes_adapter *nesadapter)
846{
847 struct nes_adapter *tmp_adapter;
848
849 list_for_each_entry(tmp_adapter, &nes_adapter_list, list) {
850 nes_debug(NES_DBG_SHUTDOWN, "Nes Adapter list entry = 0x%p.\n",
851 tmp_adapter);
852 }
853
854 nesadapter->ref_count--;
855 if (!nesadapter->ref_count) {
856 if (nesadapter->hw_rev == NE020_REV) {
857 del_timer(&nesadapter->mh_timer);
858 }
859 del_timer(&nesadapter->lc_timer);
860
861 list_del(&nesadapter->list);
862 kfree(nesadapter);
863 }
864}
865
866
867/**
868 * nes_init_cqp
869 */
870int nes_init_cqp(struct nes_device *nesdev)
871{
872 struct nes_adapter *nesadapter = nesdev->nesadapter;
873 struct nes_hw_cqp_qp_context *cqp_qp_context;
874 struct nes_hw_cqp_wqe *cqp_wqe;
875 struct nes_hw_ceq *ceq;
876 struct nes_hw_ceq *nic_ceq;
877 struct nes_hw_aeq *aeq;
878 void *vmem;
879 dma_addr_t pmem;
880 u32 count=0;
881 u32 cqp_head;
882 u64 u64temp;
883 u32 u32temp;
884
885 /* allocate CQP memory */
886 /* Need to add max_cq to the aeq size once cq overflow checking is added back */
887 /* SQ is 512 byte aligned, others are 256 byte aligned */
888 nesdev->cqp_mem_size = 512 +
889 (sizeof(struct nes_hw_cqp_wqe) * NES_CQP_SQ_SIZE) +
890 (sizeof(struct nes_hw_cqe) * NES_CCQ_SIZE) +
891 max(((u32)sizeof(struct nes_hw_ceqe) * NES_CCEQ_SIZE), (u32)256) +
892 max(((u32)sizeof(struct nes_hw_ceqe) * NES_NIC_CEQ_SIZE), (u32)256) +
893 (sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
894 sizeof(struct nes_hw_cqp_qp_context);
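	/* A single DMA-coherent allocation is carved up below into the CQP
	 * SQ, the CCQ, two CEQs, the AEQ and the CQP QP context, in order. */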
895
896 nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
897 &nesdev->cqp_pbase);
898 if (!nesdev->cqp_vbase) {
899 nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
900 return -ENOMEM;
901 }
902 memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
903
904 	/* Allocate twice as many CQP request structures as the SQ has entries */
905 nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
906 2 * NES_CQP_SQ_SIZE, GFP_KERNEL);
907 if (nesdev->nes_cqp_requests == NULL) {
908 		nes_debug(NES_DBG_INIT, "Unable to allocate memory for CQP request entries.\n");
909 pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
910 nesdev->cqp.sq_pbase);
911 return -ENOMEM;
912 }
913
914 nes_debug(NES_DBG_INIT, "Allocated CQP structures at %p (phys = %016lX), size = %u.\n",
915 nesdev->cqp_vbase, (unsigned long)nesdev->cqp_pbase, nesdev->cqp_mem_size);
916
917 spin_lock_init(&nesdev->cqp.lock);
918 init_waitqueue_head(&nesdev->cqp.waitq);
919
920 /* Setup Various Structures */
921 vmem = (void *)(((unsigned long)nesdev->cqp_vbase + (512 - 1)) &
922 ~(unsigned long)(512 - 1));
923 pmem = (dma_addr_t)(((unsigned long long)nesdev->cqp_pbase + (512 - 1)) &
924 ~(unsigned long long)(512 - 1));
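	/* Round the allocation up to the next 512-byte boundary:
	 * (addr + 511) & ~511. */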
925
926 nesdev->cqp.sq_vbase = vmem;
927 nesdev->cqp.sq_pbase = pmem;
928 nesdev->cqp.sq_size = NES_CQP_SQ_SIZE;
929 nesdev->cqp.sq_head = 0;
930 nesdev->cqp.sq_tail = 0;
931 nesdev->cqp.qp_id = PCI_FUNC(nesdev->pcidev->devfn);
932
933 vmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
934 pmem += (sizeof(struct nes_hw_cqp_wqe) * nesdev->cqp.sq_size);
935
936 nesdev->ccq.cq_vbase = vmem;
937 nesdev->ccq.cq_pbase = pmem;
938 nesdev->ccq.cq_size = NES_CCQ_SIZE;
939 nesdev->ccq.cq_head = 0;
940 nesdev->ccq.ce_handler = nes_cqp_ce_handler;
941 nesdev->ccq.cq_number = PCI_FUNC(nesdev->pcidev->devfn);
942
943 vmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
944 pmem += (sizeof(struct nes_hw_cqe) * nesdev->ccq.cq_size);
945
946 nesdev->ceq_index = PCI_FUNC(nesdev->pcidev->devfn);
947 ceq = &nesadapter->ceq[nesdev->ceq_index];
948 ceq->ceq_vbase = vmem;
949 ceq->ceq_pbase = pmem;
950 ceq->ceq_size = NES_CCEQ_SIZE;
951 ceq->ceq_head = 0;
952
953 vmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
954 pmem += max(((u32)sizeof(struct nes_hw_ceqe) * ceq->ceq_size), (u32)256);
955
956 nesdev->nic_ceq_index = PCI_FUNC(nesdev->pcidev->devfn) + 8;
957 nic_ceq = &nesadapter->ceq[nesdev->nic_ceq_index];
958 nic_ceq->ceq_vbase = vmem;
959 nic_ceq->ceq_pbase = pmem;
960 nic_ceq->ceq_size = NES_NIC_CEQ_SIZE;
961 nic_ceq->ceq_head = 0;
962
963 vmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
964 pmem += max(((u32)sizeof(struct nes_hw_ceqe) * nic_ceq->ceq_size), (u32)256);
965
966 aeq = &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)];
967 aeq->aeq_vbase = vmem;
968 aeq->aeq_pbase = pmem;
969 aeq->aeq_size = nesadapter->max_qp;
970 aeq->aeq_head = 0;
971
972 /* Setup QP Context */
973 vmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
974 pmem += (sizeof(struct nes_hw_aeqe) * aeq->aeq_size);
975
976 cqp_qp_context = vmem;
977 cqp_qp_context->context_words[0] =
978 cpu_to_le32((PCI_FUNC(nesdev->pcidev->devfn) << 12) + (2 << 10));
979 cqp_qp_context->context_words[1] = 0;
980 cqp_qp_context->context_words[2] = cpu_to_le32((u32)nesdev->cqp.sq_pbase);
981 cqp_qp_context->context_words[3] = cpu_to_le32(((u64)nesdev->cqp.sq_pbase) >> 32);
982
983
984 /* Write the address to Create CQP */
985 	if (sizeof(dma_addr_t) > 4) {
986 nes_write_indexed(nesdev,
987 NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
988 ((u64)pmem) >> 32);
989 } else {
990 nes_write_indexed(nesdev,
991 NES_IDX_CREATE_CQP_HIGH + (PCI_FUNC(nesdev->pcidev->devfn) * 8), 0);
992 }
993 nes_write_indexed(nesdev,
994 NES_IDX_CREATE_CQP_LOW + (PCI_FUNC(nesdev->pcidev->devfn) * 8),
995 (u32)pmem);
996
997 INIT_LIST_HEAD(&nesdev->cqp_avail_reqs);
998 INIT_LIST_HEAD(&nesdev->cqp_pending_reqs);
999
1000 	for (count = 0; count < 2 * NES_CQP_SQ_SIZE; count++) {
1001 init_waitqueue_head(&nesdev->nes_cqp_requests[count].waitq);
1002 list_add_tail(&nesdev->nes_cqp_requests[count].list, &nesdev->cqp_avail_reqs);
1003 }
1004
1005 /* Write Create CCQ WQE */
1006 cqp_head = nesdev->cqp.sq_head++;
1007 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1008 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1009 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1010 (NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
1011 NES_CQP_CQ_CHK_OVERFLOW | ((u32)nesdev->ccq.cq_size << 16)));
1012 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
1013 (nesdev->ccq.cq_number |
1014 ((u32)nesdev->ceq_index << 16)));
1015 u64temp = (u64)nesdev->ccq.cq_pbase;
1016 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
1017 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
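	/* Stash the 64-bit kernel pointer to the CCQ in the WQE with its low
	 * bit dropped (the pointer is at least 2-byte aligned); it is
	 * recovered in nes_process_ceq by shifting left one bit. */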
1018 u64temp = (unsigned long)&nesdev->ccq;
1019 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
1020 cpu_to_le32((u32)(u64temp >> 1));
1021 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
1022 cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
1023 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
1024
1025 /* Write Create CEQ WQE */
1026 cqp_head = nesdev->cqp.sq_head++;
1027 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1028 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1029 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1030 (NES_CQP_CREATE_CEQ + ((u32)nesdev->ceq_index << 8)));
1031 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, ceq->ceq_size);
1032 u64temp = (u64)ceq->ceq_pbase;
1033 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
1034
1035 /* Write Create AEQ WQE */
1036 cqp_head = nesdev->cqp.sq_head++;
1037 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1038 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1039 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1040 (NES_CQP_CREATE_AEQ + ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8)));
1041 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX, aeq->aeq_size);
1042 u64temp = (u64)aeq->aeq_pbase;
1043 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
1044
1045 /* Write Create NIC CEQ WQE */
1046 cqp_head = nesdev->cqp.sq_head++;
1047 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1048 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1049 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1050 (NES_CQP_CREATE_CEQ + ((u32)nesdev->nic_ceq_index << 8)));
1051 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX, nic_ceq->ceq_size);
1052 u64temp = (u64)nic_ceq->ceq_pbase;
1053 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
1054
1055 /* Poll until CCQP done */
1056 count = 0;
1057 do {
1058 if (count++ > 1000) {
1059 printk(KERN_ERR PFX "Error creating CQP\n");
1060 pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
1061 nesdev->cqp_vbase, nesdev->cqp_pbase);
1062 return -1;
1063 }
1064 udelay(10);
1065 } while (!(nes_read_indexed(nesdev,
1066 NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn) * 8)) & (1 << 8)));
1067
1068 nes_debug(NES_DBG_INIT, "CQP Status = 0x%08X\n", nes_read_indexed(nesdev,
1069 NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
1070
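	/* Ring the CQP doorbell for the four WQEs written above: bits 31:24
	 * carry the WQE count, and bit 23 appears to select the SQ (the NIC
	 * RQ posts elsewhere use just count << 24). */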
1071 u32temp = 0x04800000;
1072 nes_write32(nesdev->regs+NES_WQE_ALLOC, u32temp | nesdev->cqp.qp_id);
1073
1074 /* wait for the CCQ, CEQ, and AEQ to get created */
1075 count = 0;
1076 do {
1077 if (count++ > 1000) {
1078 printk(KERN_ERR PFX "Error creating CCQ, CEQ, and AEQ\n");
1079 pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
1080 nesdev->cqp_vbase, nesdev->cqp_pbase);
1081 return -1;
1082 }
1083 udelay(10);
1084 } while (((nes_read_indexed(nesdev,
1085 NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15<<8)) != (15<<8)));
1086
1087 /* dump the QP status value */
1088 nes_debug(NES_DBG_INIT, "QP Status = 0x%08X\n", nes_read_indexed(nesdev,
1089 NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
1090
1091 nesdev->cqp.sq_tail++;
1092
1093 return 0;
1094}
1095
1096
1097/**
1098 * nes_destroy_cqp
1099 */
1100int nes_destroy_cqp(struct nes_device *nesdev)
1101{
1102 struct nes_hw_cqp_wqe *cqp_wqe;
1103 u32 count = 0;
1104 u32 cqp_head;
1105 unsigned long flags;
1106
1107 do {
1108 if (count++ > 1000)
1109 break;
1110 udelay(10);
1111 } while (!(nesdev->cqp.sq_head == nesdev->cqp.sq_tail));
1112
1113 /* Reset CCQ */
1114 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_RESET |
1115 nesdev->ccq.cq_number);
1116
1117 /* Disable device interrupts */
1118 nes_write32(nesdev->regs+NES_INT_MASK, 0x7fffffff);
1119
1120 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1121
1122 /* Destroy the AEQ */
1123 cqp_head = nesdev->cqp.sq_head++;
1124 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
1125 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1126 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_AEQ |
1127 ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 8));
1128 cqp_wqe->wqe_words[NES_CQP_WQE_COMP_CTX_HIGH_IDX] = 0;
1129
1130 /* Destroy the NIC CEQ */
1131 cqp_head = nesdev->cqp.sq_head++;
1132 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
1133 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1134 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
1135 ((u32)nesdev->nic_ceq_index << 8));
1136
1137 /* Destroy the CEQ */
1138 cqp_head = nesdev->cqp.sq_head++;
1139 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
1140 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1141 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CEQ |
1142 (nesdev->ceq_index << 8));
1143
1144 /* Destroy the CCQ */
1145 cqp_head = nesdev->cqp.sq_head++;
1146 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
1147 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1148 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_CQ);
1149 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->ccq.cq_number |
1150 ((u32)nesdev->ceq_index << 16));
1151
1152 /* Destroy CQP */
1153 cqp_head = nesdev->cqp.sq_head++;
1154 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
1155 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1156 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_DESTROY_QP |
1157 NES_CQP_QP_TYPE_CQP);
1158 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesdev->cqp.qp_id);
1159
1160 barrier();
1161 /* Ring doorbell (5 WQEs) */
1162 nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x05800000 | nesdev->cqp.qp_id);
1163
1164 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1165
1166 /* wait for the CCQ, CEQ, and AEQ to get destroyed */
1167 count = 0;
1168 do {
1169 if (count++ > 1000) {
1170 printk(KERN_ERR PFX "Function%d: Error destroying CCQ, CEQ, and AEQ\n",
1171 PCI_FUNC(nesdev->pcidev->devfn));
1172 break;
1173 }
1174 udelay(10);
1175 } while (((nes_read_indexed(nesdev,
1176 NES_IDX_QP_CONTROL + (PCI_FUNC(nesdev->pcidev->devfn)*8)) & (15 << 8)) != 0));
1177
1178 /* dump the QP status value */
1179 nes_debug(NES_DBG_SHUTDOWN, "Function%d: QP Status = 0x%08X\n",
1180 PCI_FUNC(nesdev->pcidev->devfn),
1181 nes_read_indexed(nesdev,
1182 NES_IDX_QP_CONTROL+(PCI_FUNC(nesdev->pcidev->devfn)*8)));
1183
1184 kfree(nesdev->nes_cqp_requests);
1185
1186 /* Free the control structures */
1187 pci_free_consistent(nesdev->pcidev, nesdev->cqp_mem_size, nesdev->cqp.sq_vbase,
1188 nesdev->cqp.sq_pbase);
1189
1190 return 0;
1191}
1192
1193
1194/**
1195 * nes_init_phy
1196 */
1197int nes_init_phy(struct nes_device *nesdev)
1198{
1199 struct nes_adapter *nesadapter = nesdev->nesadapter;
1200 u32 counter = 0;
1201 u32 mac_index = nesdev->mac_index;
1202 u32 tx_config;
1203 u16 phy_data;
1204
1205 if (nesadapter->OneG_Mode) {
1206 nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
1207 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
1208 			printk(KERN_INFO PFX "%s: Programming mdc config for 1G\n", __func__);
1209 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1210 tx_config |= 0x04;
1211 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1212 }
1213
1214 nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
1215 nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
1216 nesadapter->phy_index[mac_index], phy_data);
1217 nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
1218
1219 /* Reset the PHY */
1220 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
1221 udelay(100);
1222 counter = 0;
1223 do {
1224 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
1225 nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
1226 if (counter++ > 100) break;
1227 } while (phy_data & 0x8000);
1228
1229 /* Setting no phy loopback */
1230 phy_data &= 0xbfff;
1231 phy_data |= 0x1140;
1232 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data);
1233 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
1234 nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data);
1235
1236 nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data);
1237 nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data);
1238
1239 nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data);
1240 nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data);
1241
1242 /* Setting the interrupt mask */
1243 nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
1244 nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
1245 nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee);
1246
1247 nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data);
1248 nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data);
1249
1250 /* turning on flow control */
1251 nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
1252 nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
1253 nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
1254 (phy_data & ~(0x03E0)) | 0xc00);
1255 /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index],
1256 phy_data | 0xc00); */
1257 nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data);
1258 nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data);
1259
1260 nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
1261 nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
1262 /* Clear Half duplex */
1263 nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index],
1264 phy_data & ~(0x0100));
1265 nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data);
1266 nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data);
1267
1268 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
1269 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
1270 } else {
1271 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
1272 /* setup 10G MDIO operation */
1273 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1274 tx_config |= 0x14;
1275 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1276 }
1277 }
1278 return 0;
1279}
1280
1281
1282/**
1283 * nes_replenish_nic_rq
1284 */
1285static void nes_replenish_nic_rq(struct nes_vnic *nesvnic)
1286{
1287 unsigned long flags;
1288 dma_addr_t bus_address;
1289 struct sk_buff *skb;
1290 struct nes_hw_nic_rq_wqe *nic_rqe;
1291 struct nes_hw_nic *nesnic;
1292 struct nes_device *nesdev;
1293 u32 rx_wqes_posted = 0;
1294
1295 nesnic = &nesvnic->nic;
1296 nesdev = nesvnic->nesdev;
1297 spin_lock_irqsave(&nesnic->rq_lock, flags);
1298 	if (nesnic->replenishing_rq != 0) {
1299 if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
1300 (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
1301 atomic_set(&nesvnic->rx_skb_timer_running, 1);
1302 spin_unlock_irqrestore(&nesnic->rq_lock, flags);
1303 nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
1304 add_timer(&nesvnic->rq_wqes_timer);
1305 } else
1306 spin_unlock_irqrestore(&nesnic->rq_lock, flags);
1307 return;
1308 }
1309 nesnic->replenishing_rq = 1;
1310 spin_unlock_irqrestore(&nesnic->rq_lock, flags);
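	/* Refill the RQ with fresh skbs, ringing the doorbell in batches of
	 * at most 255 -- the widest count the 8-bit field in bits 31:24 of
	 * the doorbell word can carry. */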
1311 do {
1312 skb = dev_alloc_skb(nesvnic->max_frame_size);
1313 if (skb) {
1314 skb->dev = nesvnic->netdev;
1315
1316 bus_address = pci_map_single(nesdev->pcidev,
1317 skb->data, nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
1318
1319 nic_rqe = &nesnic->rq_vbase[nesvnic->nic.rq_head];
1320 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] =
1321 cpu_to_le32(nesvnic->max_frame_size);
1322 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
1323 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] =
1324 cpu_to_le32((u32)bus_address);
1325 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] =
1326 cpu_to_le32((u32)((u64)bus_address >> 32));
1327 nesnic->rx_skb[nesnic->rq_head] = skb;
1328 nesnic->rq_head++;
1329 nesnic->rq_head &= nesnic->rq_size - 1;
1330 atomic_dec(&nesvnic->rx_skbs_needed);
1331 barrier();
1332 if (++rx_wqes_posted == 255) {
1333 nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
1334 rx_wqes_posted = 0;
1335 }
1336 } else {
1337 spin_lock_irqsave(&nesnic->rq_lock, flags);
1338 if (((nesnic->rq_size-1) == atomic_read(&nesvnic->rx_skbs_needed)) &&
1339 (atomic_read(&nesvnic->rx_skb_timer_running) == 0)) {
1340 atomic_set(&nesvnic->rx_skb_timer_running, 1);
1341 spin_unlock_irqrestore(&nesnic->rq_lock, flags);
1342 nesvnic->rq_wqes_timer.expires = jiffies + (HZ/2); /* 1/2 second */
1343 add_timer(&nesvnic->rq_wqes_timer);
1344 } else
1345 spin_unlock_irqrestore(&nesnic->rq_lock, flags);
1346 break;
1347 }
1348 } while (atomic_read(&nesvnic->rx_skbs_needed));
1349 barrier();
1350 if (rx_wqes_posted)
1351 nes_write32(nesdev->regs+NES_WQE_ALLOC, (rx_wqes_posted << 24) | nesnic->qp_id);
1352 nesnic->replenishing_rq = 0;
1353}
1354
1355
1356/**
1357 * nes_rq_wqes_timeout
1358 */
1359static void nes_rq_wqes_timeout(unsigned long parm)
1360{
1361 struct nes_vnic *nesvnic = (struct nes_vnic *)parm;
1362 	printk(KERN_DEBUG PFX "%s: Timer fired.\n", __func__);
1363 atomic_set(&nesvnic->rx_skb_timer_running, 0);
1364 if (atomic_read(&nesvnic->rx_skbs_needed))
1365 nes_replenish_nic_rq(nesvnic);
1366}
1367
1368
1369/**
1370 * nes_init_nic_qp
1371 */
1372int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1373{
1374 struct nes_hw_cqp_wqe *cqp_wqe;
1375 struct nes_hw_nic_sq_wqe *nic_sqe;
1376 struct nes_hw_nic_qp_context *nic_context;
1377 struct sk_buff *skb;
1378 struct nes_hw_nic_rq_wqe *nic_rqe;
1379 struct nes_vnic *nesvnic = netdev_priv(netdev);
1380 unsigned long flags;
1381 void *vmem;
1382 dma_addr_t pmem;
1383 u64 u64temp;
1384 int ret;
1385 u32 cqp_head;
1386 u32 counter;
1387 u32 wqe_count;
1388 	u8 jumbomode = 0;
1389
1390 /* Allocate fragment, SQ, RQ, and CQ; Reuse CEQ based on the PCI function */
1391 nesvnic->nic_mem_size = 256 +
1392 (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag)) +
1393 (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe)) +
1394 (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe)) +
1395 (NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
1396 sizeof(struct nes_hw_nic_qp_context);
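	/* As with the CQP setup, one DMA allocation holds the first-frag
	 * buffers, SQ, RQ, CQ and NIC QP context back to back. */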
1397
1398 nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
1399 &nesvnic->nic_pbase);
1400 if (!nesvnic->nic_vbase) {
1401 nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
1402 return -ENOMEM;
1403 }
1404 memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
1405 nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
1406 nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
1407
1408 vmem = (void *)(((unsigned long)nesvnic->nic_vbase + (256 - 1)) &
1409 ~(unsigned long)(256 - 1));
1410 pmem = (dma_addr_t)(((unsigned long long)nesvnic->nic_pbase + (256 - 1)) &
1411 ~(unsigned long long)(256 - 1));
1412
1413 /* Setup the first Fragment buffers */
1414 nesvnic->nic.first_frag_vbase = vmem;
1415
1416 for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
1417 nesvnic->nic.frag_paddr[counter] = pmem;
1418 pmem += sizeof(struct nes_first_frag);
1419 }
1420
1421 /* setup the SQ */
1422 vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_first_frag));
1423
1424 nesvnic->nic.sq_vbase = (void *)vmem;
1425 nesvnic->nic.sq_pbase = pmem;
1426 nesvnic->nic.sq_head = 0;
1427 nesvnic->nic.sq_tail = 0;
1428 nesvnic->nic.sq_size = NES_NIC_WQ_SIZE;
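	/* Pre-program every SQ WQE with its dedicated first-fragment buffer,
	 * presumably so the transmit path only has to patch in lengths and
	 * any extra fragments. */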
1429 for (counter = 0; counter < NES_NIC_WQ_SIZE; counter++) {
1430 nic_sqe = &nesvnic->nic.sq_vbase[counter];
1431 nic_sqe->wqe_words[NES_NIC_SQ_WQE_MISC_IDX] =
1432 cpu_to_le32(NES_NIC_SQ_WQE_DISABLE_CHKSUM |
1433 NES_NIC_SQ_WQE_COMPLETION);
1434 nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX] =
1435 cpu_to_le32((u32)NES_FIRST_FRAG_SIZE << 16);
1436 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX] =
1437 cpu_to_le32((u32)nesvnic->nic.frag_paddr[counter]);
1438 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX] =
1439 cpu_to_le32((u32)((u64)nesvnic->nic.frag_paddr[counter] >> 32));
1440 }
1441
1442 nesvnic->get_cqp_request = nes_get_cqp_request;
1443 nesvnic->post_cqp_request = nes_post_cqp_request;
1444 nesvnic->mcrq_mcast_filter = NULL;
1445
1446 spin_lock_init(&nesvnic->nic.sq_lock);
1447 spin_lock_init(&nesvnic->nic.rq_lock);
1448
1449 /* setup the RQ */
1450 vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
1451 pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_sq_wqe));
1452
1454 nesvnic->nic.rq_vbase = vmem;
1455 nesvnic->nic.rq_pbase = pmem;
1456 nesvnic->nic.rq_head = 0;
1457 nesvnic->nic.rq_tail = 0;
1458 nesvnic->nic.rq_size = NES_NIC_WQ_SIZE;
1459
1460 /* setup the CQ */
1461 vmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
1462 pmem += (NES_NIC_WQ_SIZE * sizeof(struct nes_hw_nic_rq_wqe));
1463
1464 if (nesdev->nesadapter->netdev_count > 2)
1465 nesvnic->mcrq_qp_id = nesvnic->nic_index + 32;
1466 else
1467 nesvnic->mcrq_qp_id = nesvnic->nic.qp_id + 4;
1468
1469 nesvnic->nic_cq.cq_vbase = vmem;
1470 nesvnic->nic_cq.cq_pbase = pmem;
1471 nesvnic->nic_cq.cq_head = 0;
1472 nesvnic->nic_cq.cq_size = NES_NIC_WQ_SIZE * 2;
1473
1474 nesvnic->nic_cq.ce_handler = nes_nic_napi_ce_handler;
1475
1476 /* Send CreateCQ request to CQP */
1477 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1478 cqp_head = nesdev->cqp.sq_head;
1479
1480 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1481 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1482
1483 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
1484 NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
1485 ((u32)nesvnic->nic_cq.cq_size << 16));
1486 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(
1487 nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16));
1488 u64temp = (u64)nesvnic->nic_cq.cq_pbase;
1489 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
1490 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] = 0;
1491 u64temp = (unsigned long)&nesvnic->nic_cq;
1492 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] = cpu_to_le32((u32)(u64temp >> 1));
1493 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
1494 cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
1495 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
1496 if (++cqp_head >= nesdev->cqp.sq_size)
1497 cqp_head = 0;
1498 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1499 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1500
1501 /* Send CreateQP request to CQP */
1502 nic_context = (void *)(&nesvnic->nic_cq.cq_vbase[nesvnic->nic_cq.cq_size]);
1503 nic_context->context_words[NES_NIC_CTX_MISC_IDX] =
1504 cpu_to_le32((u32)NES_NIC_CTX_SIZE |
1505 ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 12));
1506 nes_debug(NES_DBG_INIT, "RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x%08X, RX_WINDOW_BUFFER_SIZE = 0x%08X\n",
1507 nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE),
1508 nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE));
1509 if (nes_read_indexed(nesdev, NES_IDX_RX_WINDOW_BUFFER_SIZE) != 0) {
1510 nic_context->context_words[NES_NIC_CTX_MISC_IDX] |= cpu_to_le32(NES_NIC_BACK_STORE);
1511 }
1512
1513 u64temp = (u64)nesvnic->nic.sq_pbase;
1514 nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
1515 nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
1516 u64temp = (u64)nesvnic->nic.rq_pbase;
1517 nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
1518 nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
1519
1520 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
1521 NES_CQP_QP_TYPE_NIC);
1522 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesvnic->nic.qp_id);
1523 u64temp = (u64)nesvnic->nic_cq.cq_pbase +
1524 (nesvnic->nic_cq.cq_size * sizeof(struct nes_hw_nic_cqe));
1525 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
1526
1527 if (++cqp_head >= nesdev->cqp.sq_size)
1528 cqp_head = 0;
1529 nesdev->cqp.sq_head = cqp_head;
1530
1531 barrier();
1532
1533 /* Ring doorbell (2 WQEs) */
1534 nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
1535
1536 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1537 nes_debug(NES_DBG_INIT, "Waiting for create NIC QP%u to complete.\n",
1538 nesvnic->nic.qp_id);
1539
1540 ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
1541 NES_EVENT_TIMEOUT);
1542 nes_debug(NES_DBG_INIT, "Create NIC QP%u completed, wait_event_timeout ret = %u.\n",
1543 nesvnic->nic.qp_id, ret);
1544 if (!ret) {
1545 nes_debug(NES_DBG_INIT, "NIC QP%u create timeout expired\n", nesvnic->nic.qp_id);
1546 pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
1547 nesvnic->nic_pbase);
1548 return -EIO;
1549 }
1550
1551 /* Populate the RQ */
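	/* Post all but one RQE: with a completely full ring, head == tail
	 * would presumably be indistinguishable from an empty one. */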
1552 for (counter = 0; counter < (NES_NIC_WQ_SIZE - 1); counter++) {
1553 skb = dev_alloc_skb(nesvnic->max_frame_size);
1554 if (!skb) {
1555 nes_debug(NES_DBG_INIT, "%s: out of memory for receive skb\n", netdev->name);
1556
1557 nes_destroy_nic_qp(nesvnic);
1558 return -ENOMEM;
1559 }
1560
1561 skb->dev = netdev;
1562
1563 pmem = pci_map_single(nesdev->pcidev, skb->data,
1564 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
1565
1566 nic_rqe = &nesvnic->nic.rq_vbase[counter];
1567 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
1568 nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
1569 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
1570 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
1571 nesvnic->nic.rx_skb[counter] = skb;
1572 }
1573
1574 wqe_count = NES_NIC_WQ_SIZE - 1;
1575 nesvnic->nic.rq_head = wqe_count;
1576 barrier();
1577 do {
1578 counter = min(wqe_count, ((u32)255));
1579 wqe_count -= counter;
1580 nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter << 24) | nesvnic->nic.qp_id);
1581 } while (wqe_count);
1582 init_timer(&nesvnic->rq_wqes_timer);
1583 nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
1584 nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
1585 nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
1586
1587 	if (nesdev->nesadapter->et_use_adaptive_rx_coalesce) {
1589 nes_nic_init_timer(nesdev);
1590 if (netdev->mtu > 1500)
1591 jumbomode = 1;
1592 nes_nic_init_timer_defaults(nesdev, jumbomode);
1593 }
1594
1595 return 0;
1596}
1597
1598
1599/**
1600 * nes_destroy_nic_qp
1601 */
1602void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1603{
1604 struct nes_device *nesdev = nesvnic->nesdev;
1605 struct nes_hw_cqp_wqe *cqp_wqe;
1606 struct nes_hw_nic_rq_wqe *nic_rqe;
1607 u64 wqe_frag;
1608 u32 cqp_head;
1609 unsigned long flags;
1610 int ret;
1611
1612 /* Free remaining NIC receive buffers */
1613 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
1614 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
1615 wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
1616 wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
1617 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
1618 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
1619 dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
1620 nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
1621 }
1622
1623 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1624
1625 /* Destroy NIC QP */
1626 cqp_head = nesdev->cqp.sq_head;
1627 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1628 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1629
1630 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1631 (NES_CQP_DESTROY_QP | NES_CQP_QP_TYPE_NIC));
1632 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
1633 nesvnic->nic.qp_id);
1634
1635 if (++cqp_head >= nesdev->cqp.sq_size)
1636 cqp_head = 0;
1637
1638 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
1639
1640 /* Destroy NIC CQ */
1641 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1642 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
1643 (NES_CQP_DESTROY_CQ | ((u32)nesvnic->nic_cq.cq_size << 16)));
1644 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
1645 (nesvnic->nic_cq.cq_number | ((u32)nesdev->nic_ceq_index << 16)));
1646
1647 if (++cqp_head >= nesdev->cqp.sq_size)
1648 cqp_head = 0;
1649
1650 nesdev->cqp.sq_head = cqp_head;
1651 barrier();
1652
1653 /* Ring doorbell (2 WQEs) */
1654 nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x02800000 | nesdev->cqp.qp_id);
1655
1656 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1657 nes_debug(NES_DBG_SHUTDOWN, "Waiting for CQP, cqp_head=%u, cqp.sq_head=%u,"
1658 " cqp.sq_tail=%u, cqp.sq_size=%u\n",
1659 cqp_head, nesdev->cqp.sq_head,
1660 nesdev->cqp.sq_tail, nesdev->cqp.sq_size);
1661
1662 ret = wait_event_timeout(nesdev->cqp.waitq, (nesdev->cqp.sq_tail == cqp_head),
1663 NES_EVENT_TIMEOUT);
1664
1665 nes_debug(NES_DBG_SHUTDOWN, "Destroy NIC QP returned, wait_event_timeout ret = %u, cqp_head=%u,"
1666 " cqp.sq_head=%u, cqp.sq_tail=%u\n",
1667 ret, cqp_head, nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
1668 if (!ret) {
1669 nes_debug(NES_DBG_SHUTDOWN, "NIC QP%u destroy timeout expired\n",
1670 nesvnic->nic.qp_id);
1671 }
1672
1673 pci_free_consistent(nesdev->pcidev, nesvnic->nic_mem_size, nesvnic->nic_vbase,
1674 nesvnic->nic_pbase);
1675}
1676
1677/**
1678 * nes_napi_isr
1679 */
1680int nes_napi_isr(struct nes_device *nesdev)
1681{
1682 struct nes_adapter *nesadapter = nesdev->nesadapter;
1683 u32 int_stat;
1684
1685 if (nesdev->napi_isr_ran) {
1686 /* interrupt status has already been read in ISR */
1687 int_stat = nesdev->int_stat;
1688 } else {
1689 int_stat = nes_read32(nesdev->regs + NES_INT_STAT);
1690 nesdev->int_stat = int_stat;
1691 nesdev->napi_isr_ran = 1;
1692 }
1693
1694 int_stat &= nesdev->int_req;
1695 	/* if only NIC interrupts are pending, handle them here; anything else waits for the DPC */
1696 if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
1697 nesdev->napi_isr_ran = 0;
1698 nes_write32(nesdev->regs+NES_INT_STAT,
1699 (int_stat &
1700 ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
1701
1702 /* Process the CEQs */
1703 nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
1704
1705 if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
1706 (!nesadapter->et_use_adaptive_rx_coalesce)) ||
1707 ((nesadapter->et_use_adaptive_rx_coalesce) &&
1708 (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) {
1709 if ((nesdev->int_req & NES_INT_TIMER) == 0) {
1710 /* Enable Periodic timer interrupts */
1711 nesdev->int_req |= NES_INT_TIMER;
1712 /* ack any pending periodic timer interrupts so we don't get an immediate interrupt */
1713 /* TODO: need to also ack other unused periodic timer values, get from nesadapter */
1714 nes_write32(nesdev->regs+NES_TIMER_STAT,
1715 nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
1716 nes_write32(nesdev->regs+NES_INTF_INT_MASK,
1717 ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
1718 }
1719
1720 		if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
1722 nes_nic_init_timer(nesdev);
1723 }
1724 /* Enable interrupts, except CEQs */
1725 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
1726 } else {
1727 /* Enable interrupts, make sure timer is off */
1728 nesdev->int_req &= ~NES_INT_TIMER;
1729 nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1730 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1731 nesadapter->tune_timer.timer_in_use_old = 0;
1732 }
1733 nesdev->deepcq_count = 0;
1734 return 1;
1735 } else {
1736 return 0;
1737 }
1738}
1739
1740
1741/**
1742 * nes_dpc
1743 */
1744void nes_dpc(unsigned long param)
1745{
1746 struct nes_device *nesdev = (struct nes_device *)param;
1747 struct nes_adapter *nesadapter = nesdev->nesadapter;
1748 u32 counter;
1749 u32 loop_counter = 0;
1750 u32 int_status_bit;
1751 u32 int_stat;
1752 u32 timer_stat;
1753 u32 temp_int_stat;
1754 u32 intf_int_stat;
1755 u32 debug_error;
1756 u32 processed_intf_int = 0;
1757 u16 processed_timer_int = 0;
1758 u16 completion_ints = 0;
1759 u16 timer_ints = 0;
1760
1761 /* nes_debug(NES_DBG_ISR, "\n"); */
1762
1763 do {
1764 timer_stat = 0;
1765 if (nesdev->napi_isr_ran) {
1766 nesdev->napi_isr_ran = 0;
1767 int_stat = nesdev->int_stat;
1768 } else
1769 int_stat = nes_read32(nesdev->regs+NES_INT_STAT);
1770 if (processed_intf_int != 0)
1771 int_stat &= nesdev->int_req & ~NES_INT_INTF;
1772 else
1773 int_stat &= nesdev->int_req;
1774 if (processed_timer_int == 0) {
1775 processed_timer_int = 1;
1776 if (int_stat & NES_INT_TIMER) {
1777 timer_stat = nes_read32(nesdev->regs + NES_TIMER_STAT);
1778 if ((timer_stat & nesdev->timer_int_req) == 0) {
1779 int_stat &= ~NES_INT_TIMER;
1780 }
1781 }
1782 } else {
1783 int_stat &= ~NES_INT_TIMER;
1784 }
1785
1786 if (int_stat) {
1787 if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
1788 NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) {
1789 /* Ack the interrupts */
1790 nes_write32(nesdev->regs+NES_INT_STAT,
1791 (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
1792 NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
1793 }
1794
1795 temp_int_stat = int_stat;
1796 for (counter = 0, int_status_bit = 1; counter < 16; counter++) {
1797 if (int_stat & int_status_bit) {
1798 nes_process_ceq(nesdev, &nesadapter->ceq[counter]);
1799 temp_int_stat &= ~int_status_bit;
1800 completion_ints = 1;
1801 }
1802 if (!(temp_int_stat & 0x0000ffff))
1803 break;
1804 int_status_bit <<= 1;
1805 }
1806
1807 /* Process the AEQ for this pci function */
1808 int_status_bit = 1 << (16 + PCI_FUNC(nesdev->pcidev->devfn));
1809 if (int_stat & int_status_bit) {
1810 nes_process_aeq(nesdev, &nesadapter->aeq[PCI_FUNC(nesdev->pcidev->devfn)]);
1811 }
1812
1813 /* Process the MAC interrupt for this pci function */
1814 int_status_bit = 1 << (24 + nesdev->mac_index);
1815 if (int_stat & int_status_bit) {
1816 nes_process_mac_intr(nesdev, nesdev->mac_index);
1817 }
1818
1819 if (int_stat & NES_INT_TIMER) {
1820 if (timer_stat & nesdev->timer_int_req) {
1821 nes_write32(nesdev->regs + NES_TIMER_STAT,
1822 (timer_stat & nesdev->timer_int_req) |
1823 ~(nesdev->nesadapter->timer_int_req));
1824 timer_ints = 1;
1825 }
1826 }
1827
1828 if (int_stat & NES_INT_INTF) {
1829 processed_intf_int = 1;
1830 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
1831 intf_int_stat &= nesdev->intf_int_req;
1832 if (NES_INTF_INT_CRITERR & intf_int_stat) {
1833 debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
1834 printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
1835 (u16)debug_error);
1836 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
1837 0x01010000 | (debug_error & 0x0000ffff));
1838 /* BUG(); */
1839 if (crit_err_count++ > 10)
1840 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
1841 }
1842 if (NES_INTF_INT_PCIERR & intf_int_stat) {
1843 printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
1844 BUG();
1845 }
1846 if (NES_INTF_INT_AEQ_OFLOW & intf_int_stat) {
1847 printk(KERN_ERR PFX "AEQ Overflow reported by device!!!\n");
1848 BUG();
1849 }
1850 nes_write32(nesdev->regs+NES_INTF_INT_STAT, intf_int_stat);
1851 }
1852
1853 			if (int_stat & NES_INT_TSW) { /* TODO: NES_INT_TSW events are not handled */
1854 			}
1855 }
1856 		/* Mask off the handled interface/timer/MAC bits so they alone don't keep us looping */
1857 		int_stat &= ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
1858 				NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3);
1859 } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
1860
1861 if (timer_ints == 1) {
1862 if ((nesadapter->et_rx_coalesce_usecs_irq) || (nesadapter->et_use_adaptive_rx_coalesce)) {
1863 if (completion_ints == 0) {
1864 nesdev->timer_only_int_count++;
1865 				if (nesdev->timer_only_int_count >= nesadapter->timer_int_limit) {
1866 nesdev->timer_only_int_count = 0;
1867 nesdev->int_req &= ~NES_INT_TIMER;
1868 nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1869 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1870 nesdev->nesadapter->tune_timer.timer_in_use_old = 0;
1871 } else {
1872 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
1873 }
1874 } else {
1875 				if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
1877 nes_nic_init_timer(nesdev);
1878 }
1879 nesdev->timer_only_int_count = 0;
1880 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
1881 }
1882 } else {
1883 nesdev->timer_only_int_count = 0;
1884 nesdev->int_req &= ~NES_INT_TIMER;
1885 nes_write32(nesdev->regs+NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
1886 nes_write32(nesdev->regs+NES_TIMER_STAT,
1887 nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
1888 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1889 }
1890 } else {
1891 if ( (completion_ints == 1) &&
1892 (((nesadapter->et_rx_coalesce_usecs_irq) &&
1893 (!nesadapter->et_use_adaptive_rx_coalesce)) ||
1894 ((nesdev->deepcq_count > nesadapter->et_pkt_rate_low) &&
1895 (nesadapter->et_use_adaptive_rx_coalesce) )) ) {
1896 /* nes_debug(NES_DBG_ISR, "Enabling periodic timer interrupt.\n" ); */
1897 nesdev->timer_only_int_count = 0;
1898 nesdev->int_req |= NES_INT_TIMER;
1899 nes_write32(nesdev->regs+NES_TIMER_STAT,
1900 nesdev->timer_int_req | ~(nesdev->nesadapter->timer_int_req));
1901 nes_write32(nesdev->regs+NES_INTF_INT_MASK,
1902 ~(nesdev->intf_int_req | NES_INTF_PERIODIC_TIMER));
1903 nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
1904 } else {
1905 nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
1906 }
1907 }
1908 nesdev->deepcq_count = 0;
1909}
1910
1911
1912/**
1913 * nes_process_ceq
1914 */
1915void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
1916{
1917 u64 u64temp;
1918 struct nes_hw_cq *cq;
1919 u32 head;
1920 u32 ceq_size;
1921
1922 /* nes_debug(NES_DBG_CQ, "\n"); */
1923 head = ceq->ceq_head;
1924 ceq_size = ceq->ceq_size;
1925
1926 do {
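		/* Each valid CEQE carries the nes_hw_cq pointer compressed into
		 * 63 bits; reassemble the halves and shift left to undo the
		 * >> 1 applied when the CQ was created. */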
1927 if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
1928 NES_CEQE_VALID) {
1929 u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) |
1930 ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
1931 u64temp <<= 1;
1932 cq = *((struct nes_hw_cq **)&u64temp);
1933 /* nes_debug(NES_DBG_CQ, "pCQ = %p\n", cq); */
1934 barrier();
1935 ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX] = 0;
1936
1937 /* call the event handler */
1938 cq->ce_handler(nesdev, cq);
1939
1940 if (++head >= ceq_size)
1941 head = 0;
1942 } else {
1943 break;
1944 }
1945
1946 } while (1);
1947
1948 ceq->ceq_head = head;
1949}
1950
1951
1952/**
1953 * nes_process_aeq
1954 */
1955void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
1956{
1958 u32 head;
1959 u32 aeq_size;
1960 u32 aeqe_misc;
1961 u32 aeqe_cq_id;
1962 struct nes_hw_aeqe volatile *aeqe;
1963
1964 head = aeq->aeq_head;
1965 aeq_size = aeq->aeq_size;
1966
1967 do {
1968 aeqe = &aeq->aeq_vbase[head];
1969 if ((le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]) & NES_AEQE_VALID) == 0)
1970 break;
1971 aeqe_misc = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
1972 aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
1973 if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
1974 if (aeqe_cq_id >= NES_FIRST_QPN) {
1975 /* dealing with an accelerated QP related AE */
1978 nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
1979 } else {
1980 /* TODO: dealing with a CQP related AE */
1981 nes_debug(NES_DBG_AEQ, "Processing CQP related AE, misc = 0x%04X\n",
1982 (u16)(aeqe_misc >> 16));
1983 }
1984 }
1985
1986 aeqe->aeqe_words[NES_AEQE_MISC_IDX] = 0;
1987
1988 if (++head >= aeq_size)
1989 head = 0;
1990 	} while (1);
1992 aeq->aeq_head = head;
1993}
1994
1995static void nes_reset_link(struct nes_device *nesdev, u32 mac_index)
1996{
1997 struct nes_adapter *nesadapter = nesdev->nesadapter;
1998 u32 reset_value;
1999 	u32 i = 0;
2000 u32 u32temp;
2001
2002 if (nesadapter->hw_rev == NE020_REV) {
2003 return;
2004 }
2005 mh_detected++;
2006
2007 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
2008
2009 if ((mac_index == 0) || ((mac_index == 1) && (nesadapter->OneG_Mode)))
2010 reset_value |= 0x0000001d;
2011 else
2012 reset_value |= 0x0000002d;
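	/* The 0x1d/0x2d values appear to reset the MAC/SERDES pair serving
	 * this port; 0x3d (set below after repeated link interrupts on a
	 * two-port board) resets both. */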
2013
2014 if (4 <= (nesadapter->link_interrupt_count[mac_index] / ((u16)NES_MAX_LINK_INTERRUPTS))) {
2015 if ((!nesadapter->OneG_Mode) && (nesadapter->port_count == 2)) {
2016 nesadapter->link_interrupt_count[0] = 0;
2017 nesadapter->link_interrupt_count[1] = 0;
2018 u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
2019 if (0x00000040 & u32temp)
2020 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
2021 else
2022 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
2023
2024 reset_value |= 0x0000003d;
2025 }
2026 nesadapter->link_interrupt_count[mac_index] = 0;
2027 }
2028
2029 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
2030
2031 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
2032 & 0x00000040) != 0x00000040) && (i++ < 5000));
2033
2034 if (0x0000003d == (reset_value & 0x0000003d)) {
2035 u32 pcs_control_status0, pcs_control_status1;
2036
2037 for (i = 0; i < 10; i++) {
2038 pcs_control_status0 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0);
2039 pcs_control_status1 = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
2040 if (((0x0F000000 == (pcs_control_status0 & 0x0F000000))
2041 && (pcs_control_status0 & 0x00100000))
2042 || ((0x0F000000 == (pcs_control_status1 & 0x0F000000))
2043 && (pcs_control_status1 & 0x00100000)))
2044 continue;
2045 else
2046 break;
2047 }
2048 if (10 == i) {
2049 u32temp = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
2050 if (0x00000040 & u32temp)
2051 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088);
2052 else
2053 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F0C8);
2054
2055 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value);
2056
2057 while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET)
2058 & 0x00000040) != 0x00000040) && (i++ < 5000));
2059 }
2060 }
2061}
2062
2063/**
2064 * nes_process_mac_intr
2065 */
2066void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2067{
2068 unsigned long flags;
2069 u32 pcs_control_status;
2070 struct nes_adapter *nesadapter = nesdev->nesadapter;
2071 struct nes_vnic *nesvnic;
2072 u32 mac_status;
2073 u32 mac_index = nesdev->mac_index;
2074 u32 u32temp;
2075 u16 phy_data;
2076 u16 temp_phy_data;
2077
2078 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2079 if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
2080 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2081 return;
2082 }
2083 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_INTERRUPT;
2084 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2085
2086 /* ack the MAC interrupt */
2087 mac_status = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200));
2088 /* Clear the interrupt */
2089 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (mac_index * 0x200), mac_status);
2090
2091 nes_debug(NES_DBG_PHY, "MAC%u interrupt status = 0x%X.\n", mac_number, mac_status);
2092
2093 if (mac_status & (NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT)) {
2094 nesdev->link_status_interrupts++;
2095 if (0 == (++nesadapter->link_interrupt_count[mac_index] % ((u16)NES_MAX_LINK_INTERRUPTS))) {
2096 spin_lock_irqsave(&nesadapter->phy_lock, flags);
2097 nes_reset_link(nesdev, mac_index);
2098 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2099 }
2100 /* read the PHY interrupt status register */
2101 if (nesadapter->OneG_Mode) {
2102 do {
2103 nes_read_1G_phy_reg(nesdev, 0x1a,
2104 nesadapter->phy_index[mac_index], &phy_data);
2105 nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1a = 0x%X.\n",
2106 nesadapter->phy_index[mac_index], phy_data);
2107 			} while (phy_data & 0x8000);
2108
2109 temp_phy_data = 0;
2110 do {
2111 nes_read_1G_phy_reg(nesdev, 0x11,
2112 nesadapter->phy_index[mac_index], &phy_data);
2113 nes_debug(NES_DBG_PHY, "Phy%d data from register 0x11 = 0x%X.\n",
2114 nesadapter->phy_index[mac_index], phy_data);
2115 if (temp_phy_data == phy_data)
2116 break;
2117 temp_phy_data = phy_data;
2118 } while (1);
2119
2120 nes_read_1G_phy_reg(nesdev, 0x1e,
2121 nesadapter->phy_index[mac_index], &phy_data);
2122 nes_debug(NES_DBG_PHY, "Phy%d data from register 0x1e = 0x%X.\n",
2123 nesadapter->phy_index[mac_index], phy_data);
2124
2125 nes_read_1G_phy_reg(nesdev, 1,
2126 nesadapter->phy_index[mac_index], &phy_data);
2127 nes_debug(NES_DBG_PHY, "1G phy%u data from register 1 = 0x%X\n",
2128 nesadapter->phy_index[mac_index], phy_data);
2129
2130 if (temp_phy_data & 0x1000) {
2131 nes_debug(NES_DBG_PHY, "The Link is up according to the PHY\n");
2132 phy_data = 4;
2133 } else {
2134 nes_debug(NES_DBG_PHY, "The Link is down according to the PHY\n");
2135 }
2136 }
2137 nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
2138 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
2139 nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
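		/* Read the PCS status twice; the first read presumably returns
		 * the latched value from before the link event. */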
2140 pcs_control_status = nes_read_indexed(nesdev,
2141 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
2142 pcs_control_status = nes_read_indexed(nesdev,
2143 NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
2144 nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
2145 mac_index, pcs_control_status);
2146 if (nesadapter->OneG_Mode) {
2147 u32temp = 0x01010000;
2148 if (nesadapter->port_count > 2) {
2149 u32temp |= 0x02020000;
2150 }
2151 			if ((pcs_control_status & u32temp) != u32temp) {
2152 phy_data = 0;
2153 nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
2154 }
2155 } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
2156 nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
2157 temp_phy_data = (u16)nes_read_indexed(nesdev,
2158 NES_IDX_MAC_MDIO_CONTROL);
2159 u32temp = 20;
2160 do {
2161 nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
2162 phy_data = (u16)nes_read_indexed(nesdev,
2163 NES_IDX_MAC_MDIO_CONTROL);
2164 if ((phy_data == temp_phy_data) || (!(--u32temp)))
2165 break;
2166 temp_phy_data = phy_data;
2167 } while (1);
2168			nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
2169					__func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
2170
2171 } else {
2172 phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
2173 }
2174
2175 if (phy_data & 0x0004) {
2176 nesadapter->mac_link_down[mac_index] = 0;
2177 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2178				nes_debug(NES_DBG_PHY, "The Link is UP. linkup was %d\n",
2179 nesvnic->linkup);
2180 if (nesvnic->linkup == 0) {
2181 printk(PFX "The Link is now up for port %u, netdev %p.\n",
2182 mac_index, nesvnic->netdev);
2183 if (netif_queue_stopped(nesvnic->netdev))
2184 netif_start_queue(nesvnic->netdev);
2185 nesvnic->linkup = 1;
2186 netif_carrier_on(nesvnic->netdev);
2187 }
2188 }
2189 } else {
2190 nesadapter->mac_link_down[mac_index] = 1;
2191 list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) {
2192				nes_debug(NES_DBG_PHY, "The Link is Down. linkup was %d\n",
2193 nesvnic->linkup);
2194 if (nesvnic->linkup == 1) {
2195 printk(PFX "The Link is now down for port %u, netdev %p.\n",
2196 mac_index, nesvnic->netdev);
2197 if (!(netif_queue_stopped(nesvnic->netdev)))
2198 netif_stop_queue(nesvnic->netdev);
2199 nesvnic->linkup = 0;
2200 netif_carrier_off(nesvnic->netdev);
2201 }
2202 }
2203 }
2204 }
2205
2206 nesadapter->mac_sw_state[mac_number] = NES_MAC_SW_IDLE;
2207}
2208
2209
2210
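/**
 * nes_nic_napi_ce_handler - schedule NAPI polling for the NIC's CQ
 */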
2211void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2212{
2213 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
2214
2215 netif_rx_schedule(nesdev->netdev[nesvnic->netdev_index], &nesvnic->napi);
2216}
2217
2218
2219/* MAX_RQES_TO_PROCESS is the maximum number of receive queue elements to
2220 * process before returning from nes_nic_ce_handler()
2221 */
2222#define MAX_RQES_TO_PROCESS 384
2223
2224/**
2225 * nes_nic_ce_handler - process send and receive completions from a NIC CQ
2226 */
2227void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
2228{
2229 u64 u64temp;
2230 dma_addr_t bus_address;
2231 struct nes_hw_nic *nesnic;
2232 struct nes_vnic *nesvnic = container_of(cq, struct nes_vnic, nic_cq);
2233 struct nes_adapter *nesadapter = nesdev->nesadapter;
2234 struct nes_hw_nic_rq_wqe *nic_rqe;
2235 struct nes_hw_nic_sq_wqe *nic_sqe;
2236 struct sk_buff *skb;
2237 struct sk_buff *rx_skb;
2238 __le16 *wqe_fragment_length;
2239 u32 head;
2240 u32 cq_size;
2241 u32 rx_pkt_size;
2242	u32 cqe_count = 0;
2243 u32 cqe_errv;
2244 u32 cqe_misc;
2245 u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
2246 u16 vlan_tag;
2247 u16 pkt_type;
2248 u16 rqes_processed = 0;
2249 u8 sq_cqes = 0;
2250
2251 head = cq->cq_head;
2252 cq_size = cq->cq_size;
2253 cq->cqes_pending = 1;
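	/* walk the CQ ring, draining valid CQEs: SQ CQEs unmap and free transmit
	 * buffers, RQ CQEs hand received frames up the stack */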
2254 do {
2255 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
2256 NES_NIC_CQE_VALID) {
2257 nesnic = &nesvnic->nic;
2258 cqe_misc = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]);
2259 if (cqe_misc & NES_NIC_CQE_SQ) {
2260 sq_cqes++;
2261 wqe_fragment_index = 1;
2262 nic_sqe = &nesnic->sq_vbase[nesnic->sq_tail];
2263 skb = nesnic->tx_skb[nesnic->sq_tail];
2264 wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
2265 /* bump past the vlan tag */
2266 wqe_fragment_length++;
2267 if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
2268 u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
2269 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
2270 bus_address = (dma_addr_t)u64temp;
2271 if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
2272 pci_unmap_single(nesdev->pcidev,
2273 bus_address,
2274 le16_to_cpu(wqe_fragment_length[wqe_fragment_index++]),
2275 PCI_DMA_TODEVICE);
2276 }
2277 for (; wqe_fragment_index < 5; wqe_fragment_index++) {
2278 if (wqe_fragment_length[wqe_fragment_index]) {
2279 u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
2280 u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
2281 bus_address = (dma_addr_t)u64temp;
2282 pci_unmap_page(nesdev->pcidev,
2283 bus_address,
2284 le16_to_cpu(wqe_fragment_length[wqe_fragment_index]),
2285 PCI_DMA_TODEVICE);
2286 } else
2287 break;
2288 }
2289 if (skb)
2290 dev_kfree_skb_any(skb);
2291 }
2292 nesnic->sq_tail++;
2293 nesnic->sq_tail &= nesnic->sq_size-1;
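				/* queue sizes are powers of two, so the mask wraps the tail */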
2294 if (sq_cqes > 128) {
2295 barrier();
2296 /* restart the queue if it had been stopped */
2297 if (netif_queue_stopped(nesvnic->netdev))
2298 netif_wake_queue(nesvnic->netdev);
2299 sq_cqes = 0;
2300 }
2301 } else {
2302				rqes_processed++;
2303
2304 cq->rx_cqes_completed++;
2305 cq->rx_pkts_indicated++;
2306 rx_pkt_size = cqe_misc & 0x0000ffff;
2307 nic_rqe = &nesnic->rq_vbase[nesnic->rq_tail];
2308 /* Get the skb */
2309 rx_skb = nesnic->rx_skb[nesnic->rq_tail];
2311 bus_address = (dma_addr_t)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
2312 bus_address += ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
2313 pci_unmap_single(nesdev->pcidev, bus_address,
2314 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
2315 /* rx_skb->tail = rx_skb->data + rx_pkt_size; */
2316 /* rx_skb->len = rx_pkt_size; */
2317 rx_skb->len = 0; /* TODO: see if this is necessary */
2318 skb_put(rx_skb, rx_pkt_size);
2319 rx_skb->protocol = eth_type_trans(rx_skb, nesvnic->netdev);
2320 nesnic->rq_tail++;
2321 nesnic->rq_tail &= nesnic->rq_size - 1;
2322
2323 atomic_inc(&nesvnic->rx_skbs_needed);
2324 if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
2325 nes_write32(nesdev->regs+NES_CQE_ALLOC,
2326 cq->cq_number | (cqe_count << 16));
2327// nesadapter->tune_timer.cq_count += cqe_count;
2328 nesdev->currcq_count += cqe_count;
2329 cqe_count = 0;
2330 nes_replenish_nic_rq(nesvnic);
2331 }
2332 pkt_type = (u16)(le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX]));
2333 cqe_errv = (cqe_misc & NES_NIC_CQE_ERRV_MASK) >> NES_NIC_CQE_ERRV_SHIFT;
2334 rx_skb->ip_summed = CHECKSUM_NONE;
2335
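				/* report CHECKSUM_UNNECESSARY only when hardware flagged no
				 * IP/TCP/UDP checksum or header errors for the frame */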
2336 if ((NES_PKT_TYPE_TCPV4_BITS == (pkt_type & NES_PKT_TYPE_TCPV4_MASK)) ||
2337 (NES_PKT_TYPE_UDPV4_BITS == (pkt_type & NES_PKT_TYPE_UDPV4_MASK))) {
2338 if ((cqe_errv &
2339 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR |
2340 NES_NIC_ERRV_BITS_IPH_ERR | NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
2341 if (nesvnic->rx_checksum_disabled == 0) {
2342 rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2343 }
2344 } else
2345 nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed TCP or UDP packet."
2346 " errv = 0x%X, pkt_type = 0x%X.\n",
2347 nesvnic->netdev->name, cqe_errv, pkt_type);
2348
2349 } else if ((pkt_type & NES_PKT_TYPE_IPV4_MASK) == NES_PKT_TYPE_IPV4_BITS) {
2350 if ((cqe_errv &
2351 (NES_NIC_ERRV_BITS_IPV4_CSUM_ERR | NES_NIC_ERRV_BITS_IPH_ERR |
2352 NES_NIC_ERRV_BITS_WQE_OVERRUN)) == 0) {
2353 if (nesvnic->rx_checksum_disabled == 0) {
2354 rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
2355 /* nes_debug(NES_DBG_CQ, "%s: Reporting successfully checksummed IPv4 packet.\n",
2356 nesvnic->netdev->name); */
2357 }
2358 } else
2359					nes_debug(NES_DBG_CQ, "%s: unsuccessfully checksummed IPv4 packet."
2360							" errv = 0x%X, pkt_type = 0x%X.\n",
2361 nesvnic->netdev->name, cqe_errv, pkt_type);
2362 }
2363 /* nes_debug(NES_DBG_CQ, "pkt_type=%x, APBVT_MASK=%x\n",
2364 pkt_type, (pkt_type & NES_PKT_TYPE_APBVT_MASK)); */
2365
2366 if ((pkt_type & NES_PKT_TYPE_APBVT_MASK) == NES_PKT_TYPE_APBVT_BITS) {
2367 nes_cm_recv(rx_skb, nesvnic->netdev);
2368 } else {
2369 if ((cqe_misc & NES_NIC_CQE_TAG_VALID) && (nesvnic->vlan_grp != NULL)) {
2370 vlan_tag = (u16)(le32_to_cpu(
2371 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_TAG_PKT_TYPE_IDX])
2372 >> 16);
2373 nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
2374 nesvnic->netdev->name, vlan_tag);
2375 nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
2376 } else {
2377 nes_netif_rx(rx_skb);
2378 }
2379 }
2380
2381 nesvnic->netdev->last_rx = jiffies;
2382 /* nesvnic->netstats.rx_packets++; */
2383 /* nesvnic->netstats.rx_bytes += rx_pkt_size; */
2384 }
2385
2386 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX] = 0;
2387 /* Accounting... */
2388 cqe_count++;
2389 if (++head >= cq_size)
2390 head = 0;
2391 if (cqe_count == 255) {
2392 /* Replenish Nic CQ */
2393 nes_write32(nesdev->regs+NES_CQE_ALLOC,
2394 cq->cq_number | (cqe_count << 16));
2395// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
2396 nesdev->currcq_count += cqe_count;
2397 cqe_count = 0;
2398 }
2399
2400 if (cq->rx_cqes_completed >= nesvnic->budget)
2401 break;
2402 } else {
2403 cq->cqes_pending = 0;
2404 break;
2405 }
2406
2407 } while (1);
2408
2409 if (sq_cqes) {
2410 barrier();
2411 /* restart the queue if it had been stopped */
2412 if (netif_queue_stopped(nesvnic->netdev))
2413 netif_wake_queue(nesvnic->netdev);
2414 }
2415
2416 cq->cq_head = head;
2417 /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
2418 cq->cq_number, cqe_count, cq->cq_head); */
2419 cq->cqe_allocs_pending = cqe_count;
2420	if (unlikely(nesadapter->et_use_adaptive_rx_coalesce)) {
2422// nesdev->nesadapter->tune_timer.cq_count += cqe_count;
2423 nesdev->currcq_count += cqe_count;
2424 nes_nic_tune_timer(nesdev);
2425 }
2426 if (atomic_read(&nesvnic->rx_skbs_needed))
2427 nes_replenish_nic_rq(nesvnic);
2428 }
2429
2430
2431/**
2432 * nes_cqp_ce_handler - process CQP completions and post any pending CQP requests
2433 */
2434void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
2435{
2436 u64 u64temp;
2437 unsigned long flags;
2438 struct nes_hw_cqp *cqp = NULL;
2439 struct nes_cqp_request *cqp_request;
2440 struct nes_hw_cqp_wqe *cqp_wqe;
2441 u32 head;
2442 u32 cq_size;
2443	u32 cqe_count = 0;
2444 u32 error_code;
2445 /* u32 counter; */
2446
2447 head = cq->cq_head;
2448 cq_size = cq->cq_size;
2449
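	/* drain CQP completions; each valid CQE carries the CQP context and a
	 * scratch pointer back to the originating nes_cqp_request */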
2450 do {
2451 /* process the CQE */
2452 /* nes_debug(NES_DBG_CQP, "head=%u cqe_words=%08X\n", head,
2453 le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])); */
2454
2455 if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
2456 u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
2457 cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
2458 ((u64)(le32_to_cpu(cq->cq_vbase[head].
2459 cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
2460 cqp = *((struct nes_hw_cqp **)&u64temp);
2461
2462 error_code = le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX]);
2463 if (error_code) {
2464 nes_debug(NES_DBG_CQP, "Bad Completion code for opcode 0x%02X from CQP,"
2465 " Major/Minor codes = 0x%04X:%04X.\n",
2466 le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX])&0x3f,
2467 (u16)(error_code >> 16),
2468 (u16)error_code);
2469 nes_debug(NES_DBG_CQP, "cqp: qp_id=%u, sq_head=%u, sq_tail=%u\n",
2470 cqp->qp_id, cqp->sq_head, cqp->sq_tail);
2471 }
2472
2473 u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
2474 wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) |
2475 ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
2476 wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
2477 cqp_request = *((struct nes_cqp_request **)&u64temp);
2478 if (cqp_request) {
2479 if (cqp_request->waiting) {
2480 /* nes_debug(NES_DBG_CQP, "%s: Waking up requestor\n"); */
2481 cqp_request->major_code = (u16)(error_code >> 16);
2482 cqp_request->minor_code = (u16)error_code;
2483 barrier();
2484 cqp_request->request_done = 1;
2485 wake_up(&cqp_request->waitq);
2486 if (atomic_dec_and_test(&cqp_request->refcount)) {
2487 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
2488 cqp_request,
2489 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
2490 if (cqp_request->dynamic) {
2491 kfree(cqp_request);
2492 } else {
2493 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2494 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2495 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2496 }
2497 }
2498 } else if (cqp_request->callback) {
2499				/* Invoke the callback routine */
2500 cqp_request->cqp_callback(nesdev, cqp_request);
2501 if (cqp_request->dynamic) {
2502 kfree(cqp_request);
2503 } else {
2504 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2505 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2506 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2507 }
2508 } else {
2509 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
2510 cqp_request,
2511 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
2512 if (cqp_request->dynamic) {
2513 kfree(cqp_request);
2514 } else {
2515 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2516 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2517 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2518 }
2519 }
2520 } else {
2521 wake_up(&nesdev->cqp.waitq);
2522 }
2523
2524 cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
2525 nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16));
2526 if (++cqp->sq_tail >= cqp->sq_size)
2527 cqp->sq_tail = 0;
2528
2529 /* Accounting... */
2530 cqe_count++;
2531 if (++head >= cq_size)
2532 head = 0;
2533 } else {
2534 break;
2535 }
2536 } while (1);
2537 cq->cq_head = head;
2538
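	/* with SQ slots freed, move any queued CQP requests onto the hardware SQ */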
2539 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2540 while ((!list_empty(&nesdev->cqp_pending_reqs)) &&
2541 ((((nesdev->cqp.sq_tail+nesdev->cqp.sq_size)-nesdev->cqp.sq_head) &
2542 (nesdev->cqp.sq_size - 1)) != 1)) {
2543 cqp_request = list_entry(nesdev->cqp_pending_reqs.next,
2544 struct nes_cqp_request, list);
2545 list_del_init(&cqp_request->list);
2546 head = nesdev->cqp.sq_head++;
2547 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
2548 cqp_wqe = &nesdev->cqp.sq_vbase[head];
2549 memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
2550 barrier();
2551 cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX] =
2552 cpu_to_le32((u32)((unsigned long)cqp_request));
2553 cqp_wqe->wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX] =
2554 cpu_to_le32((u32)(upper_32_bits((unsigned long)cqp_request)));
2555		nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) put on CQP's SQ wqe%u.\n",
2556 cqp_request, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f, head);
2557		/* Ring doorbell (1 WQE) */
2558 barrier();
2559 nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
2560 }
2561 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2562
2563 /* Arm the CCQ */
2564 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
2565 cq->cq_number);
2566 nes_read32(nesdev->regs+NES_CQE_ALLOC);
2567}
2568
2569
2570/**
2571 * nes_process_iwarp_aeqe - handle an iWARP asynchronous event queue entry
2572 */
2573void nes_process_iwarp_aeqe(struct nes_device *nesdev, struct nes_hw_aeqe *aeqe)
2574{
2575 u64 context;
2576 u64 aeqe_context = 0;
2577 unsigned long flags;
2578 struct nes_qp *nesqp;
2579 int resource_allocated;
2580 /* struct iw_cm_id *cm_id; */
2581 struct nes_adapter *nesadapter = nesdev->nesadapter;
2582 struct ib_event ibevent;
2583 /* struct iw_cm_event cm_event; */
2584 u32 aeq_info;
2585 u32 next_iwarp_state = 0;
2586 u16 async_event_id;
2587 u8 tcp_state;
2588 u8 iwarp_state;
2589
2590 nes_debug(NES_DBG_AEQ, "\n");
2591 aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
2592	if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
2593 context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2594 context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2595 } else {
2596 aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
2597 aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
2598 context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
2599 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
2600 BUG_ON(!context);
2601 }
2602
2603 async_event_id = (u16)aeq_info;
2604 tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
2605 iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
2606 nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
2607 " Tcp state = %s, iWARP state = %s\n",
2608 async_event_id,
2609 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
2610 nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
2611
2612
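	/* dispatch on the async event id; most cases record the new hardware
	 * state on the QP and notify the consumer via its event handler */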
2613 switch (async_event_id) {
2614 case NES_AEQE_AEID_LLP_FIN_RECEIVED:
2615 nesqp = *((struct nes_qp **)&context);
2616 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
2617 nesqp->cm_id->add_ref(nesqp->cm_id);
2618 nes_add_ref(&nesqp->ibqp);
2619 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
2620 NES_TIMER_TYPE_CLOSE, 1, 0);
2621 nes_debug(NES_DBG_AEQ, "QP%u Not decrementing QP refcount (%d),"
2622 " need ae to finish up, original_last_aeq = 0x%04X."
2623 " last_aeq = 0x%04X, scheduling timer. TCP state = %d\n",
2624 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
2625 async_event_id, nesqp->last_aeq, tcp_state);
2626 }
2627 if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2628 (nesqp->ibqp_state != IB_QPS_RTS)) {
2629			/* FIN received, but the TCP or IB state has moved on;
2630			   expect a close complete (else fall through below) */
2631 return;
2632 }
2633 case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
2634 case NES_AEQE_AEID_LLP_CONNECTION_RESET:
2635 case NES_AEQE_AEID_TERMINATE_SENT:
2636 case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
2637 case NES_AEQE_AEID_RESET_SENT:
2638 nesqp = *((struct nes_qp **)&context);
2639 if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
2640 tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2641 }
2642 nes_add_ref(&nesqp->ibqp);
2643 spin_lock_irqsave(&nesqp->lock, flags);
2644 nesqp->hw_iwarp_state = iwarp_state;
2645 nesqp->hw_tcp_state = tcp_state;
2646 nesqp->last_aeq = async_event_id;
2647
2648 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
2649 (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
2650 nesqp->hte_added = 0;
2651 spin_unlock_irqrestore(&nesqp->lock, flags);
2652 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
2653 nesqp->hwqp.qp_id);
2654 nes_hw_modify_qp(nesdev, nesqp,
2655 NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
2656 spin_lock_irqsave(&nesqp->lock, flags);
2657 }
2658
2659 if ((nesqp->ibqp_state == IB_QPS_RTS) &&
2660 ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2661 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2662 switch (nesqp->hw_iwarp_state) {
2663 case NES_AEQE_IWARP_STATE_RTS:
2664 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
2665 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
2666 break;
2667 case NES_AEQE_IWARP_STATE_TERMINATE:
2668 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
2669 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
2670 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
2671 next_iwarp_state |= 0x02000000;
2672 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2673 }
2674 break;
2675 default:
2676 next_iwarp_state = 0;
2677 }
2678 spin_unlock_irqrestore(&nesqp->lock, flags);
2679 if (next_iwarp_state) {
2680 nes_add_ref(&nesqp->ibqp);
2681 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
2682 " also added another reference\n",
2683 nesqp->hwqp.qp_id, next_iwarp_state);
2684 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
2685 }
2686 nes_cm_disconn(nesqp);
2687 } else {
2688 if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
2689 /* FIN Received but ib state not RTS,
2690 close complete will be on its way */
2691 spin_unlock_irqrestore(&nesqp->lock, flags);
2692 nes_rem_ref(&nesqp->ibqp);
2693 return;
2694 }
2695 spin_unlock_irqrestore(&nesqp->lock, flags);
2696 if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
2697 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
2698 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2699 nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
2700 " also added another reference\n",
2701 nesqp->hwqp.qp_id, next_iwarp_state);
2702 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
2703 }
2704 nes_cm_disconn(nesqp);
2705 }
2706 break;
2707 case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
2708 nesqp = *((struct nes_qp **)&context);
2709 spin_lock_irqsave(&nesqp->lock, flags);
2710 nesqp->hw_iwarp_state = iwarp_state;
2711 nesqp->hw_tcp_state = tcp_state;
2712 nesqp->last_aeq = async_event_id;
2713 spin_unlock_irqrestore(&nesqp->lock, flags);
2714 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
2715			" event on QP%u\n",
2716 nesqp->hwqp.qp_id);
2717 if (nesqp->ibqp.event_handler) {
2718 ibevent.device = nesqp->ibqp.device;
2719 ibevent.element.qp = &nesqp->ibqp;
2720 ibevent.event = IB_EVENT_QP_FATAL;
2721 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2722 }
2723 if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
2724		((nesqp->ibqp_state == IB_QPS_RTS) &&
2725 (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
2726 nes_add_ref(&nesqp->ibqp);
2727 nes_cm_disconn(nesqp);
2728 } else {
2729 nesqp->in_disconnect = 0;
2730 wake_up(&nesqp->kick_waitq);
2731 }
2732 break;
2733 case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
2734 nesqp = *((struct nes_qp **)&context);
2735 nes_add_ref(&nesqp->ibqp);
2736 spin_lock_irqsave(&nesqp->lock, flags);
2737 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
2738 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
2739 nesqp->last_aeq = async_event_id;
2740 if (nesqp->cm_id) {
2741 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
2742 " event on QP%u, remote IP = 0x%08X \n",
2743 nesqp->hwqp.qp_id,
2744 ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
2745 } else {
2746 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
2747 " event on QP%u \n",
2748 nesqp->hwqp.qp_id);
2749 }
2750 spin_unlock_irqrestore(&nesqp->lock, flags);
2751 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
2752 nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
2753 if (nesqp->ibqp.event_handler) {
2754 ibevent.device = nesqp->ibqp.device;
2755 ibevent.element.qp = &nesqp->ibqp;
2756 ibevent.event = IB_EVENT_QP_FATAL;
2757 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2758 }
2759 break;
2760 case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
2761		if (NES_AEQE_INBOUND_RDMA & aeq_info) {
2762 nesqp = nesadapter->qp_table[le32_to_cpu(
2763 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
2764 } else {
2765 /* TODO: get the actual WQE and mask off wqe index */
2766 context &= ~((u64)511);
2767 nesqp = *((struct nes_qp **)&context);
2768 }
2769 spin_lock_irqsave(&nesqp->lock, flags);
2770 nesqp->hw_iwarp_state = iwarp_state;
2771 nesqp->hw_tcp_state = tcp_state;
2772 nesqp->last_aeq = async_event_id;
2773 spin_unlock_irqrestore(&nesqp->lock, flags);
2774 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
2775 nesqp->hwqp.qp_id);
2776 if (nesqp->ibqp.event_handler) {
2777 ibevent.device = nesqp->ibqp.device;
2778 ibevent.element.qp = &nesqp->ibqp;
2779 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2780 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2781 }
2782 break;
2783 case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
2784 nesqp = *((struct nes_qp **)&context);
2785 spin_lock_irqsave(&nesqp->lock, flags);
2786 nesqp->hw_iwarp_state = iwarp_state;
2787 nesqp->hw_tcp_state = tcp_state;
2788 nesqp->last_aeq = async_event_id;
2789 spin_unlock_irqrestore(&nesqp->lock, flags);
2790 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
2791 nesqp->hwqp.qp_id);
2792 if (nesqp->ibqp.event_handler) {
2793 ibevent.device = nesqp->ibqp.device;
2794 ibevent.element.qp = &nesqp->ibqp;
2795 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2796 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2797 }
2798 break;
2799 case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
2800 nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
2801 [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
2802 spin_lock_irqsave(&nesqp->lock, flags);
2803 nesqp->hw_iwarp_state = iwarp_state;
2804 nesqp->hw_tcp_state = tcp_state;
2805 nesqp->last_aeq = async_event_id;
2806 spin_unlock_irqrestore(&nesqp->lock, flags);
2807 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
2808 " nesqp = %p, AE reported %p\n",
2809 nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
2810 if (nesqp->ibqp.event_handler) {
2811 ibevent.device = nesqp->ibqp.device;
2812 ibevent.element.qp = &nesqp->ibqp;
2813 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2814 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2815 }
2816 break;
2817 case NES_AEQE_AEID_CQ_OPERATION_ERROR:
2818 context <<= 1;
2819 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
2820 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), (void *)(unsigned long)context);
2821 resource_allocated = nes_is_resource_allocated(nesadapter, nesadapter->allocated_cqs,
2822 le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
2823 if (resource_allocated) {
2824 printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
2825					__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
2826 }
2827 break;
2828 case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
2829 nesqp = nesadapter->qp_table[le32_to_cpu(
2830 aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
2831 spin_lock_irqsave(&nesqp->lock, flags);
2832 nesqp->hw_iwarp_state = iwarp_state;
2833 nesqp->hw_tcp_state = tcp_state;
2834 nesqp->last_aeq = async_event_id;
2835 spin_unlock_irqrestore(&nesqp->lock, flags);
2836 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
2837 "_FOR_AVAILABLE_BUFFER event on QP%u\n",
2838 nesqp->hwqp.qp_id);
2839 if (nesqp->ibqp.event_handler) {
2840 ibevent.device = nesqp->ibqp.device;
2841 ibevent.element.qp = &nesqp->ibqp;
2842 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2843 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2844 }
2845 /* tell cm to disconnect, cm will queue work to thread */
2846 nes_add_ref(&nesqp->ibqp);
2847 nes_cm_disconn(nesqp);
2848 break;
2849 case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
2850 nesqp = *((struct nes_qp **)&context);
2851 spin_lock_irqsave(&nesqp->lock, flags);
2852 nesqp->hw_iwarp_state = iwarp_state;
2853 nesqp->hw_tcp_state = tcp_state;
2854 nesqp->last_aeq = async_event_id;
2855 spin_unlock_irqrestore(&nesqp->lock, flags);
2856 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
2857 "_NO_BUFFER_AVAILABLE event on QP%u\n",
2858 nesqp->hwqp.qp_id);
2859 if (nesqp->ibqp.event_handler) {
2860 ibevent.device = nesqp->ibqp.device;
2861 ibevent.element.qp = &nesqp->ibqp;
2862 ibevent.event = IB_EVENT_QP_FATAL;
2863 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2864 }
2865 /* tell cm to disconnect, cm will queue work to thread */
2866 nes_add_ref(&nesqp->ibqp);
2867 nes_cm_disconn(nesqp);
2868 break;
2869 case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
2870 nesqp = *((struct nes_qp **)&context);
2871 spin_lock_irqsave(&nesqp->lock, flags);
2872 nesqp->hw_iwarp_state = iwarp_state;
2873 nesqp->hw_tcp_state = tcp_state;
2874 nesqp->last_aeq = async_event_id;
2875 spin_unlock_irqrestore(&nesqp->lock, flags);
2876 nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
2877			" event on QP%u\n",
2878 nesqp->hwqp.qp_id);
2879 if (nesqp->ibqp.event_handler) {
2880 ibevent.device = nesqp->ibqp.device;
2881 ibevent.element.qp = &nesqp->ibqp;
2882 ibevent.event = IB_EVENT_QP_FATAL;
2883 nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
2884 }
2885 /* tell cm to disconnect, cm will queue work to thread */
2886 nes_add_ref(&nesqp->ibqp);
2887 nes_cm_disconn(nesqp);
2888 break;
2889 /* TODO: additional AEs need to be here */
2890 default:
2891 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
2892 async_event_id);
2893 break;
2894 }
2895
2896}
2897
2898
2899/**
2900 * nes_iwarp_ce_handler - acknowledge an iWARP CQ event and run its completion handler
2901 */
2902void nes_iwarp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *hw_cq)
2903{
2904 struct nes_cq *nescq = container_of(hw_cq, struct nes_cq, hw_cq);
2905
2906 /* nes_debug(NES_DBG_CQ, "Processing completion event for iWARP CQ%u.\n",
2907 nescq->hw_cq.cq_number); */
2908 nes_write32(nesdev->regs+NES_CQ_ACK, nescq->hw_cq.cq_number);
2909
2910 if (nescq->ibcq.comp_handler)
2911 nescq->ibcq.comp_handler(&nescq->ibcq, nescq->ibcq.cq_context);
2912
2913 return;
2914}
2915
2916
2917/**
2918 * nes_manage_apbvt - add or delete an APBVT entry for an accelerated local port
2919 */
2920int nes_manage_apbvt(struct nes_vnic *nesvnic, u32 accel_local_port,
2921 u32 nic_index, u32 add_port)
2922{
2923 struct nes_device *nesdev = nesvnic->nesdev;
2924 struct nes_hw_cqp_wqe *cqp_wqe;
2925 unsigned long flags;
2926 struct nes_cqp_request *cqp_request;
2927 int ret = 0;
2928 u16 major_code;
2929
2930 /* Send manage APBVT request to CQP */
2931 cqp_request = nes_get_cqp_request(nesdev);
2932 if (cqp_request == NULL) {
2933 nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
2934 return -ENOMEM;
2935 }
2936 cqp_request->waiting = 1;
2937 cqp_wqe = &cqp_request->cqp_wqe;
2938
2939 nes_debug(NES_DBG_QP, "%s APBV for local port=%u(0x%04x), nic_index=%u\n",
2940 (add_port == NES_MANAGE_APBVT_ADD) ? "ADD" : "DEL",
2941 accel_local_port, accel_local_port, nic_index);
2942
2943 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
2944 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, (NES_CQP_MANAGE_APBVT |
2945 ((add_port == NES_MANAGE_APBVT_ADD) ? NES_CQP_APBVT_ADD : 0)));
2946 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
2947 ((nic_index << NES_CQP_APBVT_NIC_SHIFT) | accel_local_port));
2948
2949 nes_debug(NES_DBG_QP, "Waiting for CQP completion for APBVT.\n");
2950
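	/* two references: one dropped by the CQP completion handler, one below */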
2951 atomic_set(&cqp_request->refcount, 2);
2952 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
2953
2954	/* wait for ADD and DEL alike so a delete does not spuriously return -ETIME */
2955	ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
2956			NES_EVENT_TIMEOUT);
2957 nes_debug(NES_DBG_QP, "Completed, ret=%u, CQP Major:Minor codes = 0x%04X:0x%04X\n",
2958 ret, cqp_request->major_code, cqp_request->minor_code);
2959 major_code = cqp_request->major_code;
2960 if (atomic_dec_and_test(&cqp_request->refcount)) {
2961 if (cqp_request->dynamic) {
2962 kfree(cqp_request);
2963 } else {
2964 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2965 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2966 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2967 }
2968 }
2969 if (!ret)
2970 return -ETIME;
2971 else if (major_code)
2972 return -EIO;
2973 else
2974 return 0;
2975}
2976
2977
2978/**
2979 * nes_manage_arp_cache - push an ARP table update to the adapter via the CQP
2980 */
2981void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
2982 u32 ip_addr, u32 action)
2983{
2984 struct nes_hw_cqp_wqe *cqp_wqe;
2985 struct nes_vnic *nesvnic = netdev_priv(netdev);
2986 struct nes_device *nesdev;
2987 struct nes_cqp_request *cqp_request;
2988 int arp_index;
2989
2990 nesdev = nesvnic->nesdev;
2991 arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, action);
2992 if (arp_index == -1) {
2993 return;
2994 }
2995
2996 /* update the ARP entry */
2997 cqp_request = nes_get_cqp_request(nesdev);
2998 if (cqp_request == NULL) {
2999 nes_debug(NES_DBG_NETDEV, "Failed to get a cqp_request.\n");
3000 return;
3001 }
3002 cqp_request->waiting = 0;
3003 cqp_wqe = &cqp_request->cqp_wqe;
3004 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
3005
3006 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(
3007 NES_CQP_MANAGE_ARP_CACHE | NES_CQP_ARP_PERM);
3008 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(
3009 (u32)PCI_FUNC(nesdev->pcidev->devfn) << NES_CQP_ARP_AEQ_INDEX_SHIFT);
3010 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(arp_index);
3011
3012 if (action == NES_ARP_ADD) {
3013 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
3014 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
3015 (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
3016 (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
3017 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
3018 (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
3019 } else {
3020 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = 0;
3021 cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = 0;
3022 }
3023
3024 nes_debug(NES_DBG_NETDEV, "Not waiting for CQP, cqp.sq_head=%u, cqp.sq_tail=%u\n",
3025 nesdev->cqp.sq_head, nesdev->cqp.sq_tail);
3026
3027 atomic_set(&cqp_request->refcount, 1);
3028 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
3029}
3030
3031
3032/**
3033 * flush_wqes - flush a QP's send and/or receive work queues through the CQP
3034 */
3035void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
3036 u32 which_wq, u32 wait_completion)
3037{
3038 unsigned long flags;
3039 struct nes_cqp_request *cqp_request;
3040 struct nes_hw_cqp_wqe *cqp_wqe;
3041 int ret;
3042
3043 cqp_request = nes_get_cqp_request(nesdev);
3044 if (cqp_request == NULL) {
3045 nes_debug(NES_DBG_QP, "Failed to get a cqp_request.\n");
3046 return;
3047 }
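	/* when waiting, hold an extra reference so the completion handler
	 * cannot recycle the request before we read its status */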
3048 if (wait_completion) {
3049 cqp_request->waiting = 1;
3050 atomic_set(&cqp_request->refcount, 2);
3051 } else {
3052 cqp_request->waiting = 0;
3053 }
3054 cqp_wqe = &cqp_request->cqp_wqe;
3055 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
3056
3057 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
3058 cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
3059 cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
3060
3061 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
3062
3063 if (wait_completion) {
3064 /* Wait for CQP */
3065 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
3066 NES_EVENT_TIMEOUT);
3067		nes_debug(NES_DBG_QP, "Flush QP WQEs completed, ret=%u,"
3068 " CQP Major:Minor codes = 0x%04X:0x%04X\n",
3069 ret, cqp_request->major_code, cqp_request->minor_code);
3070 if (atomic_dec_and_test(&cqp_request->refcount)) {
3071 if (cqp_request->dynamic) {
3072 kfree(cqp_request);
3073 } else {
3074 spin_lock_irqsave(&nesdev->cqp.lock, flags);
3075 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
3076 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
3077 }
3078 }
3079 }
3080}
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
new file mode 100644
index 000000000000..1e10df550c9e
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -0,0 +1,1206 @@
1/*
2* Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3*
4* This software is available to you under a choice of one of two
5* licenses. You may choose to be licensed under the terms of the GNU
6* General Public License (GPL) Version 2, available from the file
7* COPYING in the main directory of this source tree, or the
8* OpenIB.org BSD license below:
9*
10* Redistribution and use in source and binary forms, with or
11* without modification, are permitted provided that the following
12* conditions are met:
13*
14* - Redistributions of source code must retain the above
15* copyright notice, this list of conditions and the following
16* disclaimer.
17*
18* - Redistributions in binary form must reproduce the above
19* copyright notice, this list of conditions and the following
20* disclaimer in the documentation and/or other materials
21* provided with the distribution.
22*
23* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30* SOFTWARE.
31*/
32
33#ifndef __NES_HW_H
34#define __NES_HW_H
35
36#define NES_PHY_TYPE_1G 2
37#define NES_PHY_TYPE_IRIS 3
38#define NES_PHY_TYPE_PUMA_10G 6
39
40#define NES_MULTICAST_PF_MAX 8
41
42enum pci_regs {
43 NES_INT_STAT = 0x0000,
44 NES_INT_MASK = 0x0004,
45 NES_INT_PENDING = 0x0008,
46 NES_INTF_INT_STAT = 0x000C,
47 NES_INTF_INT_MASK = 0x0010,
48 NES_TIMER_STAT = 0x0014,
49 NES_PERIODIC_CONTROL = 0x0018,
50 NES_ONE_SHOT_CONTROL = 0x001C,
51 NES_EEPROM_COMMAND = 0x0020,
52 NES_EEPROM_DATA = 0x0024,
53 NES_FLASH_COMMAND = 0x0028,
54 NES_FLASH_DATA = 0x002C,
55 NES_SOFTWARE_RESET = 0x0030,
56 NES_CQ_ACK = 0x0034,
57 NES_WQE_ALLOC = 0x0040,
58 NES_CQE_ALLOC = 0x0044,
59};
60
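/* indexed registers are not mapped at fixed BAR offsets; they are reached
 * indirectly through the nes_read_indexed()/nes_write_indexed() helpers */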
61enum indexed_regs {
62 NES_IDX_CREATE_CQP_LOW = 0x0000,
63 NES_IDX_CREATE_CQP_HIGH = 0x0004,
64 NES_IDX_QP_CONTROL = 0x0040,
65 NES_IDX_FLM_CONTROL = 0x0080,
66 NES_IDX_INT_CPU_STATUS = 0x00a0,
67 NES_IDX_GPIO_CONTROL = 0x00f0,
68 NES_IDX_GPIO_DATA = 0x00f4,
69 NES_IDX_TCP_CONFIG0 = 0x01e4,
70 NES_IDX_TCP_TIMER_CONFIG = 0x01ec,
71 NES_IDX_TCP_NOW = 0x01f0,
72 NES_IDX_QP_MAX_CFG_SIZES = 0x0200,
73 NES_IDX_QP_CTX_SIZE = 0x0218,
74 NES_IDX_TCP_TIMER_SIZE0 = 0x0238,
75 NES_IDX_TCP_TIMER_SIZE1 = 0x0240,
76 NES_IDX_ARP_CACHE_SIZE = 0x0258,
77 NES_IDX_CQ_CTX_SIZE = 0x0260,
78 NES_IDX_MRT_SIZE = 0x0278,
79 NES_IDX_PBL_REGION_SIZE = 0x0280,
80 NES_IDX_IRRQ_COUNT = 0x02b0,
81 NES_IDX_RX_WINDOW_BUFFER_PAGE_TABLE_SIZE = 0x02f0,
82 NES_IDX_RX_WINDOW_BUFFER_SIZE = 0x0300,
83 NES_IDX_DST_IP_ADDR = 0x0400,
84 NES_IDX_PCIX_DIAG = 0x08e8,
85 NES_IDX_MPP_DEBUG = 0x0a00,
86 NES_IDX_PORT_RX_DISCARDS = 0x0a30,
87 NES_IDX_PORT_TX_DISCARDS = 0x0a34,
88 NES_IDX_MPP_LB_DEBUG = 0x0b00,
89 NES_IDX_DENALI_CTL_22 = 0x1058,
90 NES_IDX_MAC_TX_CONTROL = 0x2000,
91 NES_IDX_MAC_TX_CONFIG = 0x2004,
92 NES_IDX_MAC_TX_PAUSE_QUANTA = 0x2008,
93 NES_IDX_MAC_RX_CONTROL = 0x200c,
94 NES_IDX_MAC_RX_CONFIG = 0x2010,
95 NES_IDX_MAC_EXACT_MATCH_BOTTOM = 0x201c,
96 NES_IDX_MAC_MDIO_CONTROL = 0x2084,
97 NES_IDX_MAC_TX_OCTETS_LOW = 0x2100,
98 NES_IDX_MAC_TX_OCTETS_HIGH = 0x2104,
99 NES_IDX_MAC_TX_FRAMES_LOW = 0x2108,
100 NES_IDX_MAC_TX_FRAMES_HIGH = 0x210c,
101 NES_IDX_MAC_TX_PAUSE_FRAMES = 0x2118,
102 NES_IDX_MAC_TX_ERRORS = 0x2138,
103 NES_IDX_MAC_RX_OCTETS_LOW = 0x213c,
104 NES_IDX_MAC_RX_OCTETS_HIGH = 0x2140,
105 NES_IDX_MAC_RX_FRAMES_LOW = 0x2144,
106 NES_IDX_MAC_RX_FRAMES_HIGH = 0x2148,
107 NES_IDX_MAC_RX_BC_FRAMES_LOW = 0x214c,
108 NES_IDX_MAC_RX_MC_FRAMES_HIGH = 0x2150,
109 NES_IDX_MAC_RX_PAUSE_FRAMES = 0x2154,
110 NES_IDX_MAC_RX_SHORT_FRAMES = 0x2174,
111 NES_IDX_MAC_RX_OVERSIZED_FRAMES = 0x2178,
112 NES_IDX_MAC_RX_JABBER_FRAMES = 0x217c,
113 NES_IDX_MAC_RX_CRC_ERR_FRAMES = 0x2180,
114 NES_IDX_MAC_RX_LENGTH_ERR_FRAMES = 0x2184,
115 NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES = 0x2188,
116 NES_IDX_MAC_INT_STATUS = 0x21f0,
117 NES_IDX_MAC_INT_MASK = 0x21f4,
118 NES_IDX_PHY_PCS_CONTROL_STATUS0 = 0x2800,
119 NES_IDX_PHY_PCS_CONTROL_STATUS1 = 0x2a00,
120 NES_IDX_ETH_SERDES_COMMON_CONTROL0 = 0x2808,
121 NES_IDX_ETH_SERDES_COMMON_CONTROL1 = 0x2a08,
122 NES_IDX_ETH_SERDES_COMMON_STATUS0 = 0x280c,
123 NES_IDX_ETH_SERDES_COMMON_STATUS1 = 0x2a0c,
124 NES_IDX_ETH_SERDES_TX_EMP0 = 0x2810,
125 NES_IDX_ETH_SERDES_TX_EMP1 = 0x2a10,
126 NES_IDX_ETH_SERDES_TX_DRIVE0 = 0x2814,
127 NES_IDX_ETH_SERDES_TX_DRIVE1 = 0x2a14,
128 NES_IDX_ETH_SERDES_RX_MODE0 = 0x2818,
129 NES_IDX_ETH_SERDES_RX_MODE1 = 0x2a18,
130 NES_IDX_ETH_SERDES_RX_SIGDET0 = 0x281c,
131 NES_IDX_ETH_SERDES_RX_SIGDET1 = 0x2a1c,
132 NES_IDX_ETH_SERDES_BYPASS0 = 0x2820,
133 NES_IDX_ETH_SERDES_BYPASS1 = 0x2a20,
134 NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0 = 0x2824,
135 NES_IDX_ETH_SERDES_LOOPBACK_CONTROL1 = 0x2a24,
136 NES_IDX_ETH_SERDES_RX_EQ_CONTROL0 = 0x2828,
137 NES_IDX_ETH_SERDES_RX_EQ_CONTROL1 = 0x2a28,
138 NES_IDX_ETH_SERDES_RX_EQ_STATUS0 = 0x282c,
139 NES_IDX_ETH_SERDES_RX_EQ_STATUS1 = 0x2a2c,
140 NES_IDX_ETH_SERDES_CDR_RESET0 = 0x2830,
141 NES_IDX_ETH_SERDES_CDR_RESET1 = 0x2a30,
142 NES_IDX_ETH_SERDES_CDR_CONTROL0 = 0x2834,
143 NES_IDX_ETH_SERDES_CDR_CONTROL1 = 0x2a34,
144 NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0 = 0x2838,
145 NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1 = 0x2a38,
146 NES_IDX_ENDNODE0_NSTAT_RX_DISCARD = 0x3080,
147 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO = 0x3000,
148 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI = 0x3004,
149 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO = 0x3008,
150 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI = 0x300c,
151 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO = 0x7000,
152 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
153 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
154 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
155 NES_IDX_CM_CONFIG = 0x5100,
156 NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
157 NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
158 NES_IDX_NIC_ACTIVE = 0x6010,
159 NES_IDX_NIC_UNICAST_ALL = 0x6018,
160 NES_IDX_NIC_MULTICAST_ALL = 0x6020,
161 NES_IDX_NIC_MULTICAST_ENABLE = 0x6028,
162 NES_IDX_NIC_BROADCAST_ON = 0x6030,
163 NES_IDX_USED_CHUNKS_TX = 0x60b0,
164 NES_IDX_TX_POOL_SIZE = 0x60b8,
165 NES_IDX_QUAD_HASH_TABLE_SIZE = 0x6148,
166 NES_IDX_PERFECT_FILTER_LOW = 0x6200,
167 NES_IDX_PERFECT_FILTER_HIGH = 0x6204,
168 NES_IDX_IPV4_TCP_REXMITS = 0x7080,
169 NES_IDX_DEBUG_ERROR_CONTROL_STATUS = 0x913c,
170 NES_IDX_DEBUG_ERROR_MASKS0 = 0x9140,
171 NES_IDX_DEBUG_ERROR_MASKS1 = 0x9144,
172 NES_IDX_DEBUG_ERROR_MASKS2 = 0x9148,
173 NES_IDX_DEBUG_ERROR_MASKS3 = 0x914c,
174 NES_IDX_DEBUG_ERROR_MASKS4 = 0x9150,
175 NES_IDX_DEBUG_ERROR_MASKS5 = 0x9154,
176};
177
178#define NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE 1
179#define NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE (1 << 17)
180
181enum nes_cqp_opcodes {
182 NES_CQP_CREATE_QP = 0x00,
183 NES_CQP_MODIFY_QP = 0x01,
184 NES_CQP_DESTROY_QP = 0x02,
185 NES_CQP_CREATE_CQ = 0x03,
186 NES_CQP_MODIFY_CQ = 0x04,
187 NES_CQP_DESTROY_CQ = 0x05,
188 NES_CQP_ALLOCATE_STAG = 0x09,
189 NES_CQP_REGISTER_STAG = 0x0a,
190 NES_CQP_QUERY_STAG = 0x0b,
191 NES_CQP_REGISTER_SHARED_STAG = 0x0c,
192 NES_CQP_DEALLOCATE_STAG = 0x0d,
193 NES_CQP_MANAGE_ARP_CACHE = 0x0f,
194 NES_CQP_SUSPEND_QPS = 0x11,
195 NES_CQP_UPLOAD_CONTEXT = 0x13,
196 NES_CQP_CREATE_CEQ = 0x16,
197 NES_CQP_DESTROY_CEQ = 0x18,
198 NES_CQP_CREATE_AEQ = 0x19,
199 NES_CQP_DESTROY_AEQ = 0x1b,
200 NES_CQP_LMI_ACCESS = 0x20,
201 NES_CQP_FLUSH_WQES = 0x22,
202 NES_CQP_MANAGE_APBVT = 0x23
203};
204
205enum nes_cqp_wqe_word_idx {
206 NES_CQP_WQE_OPCODE_IDX = 0,
207 NES_CQP_WQE_ID_IDX = 1,
208 NES_CQP_WQE_COMP_CTX_LOW_IDX = 2,
209 NES_CQP_WQE_COMP_CTX_HIGH_IDX = 3,
210 NES_CQP_WQE_COMP_SCRATCH_LOW_IDX = 4,
211 NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX = 5,
212};
213
214enum nes_cqp_cq_wqeword_idx {
215 NES_CQP_CQ_WQE_PBL_LOW_IDX = 6,
216 NES_CQP_CQ_WQE_PBL_HIGH_IDX = 7,
217 NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX = 8,
218 NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX = 9,
219 NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX = 10,
220};
221
222enum nes_cqp_stag_wqeword_idx {
223 NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX = 1,
224 NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX = 6,
225 NES_CQP_STAG_WQE_LEN_LOW_IDX = 7,
226 NES_CQP_STAG_WQE_STAG_IDX = 8,
227 NES_CQP_STAG_WQE_VA_LOW_IDX = 10,
228 NES_CQP_STAG_WQE_VA_HIGH_IDX = 11,
229 NES_CQP_STAG_WQE_PA_LOW_IDX = 12,
230 NES_CQP_STAG_WQE_PA_HIGH_IDX = 13,
231 NES_CQP_STAG_WQE_PBL_LEN_IDX = 14
232};
233
234#define NES_CQP_OP_IWARP_STATE_SHIFT 28
235
236enum nes_cqp_qp_bits {
237 NES_CQP_QP_ARP_VALID = (1<<8),
238 NES_CQP_QP_WINBUF_VALID = (1<<9),
239 NES_CQP_QP_CONTEXT_VALID = (1<<10),
240 NES_CQP_QP_ORD_VALID = (1<<11),
241 NES_CQP_QP_WINBUF_DATAIND_EN = (1<<12),
242 NES_CQP_QP_VIRT_WQS = (1<<13),
243 NES_CQP_QP_DEL_HTE = (1<<14),
244 NES_CQP_QP_CQS_VALID = (1<<15),
245 NES_CQP_QP_TYPE_TSA = 0,
246 NES_CQP_QP_TYPE_IWARP = (1<<16),
247 NES_CQP_QP_TYPE_CQP = (4<<16),
248 NES_CQP_QP_TYPE_NIC = (5<<16),
249 NES_CQP_QP_MSS_CHG = (1<<20),
250 NES_CQP_QP_STATIC_RESOURCES = (1<<21),
251 NES_CQP_QP_IGNORE_MW_BOUND = (1<<22),
252 NES_CQP_QP_VWQ_USE_LMI = (1<<23),
253 NES_CQP_QP_IWARP_STATE_IDLE = (1<<NES_CQP_OP_IWARP_STATE_SHIFT),
254 NES_CQP_QP_IWARP_STATE_RTS = (2<<NES_CQP_OP_IWARP_STATE_SHIFT),
255 NES_CQP_QP_IWARP_STATE_CLOSING = (3<<NES_CQP_OP_IWARP_STATE_SHIFT),
256 NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
257 NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
258 NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
259 NES_CQP_QP_RESET = (1<<31),
260};
261
262enum nes_cqp_qp_wqe_word_idx {
263 NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
264 NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
265 NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
266};
267
268enum nes_nic_ctx_bits {
269 NES_NIC_CTX_RQ_SIZE_32 = (3<<8),
270 NES_NIC_CTX_RQ_SIZE_512 = (3<<8),
271 NES_NIC_CTX_SQ_SIZE_32 = (1<<10),
272 NES_NIC_CTX_SQ_SIZE_512 = (3<<10),
273};
274
275enum nes_nic_qp_ctx_word_idx {
276 NES_NIC_CTX_MISC_IDX = 0,
277 NES_NIC_CTX_SQ_LOW_IDX = 2,
278 NES_NIC_CTX_SQ_HIGH_IDX = 3,
279 NES_NIC_CTX_RQ_LOW_IDX = 4,
280 NES_NIC_CTX_RQ_HIGH_IDX = 5,
281};
282
283enum nes_cqp_cq_bits {
284 NES_CQP_CQ_CEQE_MASK = (1<<9),
285 NES_CQP_CQ_CEQ_VALID = (1<<10),
286 NES_CQP_CQ_RESIZE = (1<<11),
287 NES_CQP_CQ_CHK_OVERFLOW = (1<<12),
288 NES_CQP_CQ_4KB_CHUNK = (1<<14),
289 NES_CQP_CQ_VIRT = (1<<15),
290};
291
292enum nes_cqp_stag_bits {
293 NES_CQP_STAG_VA_TO = (1<<9),
294 NES_CQP_STAG_DEALLOC_PBLS = (1<<10),
295 NES_CQP_STAG_PBL_BLK_SIZE = (1<<11),
296 NES_CQP_STAG_MR = (1<<13),
297 NES_CQP_STAG_RIGHTS_LOCAL_READ = (1<<16),
298 NES_CQP_STAG_RIGHTS_LOCAL_WRITE = (1<<17),
299 NES_CQP_STAG_RIGHTS_REMOTE_READ = (1<<18),
300 NES_CQP_STAG_RIGHTS_REMOTE_WRITE = (1<<19),
301 NES_CQP_STAG_RIGHTS_WINDOW_BIND = (1<<20),
302 NES_CQP_STAG_REM_ACC_EN = (1<<21),
303 NES_CQP_STAG_LEAVE_PENDING = (1<<31),
304};
305
306enum nes_cqp_ceq_wqeword_idx {
307 NES_CQP_CEQ_WQE_ELEMENT_COUNT_IDX = 1,
308 NES_CQP_CEQ_WQE_PBL_LOW_IDX = 6,
309 NES_CQP_CEQ_WQE_PBL_HIGH_IDX = 7,
310};
311
312enum nes_cqp_ceq_bits {
313 NES_CQP_CEQ_4KB_CHUNK = (1<<14),
314 NES_CQP_CEQ_VIRT = (1<<15),
315};
316
317enum nes_cqp_aeq_wqeword_idx {
318 NES_CQP_AEQ_WQE_ELEMENT_COUNT_IDX = 1,
319 NES_CQP_AEQ_WQE_PBL_LOW_IDX = 6,
320 NES_CQP_AEQ_WQE_PBL_HIGH_IDX = 7,
321};
322
323enum nes_cqp_aeq_bits {
324 NES_CQP_AEQ_4KB_CHUNK = (1<<14),
325 NES_CQP_AEQ_VIRT = (1<<15),
326};
327
328enum nes_cqp_lmi_wqeword_idx {
329 NES_CQP_LMI_WQE_LMI_OFFSET_IDX = 1,
330 NES_CQP_LMI_WQE_FRAG_LOW_IDX = 8,
331 NES_CQP_LMI_WQE_FRAG_HIGH_IDX = 9,
332 NES_CQP_LMI_WQE_FRAG_LEN_IDX = 10,
333};
334
335enum nes_cqp_arp_wqeword_idx {
336 NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX = 6,
337 NES_CQP_ARP_WQE_MAC_HIGH_IDX = 7,
338 NES_CQP_ARP_WQE_REACHABILITY_MAX_IDX = 1,
339};
340
341enum nes_cqp_upload_wqeword_idx {
342 NES_CQP_UPLOAD_WQE_CTXT_LOW_IDX = 6,
343 NES_CQP_UPLOAD_WQE_CTXT_HIGH_IDX = 7,
344 NES_CQP_UPLOAD_WQE_HTE_IDX = 8,
345};
346
347enum nes_cqp_arp_bits {
348 NES_CQP_ARP_VALID = (1<<8),
349 NES_CQP_ARP_PERM = (1<<9),
350};
351
352enum nes_cqp_flush_bits {
353 NES_CQP_FLUSH_SQ = (1<<30),
354 NES_CQP_FLUSH_RQ = (1<<31),
355};
356
357enum nes_cqe_opcode_bits {
358 NES_CQE_STAG_VALID = (1<<6),
359 NES_CQE_ERROR = (1<<7),
360 NES_CQE_SQ = (1<<8),
361 NES_CQE_SE = (1<<9),
362 NES_CQE_PSH = (1<<29),
363 NES_CQE_FIN = (1<<30),
364 NES_CQE_VALID = (1<<31),
365};
366
367
368enum nes_cqe_word_idx {
369 NES_CQE_PAYLOAD_LENGTH_IDX = 0,
370 NES_CQE_COMP_COMP_CTX_LOW_IDX = 2,
371 NES_CQE_COMP_COMP_CTX_HIGH_IDX = 3,
372 NES_CQE_INV_STAG_IDX = 4,
373 NES_CQE_QP_ID_IDX = 5,
374 NES_CQE_ERROR_CODE_IDX = 6,
375 NES_CQE_OPCODE_IDX = 7,
376};
377
378enum nes_ceqe_word_idx {
379 NES_CEQE_CQ_CTX_LOW_IDX = 0,
380 NES_CEQE_CQ_CTX_HIGH_IDX = 1,
381};
382
383enum nes_ceqe_status_bit {
384 NES_CEQE_VALID = (1<<31),
385};
386
387enum nes_int_bits {
388 NES_INT_CEQ0 = (1<<0),
389 NES_INT_CEQ1 = (1<<1),
390 NES_INT_CEQ2 = (1<<2),
391 NES_INT_CEQ3 = (1<<3),
392 NES_INT_CEQ4 = (1<<4),
393 NES_INT_CEQ5 = (1<<5),
394 NES_INT_CEQ6 = (1<<6),
395 NES_INT_CEQ7 = (1<<7),
396 NES_INT_CEQ8 = (1<<8),
397 NES_INT_CEQ9 = (1<<9),
398 NES_INT_CEQ10 = (1<<10),
399 NES_INT_CEQ11 = (1<<11),
400 NES_INT_CEQ12 = (1<<12),
401 NES_INT_CEQ13 = (1<<13),
402 NES_INT_CEQ14 = (1<<14),
403 NES_INT_CEQ15 = (1<<15),
404 NES_INT_AEQ0 = (1<<16),
405 NES_INT_AEQ1 = (1<<17),
406 NES_INT_AEQ2 = (1<<18),
407 NES_INT_AEQ3 = (1<<19),
408 NES_INT_AEQ4 = (1<<20),
409 NES_INT_AEQ5 = (1<<21),
410 NES_INT_AEQ6 = (1<<22),
411 NES_INT_AEQ7 = (1<<23),
412 NES_INT_MAC0 = (1<<24),
413 NES_INT_MAC1 = (1<<25),
414 NES_INT_MAC2 = (1<<26),
415 NES_INT_MAC3 = (1<<27),
416 NES_INT_TSW = (1<<28),
417 NES_INT_TIMER = (1<<29),
418 NES_INT_INTF = (1<<30),
419};
420
421enum nes_intf_int_bits {
422 NES_INTF_INT_PCIERR = (1<<0),
423 NES_INTF_PERIODIC_TIMER = (1<<2),
424 NES_INTF_ONE_SHOT_TIMER = (1<<3),
425 NES_INTF_INT_CRITERR = (1<<14),
426 NES_INTF_INT_AEQ0_OFLOW = (1<<16),
427 NES_INTF_INT_AEQ1_OFLOW = (1<<17),
428 NES_INTF_INT_AEQ2_OFLOW = (1<<18),
429 NES_INTF_INT_AEQ3_OFLOW = (1<<19),
430 NES_INTF_INT_AEQ4_OFLOW = (1<<20),
431 NES_INTF_INT_AEQ5_OFLOW = (1<<21),
432 NES_INTF_INT_AEQ6_OFLOW = (1<<22),
433 NES_INTF_INT_AEQ7_OFLOW = (1<<23),
434 NES_INTF_INT_AEQ_OFLOW = (0xff<<16),
435};
436
437enum nes_mac_int_bits {
438 NES_MAC_INT_LINK_STAT_CHG = (1<<1),
439 NES_MAC_INT_XGMII_EXT = (1<<2),
440 NES_MAC_INT_TX_UNDERFLOW = (1<<6),
441 NES_MAC_INT_TX_ERROR = (1<<7),
442};
443
444enum nes_cqe_allocate_bits {
445 NES_CQE_ALLOC_INC_SELECT = (1<<28),
446 NES_CQE_ALLOC_NOTIFY_NEXT = (1<<29),
447 NES_CQE_ALLOC_NOTIFY_SE = (1<<30),
448 NES_CQE_ALLOC_RESET = (1<<31),
449};
450
451enum nes_nic_rq_wqe_word_idx {
452 NES_NIC_RQ_WQE_LENGTH_1_0_IDX = 0,
453 NES_NIC_RQ_WQE_LENGTH_3_2_IDX = 1,
454 NES_NIC_RQ_WQE_FRAG0_LOW_IDX = 2,
455 NES_NIC_RQ_WQE_FRAG0_HIGH_IDX = 3,
456 NES_NIC_RQ_WQE_FRAG1_LOW_IDX = 4,
457 NES_NIC_RQ_WQE_FRAG1_HIGH_IDX = 5,
458 NES_NIC_RQ_WQE_FRAG2_LOW_IDX = 6,
459 NES_NIC_RQ_WQE_FRAG2_HIGH_IDX = 7,
460 NES_NIC_RQ_WQE_FRAG3_LOW_IDX = 8,
461 NES_NIC_RQ_WQE_FRAG3_HIGH_IDX = 9,
462};
463
464enum nes_nic_sq_wqe_word_idx {
465 NES_NIC_SQ_WQE_MISC_IDX = 0,
466 NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX = 1,
467 NES_NIC_SQ_WQE_LSO_INFO_IDX = 2,
468 NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX = 3,
469 NES_NIC_SQ_WQE_LENGTH_2_1_IDX = 4,
470 NES_NIC_SQ_WQE_LENGTH_4_3_IDX = 5,
471 NES_NIC_SQ_WQE_FRAG0_LOW_IDX = 6,
472 NES_NIC_SQ_WQE_FRAG0_HIGH_IDX = 7,
473 NES_NIC_SQ_WQE_FRAG1_LOW_IDX = 8,
474 NES_NIC_SQ_WQE_FRAG1_HIGH_IDX = 9,
475 NES_NIC_SQ_WQE_FRAG2_LOW_IDX = 10,
476 NES_NIC_SQ_WQE_FRAG2_HIGH_IDX = 11,
477 NES_NIC_SQ_WQE_FRAG3_LOW_IDX = 12,
478 NES_NIC_SQ_WQE_FRAG3_HIGH_IDX = 13,
479 NES_NIC_SQ_WQE_FRAG4_LOW_IDX = 14,
480 NES_NIC_SQ_WQE_FRAG4_HIGH_IDX = 15,
481};
482
483enum nes_iwarp_sq_wqe_word_idx {
484 NES_IWARP_SQ_WQE_MISC_IDX = 0,
485 NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX = 1,
486 NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX = 2,
487 NES_IWARP_SQ_WQE_COMP_CTX_HIGH_IDX = 3,
488 NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
489 NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
490 NES_IWARP_SQ_WQE_INV_STAG_LOW_IDX = 7,
491 NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX = 8,
492 NES_IWARP_SQ_WQE_RDMA_TO_HIGH_IDX = 9,
493 NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX = 10,
494 NES_IWARP_SQ_WQE_RDMA_STAG_IDX = 11,
495 NES_IWARP_SQ_WQE_IMM_DATA_START_IDX = 12,
496 NES_IWARP_SQ_WQE_FRAG0_LOW_IDX = 16,
497 NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX = 17,
498 NES_IWARP_SQ_WQE_LENGTH0_IDX = 18,
499 NES_IWARP_SQ_WQE_STAG0_IDX = 19,
500 NES_IWARP_SQ_WQE_FRAG1_LOW_IDX = 20,
501 NES_IWARP_SQ_WQE_FRAG1_HIGH_IDX = 21,
502 NES_IWARP_SQ_WQE_LENGTH1_IDX = 22,
503 NES_IWARP_SQ_WQE_STAG1_IDX = 23,
504 NES_IWARP_SQ_WQE_FRAG2_LOW_IDX = 24,
505 NES_IWARP_SQ_WQE_FRAG2_HIGH_IDX = 25,
506 NES_IWARP_SQ_WQE_LENGTH2_IDX = 26,
507 NES_IWARP_SQ_WQE_STAG2_IDX = 27,
508 NES_IWARP_SQ_WQE_FRAG3_LOW_IDX = 28,
509 NES_IWARP_SQ_WQE_FRAG3_HIGH_IDX = 29,
510 NES_IWARP_SQ_WQE_LENGTH3_IDX = 30,
511 NES_IWARP_SQ_WQE_STAG3_IDX = 31,
512};
513
514enum nes_iwarp_sq_bind_wqe_word_idx {
515 NES_IWARP_SQ_BIND_WQE_MR_IDX = 6,
516 NES_IWARP_SQ_BIND_WQE_MW_IDX = 7,
517 NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX = 8,
518 NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX = 9,
519 NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX = 10,
520 NES_IWARP_SQ_BIND_WQE_VA_FBO_HIGH_IDX = 11,
521};
522
523enum nes_iwarp_sq_fmr_wqe_word_idx {
524 NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX = 7,
525 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX = 8,
526 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX = 9,
527 NES_IWARP_SQ_FMR_WQE_VA_FBO_LOW_IDX = 10,
528 NES_IWARP_SQ_FMR_WQE_VA_FBO_HIGH_IDX = 11,
529 NES_IWARP_SQ_FMR_WQE_PBL_ADDR_LOW_IDX = 12,
530 NES_IWARP_SQ_FMR_WQE_PBL_ADDR_HIGH_IDX = 13,
531 NES_IWARP_SQ_FMR_WQE_PBL_LENGTH_IDX = 14,
532};
533
534enum nes_iwarp_sq_locinv_wqe_word_idx {
535 NES_IWARP_SQ_LOCINV_WQE_INV_STAG_IDX = 6,
536};
537
538
539enum nes_iwarp_rq_wqe_word_idx {
540 NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX = 1,
541 NES_IWARP_RQ_WQE_COMP_CTX_LOW_IDX = 2,
542 NES_IWARP_RQ_WQE_COMP_CTX_HIGH_IDX = 3,
543 NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX = 4,
544 NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX = 5,
545 NES_IWARP_RQ_WQE_FRAG0_LOW_IDX = 8,
546 NES_IWARP_RQ_WQE_FRAG0_HIGH_IDX = 9,
547 NES_IWARP_RQ_WQE_LENGTH0_IDX = 10,
548 NES_IWARP_RQ_WQE_STAG0_IDX = 11,
549 NES_IWARP_RQ_WQE_FRAG1_LOW_IDX = 12,
550 NES_IWARP_RQ_WQE_FRAG1_HIGH_IDX = 13,
551 NES_IWARP_RQ_WQE_LENGTH1_IDX = 14,
552 NES_IWARP_RQ_WQE_STAG1_IDX = 15,
553 NES_IWARP_RQ_WQE_FRAG2_LOW_IDX = 16,
554 NES_IWARP_RQ_WQE_FRAG2_HIGH_IDX = 17,
555 NES_IWARP_RQ_WQE_LENGTH2_IDX = 18,
556 NES_IWARP_RQ_WQE_STAG2_IDX = 19,
557 NES_IWARP_RQ_WQE_FRAG3_LOW_IDX = 20,
558 NES_IWARP_RQ_WQE_FRAG3_HIGH_IDX = 21,
559 NES_IWARP_RQ_WQE_LENGTH3_IDX = 22,
560 NES_IWARP_RQ_WQE_STAG3_IDX = 23,
561};
562
563enum nes_nic_sq_wqe_bits {
564 NES_NIC_SQ_WQE_PHDR_CS_READY = (1<<21),
565 NES_NIC_SQ_WQE_LSO_ENABLE = (1<<22),
566 NES_NIC_SQ_WQE_TAGVALUE_ENABLE = (1<<23),
567 NES_NIC_SQ_WQE_DISABLE_CHKSUM = (1<<30),
568 NES_NIC_SQ_WQE_COMPLETION = (1<<31),
569};
570
571enum nes_nic_cqe_word_idx {
572 NES_NIC_CQE_ACCQP_ID_IDX = 0,
573 NES_NIC_CQE_TAG_PKT_TYPE_IDX = 2,
574 NES_NIC_CQE_MISC_IDX = 3,
575};
576
577#define NES_PKT_TYPE_APBVT_BITS 0xC112
578#define NES_PKT_TYPE_APBVT_MASK 0xff3e
579
580#define NES_PKT_TYPE_PVALID_BITS 0x10000000
581#define NES_PKT_TYPE_PVALID_MASK 0x30000000
582
583#define NES_PKT_TYPE_TCPV4_BITS 0x0110
584#define NES_PKT_TYPE_TCPV4_MASK 0x3f30
585
586#define NES_PKT_TYPE_UDPV4_BITS 0x0210
587#define NES_PKT_TYPE_UDPV4_MASK 0x3f30
588
589#define NES_PKT_TYPE_IPV4_BITS 0x0010
590#define NES_PKT_TYPE_IPV4_MASK 0x3f30
591
592#define NES_PKT_TYPE_OTHER_BITS 0x0000
593#define NES_PKT_TYPE_OTHER_MASK 0x0030
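/* a receive CQE matches a packet type when (pkt_type & TYPE_MASK) == TYPE_BITS;
 * see the receive checksum handling in nes_nic_ce_handler() */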
594
595#define NES_NIC_CQE_ERRV_SHIFT 16
596enum nes_nic_ev_bits {
597 NES_NIC_ERRV_BITS_MODE = (1<<0),
598 NES_NIC_ERRV_BITS_IPV4_CSUM_ERR = (1<<1),
599 NES_NIC_ERRV_BITS_TCPUDP_CSUM_ERR = (1<<2),
600 NES_NIC_ERRV_BITS_WQE_OVERRUN = (1<<3),
601 NES_NIC_ERRV_BITS_IPH_ERR = (1<<4),
602};
603
604enum nes_nic_cqe_bits {
605 NES_NIC_CQE_ERRV_MASK = (0xff<<NES_NIC_CQE_ERRV_SHIFT),
606 NES_NIC_CQE_SQ = (1<<24),
607 NES_NIC_CQE_ACCQP_PORT = (1<<28),
608 NES_NIC_CQE_ACCQP_VALID = (1<<29),
609 NES_NIC_CQE_TAG_VALID = (1<<30),
610 NES_NIC_CQE_VALID = (1<<31),
611};
612
613enum nes_aeqe_word_idx {
614 NES_AEQE_COMP_CTXT_LOW_IDX = 0,
615 NES_AEQE_COMP_CTXT_HIGH_IDX = 1,
616 NES_AEQE_COMP_QP_CQ_ID_IDX = 2,
617 NES_AEQE_MISC_IDX = 3,
618};
619
620enum nes_aeqe_bits {
621 NES_AEQE_QP = (1<<16),
622 NES_AEQE_CQ = (1<<17),
623 NES_AEQE_SQ = (1<<18),
624 NES_AEQE_INBOUND_RDMA = (1<<19),
625 NES_AEQE_IWARP_STATE_MASK = (7<<20),
626 NES_AEQE_TCP_STATE_MASK = (0xf<<24),
627 NES_AEQE_VALID = (1<<31),
628};
629
630#define NES_AEQE_IWARP_STATE_SHIFT 20
631#define NES_AEQE_TCP_STATE_SHIFT 24
632
633enum nes_aeqe_iwarp_state {
634 NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
635 NES_AEQE_IWARP_STATE_IDLE = 1,
636 NES_AEQE_IWARP_STATE_RTS = 2,
637 NES_AEQE_IWARP_STATE_CLOSING = 3,
638 NES_AEQE_IWARP_STATE_TERMINATE = 5,
639 NES_AEQE_IWARP_STATE_ERROR = 6
640};
641
642enum nes_aeqe_tcp_state {
643 NES_AEQE_TCP_STATE_NON_EXISTANT = 0,
644 NES_AEQE_TCP_STATE_CLOSED = 1,
645 NES_AEQE_TCP_STATE_LISTEN = 2,
646 NES_AEQE_TCP_STATE_SYN_SENT = 3,
647 NES_AEQE_TCP_STATE_SYN_RCVD = 4,
648 NES_AEQE_TCP_STATE_ESTABLISHED = 5,
649 NES_AEQE_TCP_STATE_CLOSE_WAIT = 6,
650 NES_AEQE_TCP_STATE_FIN_WAIT_1 = 7,
651 NES_AEQE_TCP_STATE_CLOSING = 8,
652 NES_AEQE_TCP_STATE_LAST_ACK = 9,
653 NES_AEQE_TCP_STATE_FIN_WAIT_2 = 10,
654 NES_AEQE_TCP_STATE_TIME_WAIT = 11
655};
656
657enum nes_aeqe_aeid {
658 NES_AEQE_AEID_AMP_UNALLOCATED_STAG = 0x0102,
659 NES_AEQE_AEID_AMP_INVALID_STAG = 0x0103,
660 NES_AEQE_AEID_AMP_BAD_QP = 0x0104,
661 NES_AEQE_AEID_AMP_BAD_PD = 0x0105,
662 NES_AEQE_AEID_AMP_BAD_STAG_KEY = 0x0106,
663 NES_AEQE_AEID_AMP_BAD_STAG_INDEX = 0x0107,
664 NES_AEQE_AEID_AMP_BOUNDS_VIOLATION = 0x0108,
665 NES_AEQE_AEID_AMP_RIGHTS_VIOLATION = 0x0109,
666 NES_AEQE_AEID_AMP_TO_WRAP = 0x010a,
667 NES_AEQE_AEID_AMP_FASTREG_SHARED = 0x010b,
668 NES_AEQE_AEID_AMP_FASTREG_VALID_STAG = 0x010c,
669 NES_AEQE_AEID_AMP_FASTREG_MW_STAG = 0x010d,
670 NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS = 0x010e,
671 NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW = 0x010f,
672 NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH = 0x0110,
673 NES_AEQE_AEID_AMP_INVALIDATE_SHARED = 0x0111,
674 NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS = 0x0112,
675 NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS = 0x0113,
676 NES_AEQE_AEID_AMP_MWBIND_VALID_STAG = 0x0114,
677 NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG = 0x0115,
678 NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG = 0x0116,
679 NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG = 0x0117,
680 NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS = 0x0118,
681 NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS = 0x0119,
682 NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT = 0x011a,
683 NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED = 0x011b,
684 NES_AEQE_AEID_BAD_CLOSE = 0x0201,
685 NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE = 0x0202,
686 NES_AEQE_AEID_CQ_OPERATION_ERROR = 0x0203,
687 NES_AEQE_AEID_PRIV_OPERATION_DENIED = 0x0204,
688 NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO = 0x0205,
689 NES_AEQE_AEID_STAG_ZERO_INVALID = 0x0206,
690 NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN = 0x0301,
691 NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID = 0x0302,
692 NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER = 0x0303,
693 NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION = 0x0304,
694 NES_AEQE_AEID_DDP_UBE_INVALID_MO = 0x0305,
695 NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE = 0x0306,
696 NES_AEQE_AEID_DDP_UBE_INVALID_QN = 0x0307,
697 NES_AEQE_AEID_DDP_NO_L_BIT = 0x0308,
698 NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION = 0x0311,
699 NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE = 0x0312,
700 NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST = 0x0313,
701 NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP = 0x0314,
702 NES_AEQE_AEID_INVALID_ARP_ENTRY = 0x0401,
703 NES_AEQE_AEID_INVALID_TCP_OPTION_RCVD = 0x0402,
704 NES_AEQE_AEID_STALE_ARP_ENTRY = 0x0403,
705 NES_AEQE_AEID_LLP_CLOSE_COMPLETE = 0x0501,
706 NES_AEQE_AEID_LLP_CONNECTION_RESET = 0x0502,
707 NES_AEQE_AEID_LLP_FIN_RECEIVED = 0x0503,
708 NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH = 0x0504,
709 NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR = 0x0505,
710 NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE = 0x0506,
711 NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL = 0x0507,
712 NES_AEQE_AEID_LLP_SYN_RECEIVED = 0x0508,
713 NES_AEQE_AEID_LLP_TERMINATE_RECEIVED = 0x0509,
714 NES_AEQE_AEID_LLP_TOO_MANY_RETRIES = 0x050a,
715 NES_AEQE_AEID_LLP_TOO_MANY_KEEPALIVE_RETRIES = 0x050b,
716 NES_AEQE_AEID_RESET_SENT = 0x0601,
717 NES_AEQE_AEID_TERMINATE_SENT = 0x0602,
718 NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC = 0x0700
719};
720
721enum nes_iwarp_sq_opcodes {
722 NES_IWARP_SQ_WQE_WRPDU = (1<<15),
723 NES_IWARP_SQ_WQE_PSH = (1<<21),
724 NES_IWARP_SQ_WQE_STREAMING = (1<<23),
725 NES_IWARP_SQ_WQE_IMM_DATA = (1<<28),
726 NES_IWARP_SQ_WQE_READ_FENCE = (1<<29),
727 NES_IWARP_SQ_WQE_LOCAL_FENCE = (1<<30),
728 NES_IWARP_SQ_WQE_SIGNALED_COMPL = (1<<31),
729};
730
731enum nes_iwarp_sq_wqe_bits {
732 NES_IWARP_SQ_OP_RDMAW = 0,
733 NES_IWARP_SQ_OP_RDMAR = 1,
734 NES_IWARP_SQ_OP_SEND = 3,
735 NES_IWARP_SQ_OP_SENDINV = 4,
736 NES_IWARP_SQ_OP_SENDSE = 5,
737 NES_IWARP_SQ_OP_SENDSEINV = 6,
738 NES_IWARP_SQ_OP_BIND = 8,
739 NES_IWARP_SQ_OP_FAST_REG = 9,
740 NES_IWARP_SQ_OP_LOCINV = 10,
741 NES_IWARP_SQ_OP_RDMAR_LOCINV = 11,
742 NES_IWARP_SQ_OP_NOP = 12,
743};
744
745#define NES_EEPROM_READ_REQUEST (1<<16)
746#define NES_MAC_ADDR_VALID (1<<20)
747
748/*
749 * NES index registers init values.
750 */
751struct nes_init_values {
752 u32 index;
753 u32 data;
754 u8 wrt;
755};
756
757/*
758 * NES registers in BAR0.
759 */
760struct nes_pci_regs {
761 u32 int_status;
762 u32 int_mask;
763 u32 int_pending;
764 u32 intf_int_status;
765 u32 intf_int_mask;
766 u32 other_regs[59]; /* pad out to 256 bytes for now */
767};
768
769#define NES_CQP_SQ_SIZE 128
770#define NES_CCQ_SIZE 128
771#define NES_NIC_WQ_SIZE 512
772#define NES_NIC_CTX_SIZE ((NES_NIC_CTX_RQ_SIZE_512) | (NES_NIC_CTX_SQ_SIZE_512))
773#define NES_NIC_BACK_STORE 0x00038000
774
775struct nes_device;
776
777struct nes_hw_nic_qp_context {
778 __le32 context_words[6];
779};
780
781struct nes_hw_nic_sq_wqe {
782 __le32 wqe_words[16];
783};
784
785struct nes_hw_nic_rq_wqe {
786 __le32 wqe_words[16];
787};
788
789struct nes_hw_nic_cqe {
790 __le32 cqe_words[4];
791};
792
793struct nes_hw_cqp_qp_context {
794 __le32 context_words[4];
795};
796
797struct nes_hw_cqp_wqe {
798 __le32 wqe_words[16];
799};
800
801struct nes_hw_qp_wqe {
802 __le32 wqe_words[32];
803};
804
805struct nes_hw_cqe {
806 __le32 cqe_words[8];
807};
808
809struct nes_hw_ceqe {
810 __le32 ceqe_words[2];
811};
812
813struct nes_hw_aeqe {
814 __le32 aeqe_words[4];
815};
816
817struct nes_cqp_request {
818 union {
819 u64 cqp_callback_context;
820 void *cqp_callback_pointer;
821 };
822 wait_queue_head_t waitq;
823 struct nes_hw_cqp_wqe cqp_wqe;
824 struct list_head list;
825 atomic_t refcount;
826 void (*cqp_callback)(struct nes_device *nesdev, struct nes_cqp_request *cqp_request);
827 u16 major_code;
828 u16 minor_code;
829 u8 waiting;
830 u8 request_done;
831 u8 dynamic;
832 u8 callback;
833};
834
835struct nes_hw_cqp {
836 struct nes_hw_cqp_wqe *sq_vbase;
837 dma_addr_t sq_pbase;
838 spinlock_t lock;
839 wait_queue_head_t waitq;
840 u16 qp_id;
841 u16 sq_head;
842 u16 sq_tail;
843 u16 sq_size;
844};
845
846#define NES_FIRST_FRAG_SIZE 128
847struct nes_first_frag {
848 u8 buffer[NES_FIRST_FRAG_SIZE];
849};
850
851struct nes_hw_nic {
852 struct nes_first_frag *first_frag_vbase; /* virtual address of first frags */
853 struct nes_hw_nic_sq_wqe *sq_vbase; /* virtual address of sq */
854 struct nes_hw_nic_rq_wqe *rq_vbase; /* virtual address of rq */
855 struct sk_buff *tx_skb[NES_NIC_WQ_SIZE];
856 struct sk_buff *rx_skb[NES_NIC_WQ_SIZE];
857 dma_addr_t frag_paddr[NES_NIC_WQ_SIZE];
858 unsigned long first_frag_overflow[BITS_TO_LONGS(NES_NIC_WQ_SIZE)];
859 dma_addr_t sq_pbase; /* PCI memory for host rings */
860 dma_addr_t rq_pbase; /* PCI memory for host rings */
861
862 u16 qp_id;
863 u16 sq_head;
864 u16 sq_tail;
865 u16 sq_size;
866 u16 rq_head;
867 u16 rq_tail;
868 u16 rq_size;
869 u8 replenishing_rq;
870 u8 reserved;
871
872 spinlock_t sq_lock;
873 spinlock_t rq_lock;
874};
875
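The sq_head/sq_tail/sq_size triplet (and its rq_* counterpart) describes a power-of-two ring: indices wrap by masking with size - 1, and one slot is left unused so head == tail always means empty. The free-slot arithmetic that the transmit path in nes_nic.c later in this patch repeats inline reduces to this sketch:

/* Sketch: free SQ slots in a power-of-two ring; matches the wqes_available
 * computation in nes_netdev_start_xmit(). */
static u16 nes_nic_sq_free_slots(u16 sq_head, u16 sq_tail, u16 sq_size)
{
	return ((sq_tail + sq_size) - sq_head - 1) & (sq_size - 1);
}
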
876struct nes_hw_nic_cq {
877 struct nes_hw_nic_cqe volatile *cq_vbase; /* PCI memory for host rings */
878 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
879 dma_addr_t cq_pbase; /* PCI memory for host rings */
880 int rx_cqes_completed;
881 int cqe_allocs_pending;
882 int rx_pkts_indicated;
883 u16 cq_head;
884 u16 cq_size;
885 u16 cq_number;
886 u8 cqes_pending;
887};
888
889struct nes_hw_qp {
890 struct nes_hw_qp_wqe *sq_vbase; /* PCI memory for host rings */
891 struct nes_hw_qp_wqe *rq_vbase; /* PCI memory for host rings */
892 void *q2_vbase; /* PCI memory for host rings */
893 dma_addr_t sq_pbase; /* PCI memory for host rings */
894 dma_addr_t rq_pbase; /* PCI memory for host rings */
895 dma_addr_t q2_pbase; /* PCI memory for host rings */
896 u32 qp_id;
897 u16 sq_head;
898 u16 sq_tail;
899 u16 sq_size;
900 u16 rq_head;
901 u16 rq_tail;
902 u16 rq_size;
903 u8 rq_encoded_size;
904 u8 sq_encoded_size;
905};
906
907struct nes_hw_cq {
908 struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */
909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
910 dma_addr_t cq_pbase; /* PCI memory for host rings */
911 u16 cq_head;
912 u16 cq_size;
913 u16 cq_number;
914};
915
916struct nes_hw_ceq {
917 struct nes_hw_ceqe volatile *ceq_vbase; /* PCI memory for host rings */
918 dma_addr_t ceq_pbase; /* PCI memory for host rings */
919 u16 ceq_head;
920 u16 ceq_size;
921};
922
923struct nes_hw_aeq {
924 struct nes_hw_aeqe volatile *aeq_vbase; /* PCI memory for host rings */
925 dma_addr_t aeq_pbase; /* PCI memory for host rings */
926 u16 aeq_head;
927 u16 aeq_size;
928};
929
930struct nic_qp_map {
931 u8 qpid;
932 u8 nic_index;
933 u8 logical_port;
934 u8 is_hnic;
935};
936
937#define NES_CQP_ARP_AEQ_INDEX_MASK 0x000f0000
938#define NES_CQP_ARP_AEQ_INDEX_SHIFT 16
939
940#define NES_CQP_APBVT_ADD 0x00008000
941#define NES_CQP_APBVT_NIC_SHIFT 16
942
943#define NES_ARP_ADD 1
944#define NES_ARP_DELETE 2
945#define NES_ARP_RESOLVE 3
946
947#define NES_MAC_SW_IDLE 0
948#define NES_MAC_SW_INTERRUPT 1
949#define NES_MAC_SW_MH 2
950
951struct nes_arp_entry {
952 u32 ip_addr;
953 u8 mac_addr[ETH_ALEN];
954};
955
956#define NES_NIC_FAST_TIMER 96
957#define NES_NIC_FAST_TIMER_LOW 40
958#define NES_NIC_FAST_TIMER_HIGH 1000
959#define DEFAULT_NES_QL_HIGH 256
960#define DEFAULT_NES_QL_LOW 16
961#define DEFAULT_NES_QL_TARGET 64
962#define DEFAULT_JUMBO_NES_QL_LOW 12
963#define DEFAULT_JUMBO_NES_QL_TARGET 40
964#define DEFAULT_JUMBO_NES_QL_HIGH 128
965#define NES_NIC_CQ_DOWNWARD_TREND 8
966
967struct nes_hw_tune_timer {
968 /* u16 cq_count; */
969 u16 threshold_low;
970 u16 threshold_target;
971 u16 threshold_high;
972 u16 timer_in_use;
973 u16 timer_in_use_old;
974 u16 timer_in_use_min;
975 u16 timer_in_use_max;
976 u8 timer_direction_upward;
977 u8 timer_direction_downward;
978 u16 cq_count_old;
979 u8 cq_direction_downward;
980};
981
982#define NES_TIMER_INT_LIMIT 2
983#define NES_TIMER_INT_LIMIT_DYNAMIC 10
984#define NES_TIMER_ENABLE_LIMIT 4
985#define NES_MAX_LINK_INTERRUPTS 128
986#define NES_MAX_LINK_CHECK 200
987
988struct nes_adapter {
989 u64 fw_ver;
990 unsigned long *allocated_qps;
991 unsigned long *allocated_cqs;
992 unsigned long *allocated_mrs;
993 unsigned long *allocated_pds;
994 unsigned long *allocated_arps;
995 struct nes_qp **qp_table;
996 struct workqueue_struct *work_q;
997
998 struct list_head list;
999 struct list_head active_listeners;
1000 /* list of the netdev's associated with each logical port */
1001 struct list_head nesvnic_list[4];
1002
1003 struct timer_list mh_timer;
1004 struct timer_list lc_timer;
1005 struct work_struct work;
1006 spinlock_t resource_lock;
1007 spinlock_t phy_lock;
1008 spinlock_t pbl_lock;
1009 spinlock_t periodic_timer_lock;
1010
1011 struct nes_arp_entry arp_table[NES_MAX_ARP_TABLE_SIZE];
1012
1013 /* Adapter CEQ and AEQs */
1014 struct nes_hw_ceq ceq[16];
1015 struct nes_hw_aeq aeq[8];
1016
1017 struct nes_hw_tune_timer tune_timer;
1018
1019 unsigned long doorbell_start;
1020
1021 u32 hw_rev;
1022 u32 vendor_id;
1023 u32 vendor_part_id;
1024 u32 device_cap_flags;
1025 u32 tick_delta;
1026 u32 timer_int_req;
1027 u32 arp_table_size;
1028 u32 next_arp_index;
1029
1030 u32 max_mr;
1031 u32 max_256pbl;
1032 u32 max_4kpbl;
1033 u32 free_256pbl;
1034 u32 free_4kpbl;
1035 u32 max_mr_size;
1036 u32 max_qp;
1037 u32 next_qp;
1038 u32 max_irrq;
1039 u32 max_qp_wr;
1040 u32 max_sge;
1041 u32 max_cq;
1042 u32 next_cq;
1043 u32 max_cqe;
1044 u32 max_pd;
1045 u32 base_pd;
1046 u32 next_pd;
1047 u32 hte_index_mask;
1048
1049 /* EEPROM information */
1050 u32 rx_pool_size;
1051 u32 tx_pool_size;
1052 u32 rx_threshold;
1053 u32 tcp_timer_core_clk_divisor;
1054 u32 iwarp_config;
1055 u32 cm_config;
1056 u32 sws_timer_config;
1057 u32 tcp_config1;
1058 u32 wqm_wat;
1059 u32 core_clock;
1060 u32 firmware_version;
1061
1062 u32 nic_rx_eth_route_err;
1063
1064 u32 et_rx_coalesce_usecs;
1065 u32 et_rx_max_coalesced_frames;
1066 u32 et_rx_coalesce_usecs_irq;
1067 u32 et_rx_max_coalesced_frames_irq;
1068 u32 et_pkt_rate_low;
1069 u32 et_rx_coalesce_usecs_low;
1070 u32 et_rx_max_coalesced_frames_low;
1071 u32 et_pkt_rate_high;
1072 u32 et_rx_coalesce_usecs_high;
1073 u32 et_rx_max_coalesced_frames_high;
1074 u32 et_rate_sample_interval;
1075 u32 timer_int_limit;
1076
1077 /* Adapter base MAC address */
1078 u32 mac_addr_low;
1079 u16 mac_addr_high;
1080
1081 u16 firmware_eeprom_offset;
1082 u16 software_eeprom_offset;
1083
1084 u16 max_irrq_wr;
1085
1086 /* pd config for each port */
1087 u16 pd_config_size[4];
1088 u16 pd_config_base[4];
1089
1090 u16 link_interrupt_count[4];
1091
1092 /* the phy index for each port */
1093 u8 phy_index[4];
1094 u8 mac_sw_state[4];
1095 u8 mac_link_down[4];
1096 u8 phy_type[4];
1097
1098 /* PCI information */
1099 unsigned int devfn;
1100 unsigned char bus_number;
1101 unsigned char OneG_Mode;
1102
1103 unsigned char ref_count;
1104 u8 netdev_count;
1105 u8 netdev_max; /* from host nic address count in EEPROM */
1106 u8 port_count;
1107 u8 virtwq;
1108 u8 et_use_adaptive_rx_coalesce;
1109 u8 adapter_fcn_count;
1110};
1111
1112struct nes_pbl {
1113 u64 *pbl_vbase;
1114 dma_addr_t pbl_pbase;
1115 struct page *page;
1116 unsigned long user_base;
1117 u32 pbl_size;
1118 struct list_head list;
1119 /* TODO: need to add list for two level tables */
1120};
1121
1122struct nes_listener {
1123 struct work_struct work;
1124 struct workqueue_struct *wq;
1125 struct nes_vnic *nesvnic;
1126 struct iw_cm_id *cm_id;
1127 struct list_head list;
1128 unsigned long socket;
1129 u8 accept_failed;
1130};
1131
1132struct nes_ib_device;
1133
1134struct nes_vnic {
1135 struct nes_ib_device *nesibdev;
1136 u64 sq_full;
1137 u64 sq_locked;
1138 u64 tso_requests;
1139 u64 segmented_tso_requests;
1140 u64 linearized_skbs;
1141 u64 tx_sw_dropped;
1142 u64 endnode_nstat_rx_discard;
1143 u64 endnode_nstat_rx_octets;
1144 u64 endnode_nstat_rx_frames;
1145 u64 endnode_nstat_tx_octets;
1146 u64 endnode_nstat_tx_frames;
1147 u64 endnode_ipv4_tcp_retransmits;
1148 /* void *mem; */
1149 struct nes_device *nesdev;
1150 struct net_device *netdev;
1151 struct vlan_group *vlan_grp;
1152 atomic_t rx_skbs_needed;
1153 atomic_t rx_skb_timer_running;
1154 int budget;
1155 u32 msg_enable;
1156 /* u32 tx_avail; */
1157 __be32 local_ipaddr;
1158 struct napi_struct napi;
1159 spinlock_t tx_lock; /* could use netdev tx lock? */
1160 struct timer_list rq_wqes_timer;
1161 u32 nic_mem_size;
1162 void *nic_vbase;
1163 dma_addr_t nic_pbase;
1164 struct nes_hw_nic nic;
1165 struct nes_hw_nic_cq nic_cq;
1166 u32 mcrq_qp_id;
1167 struct nes_ucontext *mcrq_ucontext;
1168 struct nes_cqp_request* (*get_cqp_request)(struct nes_device *nesdev);
1169 void (*post_cqp_request)(struct nes_device*, struct nes_cqp_request *, int);
1170 int (*mcrq_mcast_filter)(struct nes_vnic *nesvnic, __u8 *dmi_addr);
1171 struct net_device_stats netstats;
1172 /* used to put the netdev on the adapters logical port list */
1173 struct list_head list;
1174 u16 max_frame_size;
1175 u8 netdev_open;
1176 u8 linkup;
1177 u8 logical_port;
1178 u8 netdev_index; /* might not be needed, indexes nesdev->netdev */
1179 u8 perfect_filter_index;
1180 u8 nic_index;
1181 u8 qp_nic_index[4];
1182 u8 next_qp_nic_index;
1183 u8 of_device_registered;
1184 u8 rdma_enabled;
1185 u8 rx_checksum_disabled;
1186};
1187
1188struct nes_ib_device {
1189 struct ib_device ibdev;
1190 struct nes_vnic *nesvnic;
1191
1192 /* Virtual RNIC Limits */
1193 u32 max_mr;
1194 u32 max_qp;
1195 u32 max_cq;
1196 u32 max_pd;
1197 u32 num_mr;
1198 u32 num_qp;
1199 u32 num_cq;
1200 u32 num_pd;
1201};
1202
1203#define nes_vlan_rx vlan_hwaccel_receive_skb
1204#define nes_netif_rx netif_receive_skb
1205
1206#endif /* __NES_HW_H */
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
new file mode 100644
index 000000000000..b6cc265aa9a4
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -0,0 +1,1703 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40#include <linux/if_arp.h>
41#include <linux/if_vlan.h>
42#include <linux/ethtool.h>
43#include <net/tcp.h>
44
45#include <net/inet_common.h>
46#include <linux/inet.h>
47
48#include "nes.h"
49
50static struct nic_qp_map nic_qp_mapping_0[] = {
51 {16,0,0,1},{24,4,0,0},{28,8,0,0},{32,12,0,0},
52 {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0},
53 {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
54 {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
55};
56
57static struct nic_qp_map nic_qp_mapping_1[] = {
58 {18,1,1,1},{25,5,1,0},{29,9,1,0},{33,13,1,0},
59 {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
60};
61
62static struct nic_qp_map nic_qp_mapping_2[] = {
63 {20,2,2,1},{26,6,2,0},{30,10,2,0},{34,14,2,0}
64};
65
66static struct nic_qp_map nic_qp_mapping_3[] = {
67 {22,3,3,1},{27,7,3,0},{31,11,3,0},{35,15,3,0}
68};
69
70static struct nic_qp_map nic_qp_mapping_4[] = {
71 {28,8,0,0},{32,12,0,0}
72};
73
74static struct nic_qp_map nic_qp_mapping_5[] = {
75 {29,9,1,0},{33,13,1,0}
76};
77
78static struct nic_qp_map nic_qp_mapping_6[] = {
79 {30,10,2,0},{34,14,2,0}
80};
81
82static struct nic_qp_map nic_qp_mapping_7[] = {
83 {31,11,3,0},{35,15,3,0}
84};
85
86static struct nic_qp_map *nic_qp_mapping_per_function[] = {
87 nic_qp_mapping_0, nic_qp_mapping_1, nic_qp_mapping_2, nic_qp_mapping_3,
88 nic_qp_mapping_4, nic_qp_mapping_5, nic_qp_mapping_6, nic_qp_mapping_7
89};
90
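Each nic_qp_map entry is {qpid, nic_index, logical_port, is_hnic}, and the array above selects a table by PCI function: function 0 sees all sixteen NIC QPs, later functions progressively smaller slices. Looking up a mapping is then plain double indexing (a sketch; staying within the per-function table length, 16/8/4/4/2/2/2/2 entries, is the caller's responsibility):

/* Sketch: NIC QP map entry for a PCI function and a slot in its table. */
static struct nic_qp_map *nes_nic_qp_map(unsigned int pci_function,
		unsigned int slot)
{
	return &nic_qp_mapping_per_function[pci_function][slot];
}
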
91static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
92 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
93static int debug = -1;
94
95
96static int nes_netdev_open(struct net_device *);
97static int nes_netdev_stop(struct net_device *);
98static int nes_netdev_start_xmit(struct sk_buff *, struct net_device *);
99static struct net_device_stats *nes_netdev_get_stats(struct net_device *);
100static void nes_netdev_tx_timeout(struct net_device *);
101static int nes_netdev_set_mac_address(struct net_device *, void *);
102static int nes_netdev_change_mtu(struct net_device *, int);
103
104/**
105 * nes_netdev_poll - NAPI poll handler; drains the NIC CQ and re-arms it when done
106 */
107static int nes_netdev_poll(struct napi_struct *napi, int budget)
108{
109 struct nes_vnic *nesvnic = container_of(napi, struct nes_vnic, napi);
110 struct net_device *netdev = nesvnic->netdev;
111 struct nes_device *nesdev = nesvnic->nesdev;
112 struct nes_hw_nic_cq *nescq = &nesvnic->nic_cq;
113
114 nesvnic->budget = budget;
115 nescq->cqes_pending = 0;
116 nescq->rx_cqes_completed = 0;
117 nescq->cqe_allocs_pending = 0;
118 nescq->rx_pkts_indicated = 0;
119
120 nes_nic_ce_handler(nesdev, nescq);
121
122 if (nescq->cqes_pending == 0) {
123 netif_rx_complete(netdev, napi);
124 /* clear out completed cqes and arm */
125 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
126 nescq->cq_number | (nescq->cqe_allocs_pending << 16));
127 nes_read32(nesdev->regs+NES_CQE_ALLOC);
128 } else {
129 /* clear out completed cqes but don't arm */
130 nes_write32(nesdev->regs+NES_CQE_ALLOC,
131 nescq->cq_number | (nescq->cqe_allocs_pending << 16));
132 nes_debug(NES_DBG_NETDEV, "%s: exiting with work pending\n",
133 nesvnic->netdev->name);
134 }
135 return nescq->rx_pkts_indicated;
136}
137
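nes_netdev_poll() acknowledges consumed CQEs and, when idle, re-arms the CQ with one write to NES_CQE_ALLOC. Judging from the two calls above, the value packs the CQ number in the low bits, the consumed-CQE count at bit 16, and NES_CQE_ALLOC_NOTIFY_NEXT to request the next interrupt; a sketch of that composition (layout inferred from the calls, not from a register manual):

/* Sketch: compose a NES_CQE_ALLOC doorbell value as nes_netdev_poll() does. */
static u32 nes_cqe_alloc_doorbell(u16 cq_number, u16 consumed, int rearm)
{
	u32 value = cq_number | ((u32)consumed << 16);

	if (rearm)
		value |= NES_CQE_ALLOC_NOTIFY_NEXT;
	return value;
}
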
138
139/**
140 * nes_netdev_open - Activate the network interface; ifconfig
141 * ethx up.
142 */
143static int nes_netdev_open(struct net_device *netdev)
144{
145 u32 macaddr_low;
146 u16 macaddr_high;
147 struct nes_vnic *nesvnic = netdev_priv(netdev);
148 struct nes_device *nesdev = nesvnic->nesdev;
149 int ret;
150 int i;
151 struct nes_vnic *first_nesvnic;
152 u32 nic_active_bit;
153 u32 nic_active;
154
155 assert(nesdev != NULL);
156
157 first_nesvnic = list_entry(nesdev->nesadapter->nesvnic_list[nesdev->mac_index].next,
158 struct nes_vnic, list);
159
160 if (netif_msg_ifup(nesvnic))
161 printk(KERN_INFO PFX "%s: enabling interface\n", netdev->name);
162
163 ret = nes_init_nic_qp(nesdev, netdev);
164 if (ret) {
165 return ret;
166 }
167
168 netif_carrier_off(netdev);
169 netif_stop_queue(netdev);
170
171 if ((!nesvnic->of_device_registered) && (nesvnic->rdma_enabled)) {
172 nesvnic->nesibdev = nes_init_ofa_device(netdev);
173 if (nesvnic->nesibdev == NULL) {
174 printk(KERN_ERR PFX "%s: nesvnic->nesibdev alloc failed\n", netdev->name);
175 } else {
176 nesvnic->nesibdev->nesvnic = nesvnic;
177 ret = nes_register_ofa_device(nesvnic->nesibdev);
178 if (ret) {
179 printk(KERN_ERR PFX "%s: Unable to register RDMA device, ret = %d\n",
180 netdev->name, ret);
181 }
182 }
183 }
184 /* Set packet filters */
185 nic_active_bit = 1 << nesvnic->nic_index;
186 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
187 nic_active |= nic_active_bit;
188 nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
189 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
190 nic_active |= nic_active_bit;
191 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
192 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
193 nic_active |= nic_active_bit;
194 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
195
196 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
197 macaddr_high += (u16)netdev->dev_addr[1];
198 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
199 macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
200 macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
201 macaddr_low += (u32)netdev->dev_addr[5];
202
203 /* Program the various MAC regs */
204 for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
205 if (nesvnic->qp_nic_index[i] == 0xf) {
206 break;
207 }
208 nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
209 " (Addr:%08X) = %08X, HIGH = %08X.\n",
210 i, nesvnic->qp_nic_index[i],
211 NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8),
212 macaddr_low,
213 (u32)macaddr_high | NES_MAC_ADDR_VALID |
214 ((((u32)nesvnic->nic_index) << 16)));
215 nes_write_indexed(nesdev,
216 NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
217 macaddr_low);
218 nes_write_indexed(nesdev,
219 NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
220 (u32)macaddr_high | NES_MAC_ADDR_VALID |
221 ((((u32)nesvnic->nic_index) << 16)));
222 }
223
224
225 nes_write32(nesdev->regs+NES_CQE_ALLOC, NES_CQE_ALLOC_NOTIFY_NEXT |
226 nesvnic->nic_cq.cq_number);
227 nes_read32(nesdev->regs+NES_CQE_ALLOC);
228
229 if (first_nesvnic->linkup) {
230 /* Enable network packets */
231 nesvnic->linkup = 1;
232 netif_start_queue(netdev);
233 netif_carrier_on(netdev);
234 }
235 napi_enable(&nesvnic->napi);
236 nesvnic->netdev_open = 1;
237
238 return 0;
239}
240
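Both nes_netdev_open() and nes_netdev_set_mac_address() pack the six-byte station address into a 16-bit high half and a 32-bit low half before writing the perfect-filter registers. Pulled out as a helper for clarity (a sketch; the driver keeps the packing inline):

/* Sketch: split a 6-byte Ethernet address into the halves written to
 * NES_IDX_PERFECT_FILTER_HIGH/LOW: bytes 0-1 high, bytes 2-5 low. */
static void nes_pack_mac_addr(const unsigned char *addr,
		u16 *macaddr_high, u32 *macaddr_low)
{
	*macaddr_high = ((u16)addr[0] << 8) | addr[1];
	*macaddr_low = ((u32)addr[2] << 24) | ((u32)addr[3] << 16) |
			((u32)addr[4] << 8) | (u32)addr[5];
}
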
241
242/**
243 * nes_netdev_stop - deactivate the network interface; ifconfig ethx down
244 */
245static int nes_netdev_stop(struct net_device *netdev)
246{
247 struct nes_vnic *nesvnic = netdev_priv(netdev);
248 struct nes_device *nesdev = nesvnic->nesdev;
249 u32 nic_active_mask;
250 u32 nic_active;
251
252 nes_debug(NES_DBG_SHUTDOWN, "nesvnic=%p, nesdev=%p, netdev=%p %s\n",
253 nesvnic, nesdev, netdev, netdev->name);
254 if (nesvnic->netdev_open == 0)
255 return 0;
256
257 if (netif_msg_ifdown(nesvnic))
258 printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name);
259
260 /* Disable network packets */
261 napi_disable(&nesvnic->napi);
262 netif_stop_queue(netdev);
263 if ((nesdev->netdev[0] == netdev) && (nesvnic->logical_port == nesdev->mac_index)) {
264 nes_write_indexed(nesdev,
265 NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
266 }
267
268 nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
269 nes_write_indexed(nesdev, NES_IDX_PERFECT_FILTER_HIGH+
270 (nesvnic->perfect_filter_index*8), 0);
271 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_ACTIVE);
272 nic_active &= nic_active_mask;
273 nes_write_indexed(nesdev, NES_IDX_NIC_ACTIVE, nic_active);
274 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
275 nic_active &= nic_active_mask;
276 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
277 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE);
278 nic_active &= nic_active_mask;
279 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ENABLE, nic_active);
280 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
281 nic_active &= nic_active_mask;
282 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
283 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON);
284 nic_active &= nic_active_mask;
285 nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
286
287
288 if (nesvnic->of_device_registered) {
289 nes_destroy_ofa_device(nesvnic->nesibdev);
290 nesvnic->nesibdev = NULL;
291 nesvnic->of_device_registered = 0;
292 }
293 nes_destroy_nic_qp(nesvnic);
294
295 nesvnic->netdev_open = 0;
296
297 return 0;
298}
299
300
301/**
302 * nes_nic_send - post one skb to the NIC SQ as a single WQE
303 */
304static int nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
305{
306 struct nes_vnic *nesvnic = netdev_priv(netdev);
307 struct nes_device *nesdev = nesvnic->nesdev;
308 struct nes_hw_nic *nesnic = &nesvnic->nic;
309 struct nes_hw_nic_sq_wqe *nic_sqe;
310 struct tcphdr *tcph;
311 __le16 *wqe_fragment_length;
312 u32 wqe_misc;
313 u16 wqe_fragment_index = 1; /* first fragment (0) is used by copy buffer */
314 u16 skb_fragment_index;
315 dma_addr_t bus_address;
316
317 nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
318 wqe_fragment_length = (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
319
320 /* setup the VLAN tag if present */
321 if (vlan_tx_tag_present(skb)) {
322 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
323 netdev->name, vlan_tx_tag_get(skb));
324 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
325 wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
326 } else
327 wqe_misc = 0;
328
329 /* bump past the vlan tag */
330 wqe_fragment_length++;
331 /* wqe_fragment_address = (u64 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX]; */
332
333 if (skb->ip_summed == CHECKSUM_PARTIAL) {
334 tcph = tcp_hdr(skb);
335 if (1) {
336 if (skb_is_gso(skb)) {
337 /* nes_debug(NES_DBG_NIC_TX, "%s: TSO request... seg size = %u\n",
338 netdev->name, skb_is_gso(skb)); */
339 wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE |
340 NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
341 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
342 ((u32)tcph->doff) |
343 (((u32)(((unsigned char *)tcph) - skb->data)) << 4));
344 } else {
345 wqe_misc |= NES_NIC_SQ_WQE_COMPLETION;
346 }
347 }
348 } else { /* CHECKSUM_HW */
349 wqe_misc |= NES_NIC_SQ_WQE_DISABLE_CHKSUM | NES_NIC_SQ_WQE_COMPLETION;
350 }
351
352 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
353 skb->len);
354 memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
355 skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE), skb_headlen(skb)));
356 wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
357 skb_headlen(skb)));
358 wqe_fragment_length[1] = 0;
359 if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
360 if ((skb_shinfo(skb)->nr_frags + 1) > 4) {
361 nes_debug(NES_DBG_NIC_TX, "%s: Packet with %u fragments not sent, skb_headlen=%u\n",
362 netdev->name, skb_shinfo(skb)->nr_frags + 2, skb_headlen(skb));
363 kfree_skb(skb);
364 nesvnic->tx_sw_dropped++;
365 return NETDEV_TX_OK; /* skb was freed above; do not ask for a retry */
366 }
367 set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
368 bus_address = pci_map_single(nesdev->pcidev, skb->data + NES_FIRST_FRAG_SIZE,
369 skb_headlen(skb) - NES_FIRST_FRAG_SIZE, PCI_DMA_TODEVICE);
370 wqe_fragment_length[wqe_fragment_index++] =
371 cpu_to_le16(skb_headlen(skb) - NES_FIRST_FRAG_SIZE);
372 wqe_fragment_length[wqe_fragment_index] = 0;
373 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
374 ((u64)(bus_address)));
375 nesnic->tx_skb[nesnic->sq_head] = skb;
376 }
377
378 if (skb_headlen(skb) == skb->len) {
379 if (skb_headlen(skb) <= NES_FIRST_FRAG_SIZE) {
380 nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_2_1_IDX] = 0;
381 nesnic->tx_skb[nesnic->sq_head] = NULL;
382 dev_kfree_skb(skb);
383 }
384 } else {
385 /* Deal with Fragments */
386 nesnic->tx_skb[nesnic->sq_head] = skb;
387 for (skb_fragment_index = 0; skb_fragment_index < skb_shinfo(skb)->nr_frags;
388 skb_fragment_index++) {
389 bus_address = pci_map_page( nesdev->pcidev,
390 skb_shinfo(skb)->frags[skb_fragment_index].page,
391 skb_shinfo(skb)->frags[skb_fragment_index].page_offset,
392 skb_shinfo(skb)->frags[skb_fragment_index].size,
393 PCI_DMA_TODEVICE);
394 wqe_fragment_length[wqe_fragment_index] =
395 cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size);
396 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
397 bus_address);
398 wqe_fragment_index++;
399 if (wqe_fragment_index < 5)
400 wqe_fragment_length[wqe_fragment_index] = 0;
401 }
402 }
403
404 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX, wqe_misc);
405 nesnic->sq_head++;
406 nesnic->sq_head &= nesnic->sq_size - 1;
407
408 return NETDEV_TX_OK;
409}
410
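nes_nic_send() lays a packet across one WQE: up to NES_FIRST_FRAG_SIZE head bytes are copied into the preset first-frag buffer, head overflow and page fragments are DMA-mapped into the remaining slots, and the __le16 length array (whose slot 0 doubles as the VLAN tag) is zero-terminated. A sketch of recording one mapped fragment, mirroring the loop above with the driver's own helpers:

/* Sketch: record one DMA-mapped scatter fragment in an SQ WQE, as the
 * fragment loop in nes_nic_send() does. idx counts from 1; slot 0 is the
 * preset copy-buffer fragment. */
static void nes_nic_fill_frag(struct nes_hw_nic_sq_wqe *nic_sqe,
		__le16 *wqe_fragment_length, u16 idx,
		dma_addr_t bus_address, u16 len)
{
	wqe_fragment_length[idx] = cpu_to_le16(len);
	set_wqe_64bit_value(nic_sqe->wqe_words,
			NES_NIC_SQ_WQE_FRAG0_LOW_IDX + (2 * idx),
			(u64)bus_address);
	if (idx + 1 < 5)
		wqe_fragment_length[idx + 1] = 0; /* terminate the list */
}
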
411
412/**
413 * nes_netdev_start_xmit - queue an skb for transmit, segmenting oversized TSO requests
414 */
415static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
416{
417 struct nes_vnic *nesvnic = netdev_priv(netdev);
418 struct nes_device *nesdev = nesvnic->nesdev;
419 struct nes_hw_nic *nesnic = &nesvnic->nic;
420 struct nes_hw_nic_sq_wqe *nic_sqe;
421 struct tcphdr *tcph;
422 /* struct udphdr *udph; */
423#define NES_MAX_TSO_FRAGS 18
424 /* 64K segment plus overflow on each side */
425 dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
426 dma_addr_t bus_address;
427 u32 tso_frag_index;
428 u32 tso_frag_count;
429 u32 tso_wqe_length;
430 u32 curr_tcp_seq;
431 u32 wqe_count=1;
432 u32 send_rc;
433 struct iphdr *iph;
434 unsigned long flags;
435 __le16 *wqe_fragment_length;
436 u32 nr_frags;
437 u32 original_first_length;
438 /* u64 *wqe_fragment_address; */
439 /* first fragment (0) is used by copy buffer */
440 u16 wqe_fragment_index=1;
441 u16 hoffset;
442 u16 nhoffset;
443 u16 wqes_needed;
444 u16 wqes_available;
445 u32 old_head;
446 u32 wqe_misc;
447
448 /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
449 " (%u frags), tso_size=%u\n",
450 netdev->name, skb->len, skb_headlen(skb),
451 skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
452 */
453
454 if (!netif_carrier_ok(netdev))
455 return NETDEV_TX_OK;
456
457 if (netif_queue_stopped(netdev))
458 return NETDEV_TX_BUSY;
459
460 local_irq_save(flags);
461 if (!spin_trylock(&nesnic->sq_lock)) {
462 local_irq_restore(flags);
463 nesvnic->sq_locked++;
464 return NETDEV_TX_LOCKED;
465 }
466
467 /* Check if SQ is full */
468 if ((((nesnic->sq_tail+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) == 1) {
469 if (!netif_queue_stopped(netdev)) {
470 netif_stop_queue(netdev);
471 barrier();
472 if ((((((volatile u16)nesnic->sq_tail)+(nesnic->sq_size*2))-nesnic->sq_head) & (nesnic->sq_size - 1)) != 1) {
473 netif_start_queue(netdev);
474 goto sq_no_longer_full;
475 }
476 }
477 nesvnic->sq_full++;
478 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
479 return NETDEV_TX_BUSY;
480 }
481
482sq_no_longer_full:
483 nr_frags = skb_shinfo(skb)->nr_frags;
484 if (skb_headlen(skb) > NES_FIRST_FRAG_SIZE) {
485 nr_frags++;
486 }
487 /* Check if too many fragments */
488 if (unlikely((nr_frags > 4))) {
489 if (skb_is_gso(skb)) {
490 nesvnic->segmented_tso_requests++;
491 nesvnic->tso_requests++;
492 old_head = nesnic->sq_head;
493 /* Basically 4 fragments available per WQE with extended fragments */
494 wqes_needed = nr_frags >> 2;
495 wqes_needed += (nr_frags&3)?1:0;
496 wqes_available = (((nesnic->sq_tail+nesnic->sq_size)-nesnic->sq_head) - 1) &
497 (nesnic->sq_size - 1);
498
499 if (unlikely(wqes_needed > wqes_available)) {
500 if (!netif_queue_stopped(netdev)) {
501 netif_stop_queue(netdev);
502 barrier();
503 wqes_available = (((((volatile u16)nesnic->sq_tail)+nesnic->sq_size)-nesnic->sq_head) - 1) &
504 (nesnic->sq_size - 1);
505 if (wqes_needed <= wqes_available) {
506 netif_start_queue(netdev);
507 goto tso_sq_no_longer_full;
508 }
509 }
510 nesvnic->sq_full++;
511 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
512 nes_debug(NES_DBG_NIC_TX, "%s: HNIC SQ full- TSO request has too many frags!\n",
513 netdev->name);
514 return NETDEV_TX_BUSY;
515 }
516tso_sq_no_longer_full:
517 /* Map all the buffers */
518 for (tso_frag_count=0; tso_frag_count < skb_shinfo(skb)->nr_frags;
519 tso_frag_count++) {
520 tso_bus_address[tso_frag_count] = pci_map_page( nesdev->pcidev,
521 skb_shinfo(skb)->frags[tso_frag_count].page,
522 skb_shinfo(skb)->frags[tso_frag_count].page_offset,
523 skb_shinfo(skb)->frags[tso_frag_count].size,
524 PCI_DMA_TODEVICE);
525 }
526
527 tso_frag_index = 0;
528 curr_tcp_seq = ntohl(tcp_hdr(skb)->seq);
529 hoffset = skb_transport_header(skb) - skb->data;
530 nhoffset = skb_network_header(skb) - skb->data;
531 original_first_length = hoffset + ((((struct tcphdr *)skb_transport_header(skb))->doff)<<2);
532
533 for (wqe_count=0; wqe_count<((u32)wqes_needed); wqe_count++) {
534 tso_wqe_length = 0;
535 nic_sqe = &nesnic->sq_vbase[nesnic->sq_head];
536 wqe_fragment_length =
537 (__le16 *)&nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
538 /* setup the VLAN tag if present */
539 if (vlan_tx_tag_present(skb)) {
540 nes_debug(NES_DBG_NIC_TX, "%s: VLAN packet to send... VLAN = %08X\n",
541 netdev->name, vlan_tx_tag_get(skb) );
542 wqe_misc = NES_NIC_SQ_WQE_TAGVALUE_ENABLE;
543 wqe_fragment_length[0] = (__force __le16) vlan_tx_tag_get(skb);
544 } else
545 wqe_misc = 0;
546
547 /* bump past the vlan tag */
548 wqe_fragment_length++;
549
550 /* Assumes header totally fits in allocated buffer and is in first fragment */
551 if (original_first_length > NES_FIRST_FRAG_SIZE) {
552 nes_debug(NES_DBG_NIC_TX, "ERROR: SKB header too big, headlen=%u, FIRST_FRAG_SIZE=%u\n",
553 original_first_length, NES_FIRST_FRAG_SIZE);
554 nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
555 " (%u frags), tso_size=%u\n",
556 netdev->name,
557 skb->len, skb_headlen(skb),
558 skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
559 }
560 memcpy(&nesnic->first_frag_vbase[nesnic->sq_head].buffer,
561 skb->data, min(((unsigned int)NES_FIRST_FRAG_SIZE),
562 original_first_length));
563 iph = (struct iphdr *)
564 (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[nhoffset]);
565 tcph = (struct tcphdr *)
566 (&nesnic->first_frag_vbase[nesnic->sq_head].buffer[hoffset]);
567 if ((wqe_count+1)!=(u32)wqes_needed) {
568 tcph->fin = 0;
569 tcph->psh = 0;
570 tcph->rst = 0;
571 tcph->urg = 0;
572 }
573 if (wqe_count) {
574 tcph->syn = 0;
575 }
576 tcph->seq = htonl(curr_tcp_seq);
577 wqe_fragment_length[0] = cpu_to_le16(min(((unsigned int)NES_FIRST_FRAG_SIZE),
578 original_first_length));
579
580 wqe_fragment_index = 1;
581 if ((wqe_count==0) && (skb_headlen(skb) > original_first_length)) {
582 set_bit(nesnic->sq_head, nesnic->first_frag_overflow);
583 bus_address = pci_map_single(nesdev->pcidev, skb->data + original_first_length,
584 skb_headlen(skb) - original_first_length, PCI_DMA_TODEVICE);
585 wqe_fragment_length[wqe_fragment_index++] =
586 cpu_to_le16(skb_headlen(skb) - original_first_length);
587 wqe_fragment_length[wqe_fragment_index] = 0;
588 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
589 bus_address);
590 }
591 while (wqe_fragment_index < 5) {
592 wqe_fragment_length[wqe_fragment_index] =
593 cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size);
594 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index),
595 (u64)tso_bus_address[tso_frag_index]);
596 wqe_fragment_index++;
597 tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size;
598 if (wqe_fragment_index < 5)
599 wqe_fragment_length[wqe_fragment_index] = 0;
600 if (tso_frag_index == tso_frag_count)
601 break;
602 }
603 if ((wqe_count+1) == (u32)wqes_needed) {
604 nesnic->tx_skb[nesnic->sq_head] = skb;
605 } else {
606 nesnic->tx_skb[nesnic->sq_head] = NULL;
607 }
608 wqe_misc |= NES_NIC_SQ_WQE_COMPLETION | (u16)skb_is_gso(skb);
609 if ((tso_wqe_length + original_first_length) > skb_is_gso(skb)) {
610 wqe_misc |= NES_NIC_SQ_WQE_LSO_ENABLE;
611 } else {
612 iph->tot_len = htons(tso_wqe_length + original_first_length - nhoffset);
613 }
614
615 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_MISC_IDX,
616 wqe_misc);
617 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_LSO_INFO_IDX,
618 ((u32)tcph->doff) | (((u32)hoffset) << 4));
619
620 set_wqe_32bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_TOTAL_LENGTH_IDX,
621 tso_wqe_length + original_first_length);
622 curr_tcp_seq += tso_wqe_length;
623 nesnic->sq_head++;
624 nesnic->sq_head &= nesnic->sq_size-1;
625 }
626 } else {
627 nesvnic->linearized_skbs++;
628 hoffset = skb_transport_header(skb) - skb->data;
629 nhoffset = skb_network_header(skb) - skb->data;
630 skb_linearize(skb);
631 skb_set_transport_header(skb, hoffset);
632 skb_set_network_header(skb, nhoffset);
633 send_rc = nes_nic_send(skb, netdev);
634 if (send_rc != NETDEV_TX_OK) {
635 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
636 return NETDEV_TX_OK;
637 }
638 }
639 } else {
640 send_rc = nes_nic_send(skb, netdev);
641 if (send_rc != NETDEV_TX_OK) {
642 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
643 return NETDEV_TX_OK;
644 }
645 }
646
647 barrier();
648
649 if (wqe_count)
650 nes_write32(nesdev->regs+NES_WQE_ALLOC,
651 (wqe_count << 24) | (1 << 23) | nesvnic->nic.qp_id);
652
653 netdev->trans_start = jiffies;
654 spin_unlock_irqrestore(&nesnic->sq_lock, flags);
655
656 return NETDEV_TX_OK;
657}
658
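A TSO send packs four scatter fragments per WQE, so the WQE count above is a ceiling divide of the fragment count by four, done with shift and mask. As a sketch with a worked value:

/* Sketch: WQEs needed for nr_frags fragments at four per WQE, matching
 * nes_netdev_start_xmit(); e.g. nr_frags = 9 -> (9 >> 2) + 1 = 3 WQEs. */
static u16 nes_tso_wqes_needed(u32 nr_frags)
{
	return (nr_frags >> 2) + ((nr_frags & 3) ? 1 : 0);
}
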
659
660/**
661 * nes_netdev_get_stats
662 */
663static struct net_device_stats *nes_netdev_get_stats(struct net_device *netdev)
664{
665 struct nes_vnic *nesvnic = netdev_priv(netdev);
666 struct nes_device *nesdev = nesvnic->nesdev;
667 u64 u64temp;
668 u32 u32temp;
669
670 u32temp = nes_read_indexed(nesdev,
671 NES_IDX_ENDNODE0_NSTAT_RX_DISCARD + (nesvnic->nic_index*0x200));
672 nesvnic->netstats.rx_dropped += u32temp;
673 nesvnic->endnode_nstat_rx_discard += u32temp;
674
675 u64temp = (u64)nes_read_indexed(nesdev,
676 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO + (nesvnic->nic_index*0x200));
677 u64temp += ((u64)nes_read_indexed(nesdev,
678 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
679
680 nesvnic->endnode_nstat_rx_octets += u64temp;
681 nesvnic->netstats.rx_bytes += u64temp;
682
683 u64temp = (u64)nes_read_indexed(nesdev,
684 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO + (nesvnic->nic_index*0x200));
685 u64temp += ((u64)nes_read_indexed(nesdev,
686 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
687
688 nesvnic->endnode_nstat_rx_frames += u64temp;
689 nesvnic->netstats.rx_packets += u64temp;
690
691 u64temp = (u64)nes_read_indexed(nesdev,
692 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO + (nesvnic->nic_index*0x200));
693 u64temp += ((u64)nes_read_indexed(nesdev,
694 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI + (nesvnic->nic_index*0x200))) << 32;
695
696 nesvnic->endnode_nstat_tx_octets += u64temp;
697 nesvnic->netstats.tx_bytes += u64temp;
698
699 u64temp = (u64)nes_read_indexed(nesdev,
700 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO + (nesvnic->nic_index*0x200));
701 u64temp += ((u64)nes_read_indexed(nesdev,
702 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI + (nesvnic->nic_index*0x200))) << 32;
703
704 nesvnic->endnode_nstat_tx_frames += u64temp;
705 nesvnic->netstats.tx_packets += u64temp;
706
707 u32temp = nes_read_indexed(nesdev,
708 NES_IDX_MAC_RX_SHORT_FRAMES + (nesvnic->nesdev->mac_index*0x200));
709 nesvnic->netstats.rx_dropped += u32temp;
710 nesvnic->nesdev->mac_rx_errors += u32temp;
711 nesvnic->nesdev->mac_rx_short_frames += u32temp;
712
713 u32temp = nes_read_indexed(nesdev,
714 NES_IDX_MAC_RX_OVERSIZED_FRAMES + (nesvnic->nesdev->mac_index*0x200));
715 nesvnic->netstats.rx_dropped += u32temp;
716 nesvnic->nesdev->mac_rx_errors += u32temp;
717 nesvnic->nesdev->mac_rx_oversized_frames += u32temp;
718
719 u32temp = nes_read_indexed(nesdev,
720 NES_IDX_MAC_RX_JABBER_FRAMES + (nesvnic->nesdev->mac_index*0x200));
721 nesvnic->netstats.rx_dropped += u32temp;
722 nesvnic->nesdev->mac_rx_errors += u32temp;
723 nesvnic->nesdev->mac_rx_jabber_frames += u32temp;
724
725 u32temp = nes_read_indexed(nesdev,
726 NES_IDX_MAC_RX_SYMBOL_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
727 nesvnic->netstats.rx_dropped += u32temp;
728 nesvnic->nesdev->mac_rx_errors += u32temp;
729 nesvnic->nesdev->mac_rx_symbol_err_frames += u32temp;
730
731 u32temp = nes_read_indexed(nesdev,
732 NES_IDX_MAC_RX_LENGTH_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
733 nesvnic->netstats.rx_length_errors += u32temp;
734 nesvnic->nesdev->mac_rx_errors += u32temp;
735
736 u32temp = nes_read_indexed(nesdev,
737 NES_IDX_MAC_RX_CRC_ERR_FRAMES + (nesvnic->nesdev->mac_index*0x200));
738 nesvnic->nesdev->mac_rx_errors += u32temp;
739 nesvnic->nesdev->mac_rx_crc_errors += u32temp;
740 nesvnic->netstats.rx_crc_errors += u32temp;
741
742 u32temp = nes_read_indexed(nesdev,
743 NES_IDX_MAC_TX_ERRORS + (nesvnic->nesdev->mac_index*0x200));
744 nesvnic->nesdev->mac_tx_errors += u32temp;
745 nesvnic->netstats.tx_errors += u32temp;
746
747 return &nesvnic->netstats;
748}
749
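The endnode octet and frame counters above are 64-bit values split across _LO/_HI indexed-register pairs; the composition nes_netdev_get_stats() repeats inline reduces to the sketch below (it assumes, as the driver's usage implies, that the pair can be read LO-then-HI with no explicit latch step):

/* Sketch: read a 64-bit statistic exposed as LO/HI indexed registers. */
static u64 nes_read_indexed64(struct nes_device *nesdev,
		u32 lo_index, u32 hi_index)
{
	u64 value = (u64)nes_read_indexed(nesdev, lo_index);

	value |= ((u64)nes_read_indexed(nesdev, hi_index)) << 32;
	return value;
}
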
750
751/**
752 * nes_netdev_tx_timeout - transmit watchdog; currently only logs the event
753 */
754static void nes_netdev_tx_timeout(struct net_device *netdev)
755{
756 struct nes_vnic *nesvnic = netdev_priv(netdev);
757
758 if (netif_msg_timer(nesvnic))
759 nes_debug(NES_DBG_NIC_TX, "%s: tx timeout\n", netdev->name);
760}
761
762
763/**
764 * nes_netdev_set_mac_address
765 */
766static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
767{
768 struct nes_vnic *nesvnic = netdev_priv(netdev);
769 struct nes_device *nesdev = nesvnic->nesdev;
770 struct sockaddr *mac_addr = p;
771 int i;
772 u32 macaddr_low;
773 u16 macaddr_high;
774
775 if (!is_valid_ether_addr(mac_addr->sa_data))
776 return -EADDRNOTAVAIL;
777
778 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
779 printk(KERN_INFO PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
780 __func__, netdev->addr_len,
781 mac_addr->sa_data[0], mac_addr->sa_data[1],
782 mac_addr->sa_data[2], mac_addr->sa_data[3],
783 mac_addr->sa_data[4], mac_addr->sa_data[5]);
784 macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
785 macaddr_high += (u16)netdev->dev_addr[1];
786 macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
787 macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
788 macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
789 macaddr_low += (u32)netdev->dev_addr[5];
790
791 for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
792 if (nesvnic->qp_nic_index[i] == 0xf) {
793 break;
794 }
795 nes_write_indexed(nesdev,
796 NES_IDX_PERFECT_FILTER_LOW + (nesvnic->qp_nic_index[i] * 8),
797 macaddr_low);
798 nes_write_indexed(nesdev,
799 NES_IDX_PERFECT_FILTER_HIGH + (nesvnic->qp_nic_index[i] * 8),
800 (u32)macaddr_high | NES_MAC_ADDR_VALID |
801 ((((u32)nesvnic->nic_index) << 16)));
802 }
803 return 0;
804}
805
806
807/**
808 * nes_netdev_set_multicast_list
809 */
810void nes_netdev_set_multicast_list(struct net_device *netdev)
811{
812 struct nes_vnic *nesvnic = netdev_priv(netdev);
813 struct nes_device *nesdev = nesvnic->nesdev;
814 struct dev_mc_list *multicast_addr;
815 u32 nic_active_bit;
816 u32 nic_active;
817 u32 perfect_filter_register_address;
818 u32 macaddr_low;
819 u16 macaddr_high;
820 u8 mc_all_on = 0;
821 u8 mc_index;
822 int mc_nic_index = -1;
823
824 nic_active_bit = 1 << nesvnic->nic_index;
825
826 if (netdev->flags & IFF_PROMISC) {
827 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
828 nic_active |= nic_active_bit;
829 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
830 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
831 nic_active |= nic_active_bit;
832 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
833 mc_all_on = 1;
834 } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) ||
835 (nesvnic->nic_index > 3)) {
836 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
837 nic_active |= nic_active_bit;
838 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
839 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
840 nic_active &= ~nic_active_bit;
841 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
842 mc_all_on = 1;
843 } else {
844 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
845 nic_active &= ~nic_active_bit;
846 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
847 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
848 nic_active &= ~nic_active_bit;
849 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
850 }
851
852 nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
853 netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0,
854 (netdev->flags & IFF_ALLMULTI)?1:0);
855 if (!mc_all_on) {
856 multicast_addr = netdev->mc_list;
857 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80;
858 perfect_filter_register_address += nesvnic->nic_index*0x40;
859 for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) {
860 while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0))
861 multicast_addr = multicast_addr->next;
862
863 if (mc_nic_index < 0)
864 mc_nic_index = nesvnic->nic_index;
865 if (multicast_addr) {
866 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
867 multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
868 multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
869 multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
870 perfect_filter_register_address+(mc_index * 8), mc_nic_index);
871 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
872 macaddr_high += (u16)multicast_addr->dmi_addr[1];
873 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
874 macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
875 macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
876 macaddr_low += (u32)multicast_addr->dmi_addr[5];
877 nes_write_indexed(nesdev,
878 perfect_filter_register_address+(mc_index * 8),
879 macaddr_low);
880 nes_write_indexed(nesdev,
881 perfect_filter_register_address+4+(mc_index * 8),
882 (u32)macaddr_high | NES_MAC_ADDR_VALID |
883 ((((u32)(1<<mc_nic_index)) << 16)));
884 multicast_addr = multicast_addr->next;
885 } else {
886 nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
887 perfect_filter_register_address+(mc_index * 8));
888 nes_write_indexed(nesdev,
889 perfect_filter_register_address+4+(mc_index * 8),
890 0);
891 }
892 }
893 }
894}
895
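The multicast perfect-filter entries start 0x80 past NES_IDX_PERFECT_FILTER_LOW, in per-NIC banks of 0x40 bytes (eight 8-byte low/high pairs), which is the address arithmetic nes_netdev_set_multicast_list() builds up incrementally. Collected into one expression as a sketch:

/* Sketch: indexed-register address of the LOW word of multicast perfect
 * filter entry mc_index in nic_index's bank; the HIGH word is 4 bytes up. */
static u32 nes_mc_filter_low_addr(u32 nic_index, u32 mc_index)
{
	return NES_IDX_PERFECT_FILTER_LOW + 0x80 +
		(nic_index * 0x40) + (mc_index * 8);
}
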
896
897/**
898 * nes_netdev_change_mtu
899 */
900static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
901{
902 struct nes_vnic *nesvnic = netdev_priv(netdev);
903 struct nes_device *nesdev = nesvnic->nesdev;
904 int ret = 0;
905 u8 jumbomode=0;
906
907 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
908 return -EINVAL;
909
910 netdev->mtu = new_mtu;
911 nesvnic->max_frame_size = new_mtu+ETH_HLEN;
912
913 if (netdev->mtu > 1500) {
914 jumbomode=1;
915 }
916 nes_nic_init_timer_defaults(nesdev, jumbomode);
917
918 if (netif_running(netdev)) {
919 nes_netdev_stop(netdev);
920 nes_netdev_open(netdev);
921 }
922
923 return ret;
924}
925
926
927/**
928 * nes_netdev_exit - destroy network device
929 */
930void nes_netdev_exit(struct nes_vnic *nesvnic)
931{
932 struct net_device *netdev = nesvnic->netdev;
933 struct nes_ib_device *nesibdev = nesvnic->nesibdev;
934
935 nes_debug(NES_DBG_SHUTDOWN, "\n");
936
937 /* destroy the ibdevice if RDMA enabled */
938 if ((nesvnic->rdma_enabled) && (nesvnic->of_device_registered)) {
939 nes_destroy_ofa_device(nesibdev);
940 nesvnic->of_device_registered = 0;
941 nesvnic->nesibdev = NULL;
942 }
943 unregister_netdev(netdev);
944 nes_debug(NES_DBG_SHUTDOWN, "\n");
945}
946
947
948#define NES_ETHTOOL_STAT_COUNT 55
949static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
950 "Link Change Interrupts",
951 "Linearized SKBs",
952 "T/GSO Requests",
953 "Pause Frames Sent",
954 "Pause Frames Received",
955 "Internal Routing Errors",
956 "SQ SW Dropped SKBs",
957 "SQ Locked",
958 "SQ Full",
959 "Segmented TSO Requests",
960 "Rx Symbol Errors",
961 "Rx Jabber Errors",
962 "Rx Oversized Frames",
963 "Rx Short Frames",
964 "Endnode Rx Discards",
965 "Endnode Rx Octets",
966 "Endnode Rx Frames",
967 "Endnode Tx Octets",
968 "Endnode Tx Frames",
969 "mh detected",
970 "mh pauses",
971 "Retransmission Count",
972 "CM Connects",
973 "CM Accepts",
974 "Disconnects",
975 "Connected Events",
976 "Connect Requests",
977 "CM Rejects",
978 "ModifyQP Timeouts",
979 "CreateQPs",
980 "SW DestroyQPs",
981 "DestroyQPs",
982 "CM Closes",
983 "CM Packets Sent",
984 "CM Packets Bounced",
985 "CM Packets Created",
986 "CM Packets Rcvd",
987 "CM Packets Dropped",
988 "CM Packets Retrans",
989 "CM Listens Created",
990 "CM Listens Destroyed",
991 "CM Backlog Drops",
992 "CM Loopbacks",
993 "CM Nodes Created",
994 "CM Nodes Destroyed",
995 "CM Accel Drops",
996 "CM Resets Received",
997 "Timer Inits",
998 "CQ Depth 1",
999 "CQ Depth 4",
1000 "CQ Depth 16",
1001 "CQ Depth 24",
1002 "CQ Depth 32",
1003 "CQ Depth 128",
1004 "CQ Depth 256",
1005};
1006
1007
1008/**
1009 * nes_netdev_get_rx_csum
1010 */
1011static u32 nes_netdev_get_rx_csum(struct net_device *netdev)
1012{
1013 struct nes_vnic *nesvnic = netdev_priv(netdev);
1014
1015 if (nesvnic->rx_checksum_disabled)
1016 return 0;
1017 else
1018 return 1;
1019}
1020
1021
1022/**
1023 * nes_netdev_set_rx_csum
1024 */
1025static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
1026{
1027 struct nes_vnic *nesvnic = netdev_priv(netdev);
1028
1029 if (enable)
1030 nesvnic->rx_checksum_disabled = 0;
1031 else
1032 nesvnic->rx_checksum_disabled = 1;
1033 return 0;
1034}
1035
1036
1037/**
1038 * nes_netdev_get_stats_count
1039 */
1040static int nes_netdev_get_stats_count(struct net_device *netdev)
1041{
1042 return NES_ETHTOOL_STAT_COUNT;
1043}
1044
1045
1046/**
1047 * nes_netdev_get_strings
1048 */
1049static void nes_netdev_get_strings(struct net_device *netdev, u32 stringset,
1050 u8 *ethtool_strings)
1051{
1052 if (stringset == ETH_SS_STATS)
1053 memcpy(ethtool_strings,
1054 &nes_ethtool_stringset,
1055 sizeof(nes_ethtool_stringset));
1056}
1057
1058
1059/**
1060 * nes_netdev_get_ethtool_stats
1061 */
1062static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
1063 struct ethtool_stats *target_ethtool_stats, u64 *target_stat_values)
1064{
1065 u64 u64temp;
1066 struct nes_vnic *nesvnic = netdev_priv(netdev);
1067 struct nes_device *nesdev = nesvnic->nesdev;
1068 u32 nic_count;
1069 u32 u32temp;
1070
1071 target_ethtool_stats->n_stats = NES_ETHTOOL_STAT_COUNT;
1072 target_stat_values[0] = nesvnic->nesdev->link_status_interrupts;
1073 target_stat_values[1] = nesvnic->linearized_skbs;
1074 target_stat_values[2] = nesvnic->tso_requests;
1075
1076 u32temp = nes_read_indexed(nesdev,
1077 NES_IDX_MAC_TX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
1078 nesvnic->nesdev->mac_pause_frames_sent += u32temp;
1079 target_stat_values[3] = nesvnic->nesdev->mac_pause_frames_sent;
1080
1081 u32temp = nes_read_indexed(nesdev,
1082 NES_IDX_MAC_RX_PAUSE_FRAMES + (nesvnic->nesdev->mac_index*0x200));
1083 nesvnic->nesdev->mac_pause_frames_received += u32temp;
1084
1085 u32temp = nes_read_indexed(nesdev,
1086 NES_IDX_PORT_RX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
1087 nesvnic->nesdev->port_rx_discards += u32temp;
1088 nesvnic->netstats.rx_dropped += u32temp;
1089
1090 u32temp = nes_read_indexed(nesdev,
1091 NES_IDX_PORT_TX_DISCARDS + (nesvnic->nesdev->mac_index*0x40));
1092 nesvnic->nesdev->port_tx_discards += u32temp;
1093 nesvnic->netstats.tx_dropped += u32temp;
1094
1095 for (nic_count = 0; nic_count < NES_MAX_PORT_COUNT; nic_count++) {
1096 if (nesvnic->qp_nic_index[nic_count] == 0xf)
1097 break;
1098
1099 u32temp = nes_read_indexed(nesdev,
1100 NES_IDX_ENDNODE0_NSTAT_RX_DISCARD +
1101 (nesvnic->qp_nic_index[nic_count]*0x200));
1102 nesvnic->netstats.rx_dropped += u32temp;
1103 nesvnic->endnode_nstat_rx_discard += u32temp;
1104
1105 u64temp = (u64)nes_read_indexed(nesdev,
1106 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_LO +
1107 (nesvnic->qp_nic_index[nic_count]*0x200));
1108 u64temp += ((u64)nes_read_indexed(nesdev,
1109 NES_IDX_ENDNODE0_NSTAT_RX_OCTETS_HI +
1110 (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
1111
1112 nesvnic->endnode_nstat_rx_octets += u64temp;
1113 nesvnic->netstats.rx_bytes += u64temp;
1114
1115 u64temp = (u64)nes_read_indexed(nesdev,
1116 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_LO +
1117 (nesvnic->qp_nic_index[nic_count]*0x200));
1118 u64temp += ((u64)nes_read_indexed(nesdev,
1119 NES_IDX_ENDNODE0_NSTAT_RX_FRAMES_HI +
1120 (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
1121
1122 nesvnic->endnode_nstat_rx_frames += u64temp;
1123 nesvnic->netstats.rx_packets += u64temp;
1124
1125 u64temp = (u64)nes_read_indexed(nesdev,
1126 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_LO +
1127 (nesvnic->qp_nic_index[nic_count]*0x200));
1128 u64temp += ((u64)nes_read_indexed(nesdev,
1129 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI +
1130 (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
1131
1132 nesvnic->endnode_nstat_tx_octets += u64temp;
1133 nesvnic->netstats.tx_bytes += u64temp;
1134
1135 u64temp = (u64)nes_read_indexed(nesdev,
1136 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO +
1137 (nesvnic->qp_nic_index[nic_count]*0x200));
1138 u64temp += ((u64)nes_read_indexed(nesdev,
1139 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI +
1140 (nesvnic->qp_nic_index[nic_count]*0x200))) << 32;
1141
1142 nesvnic->endnode_nstat_tx_frames += u64temp;
1143 nesvnic->netstats.tx_packets += u64temp;
1144
1145 u32temp = nes_read_indexed(nesdev,
1146 NES_IDX_IPV4_TCP_REXMITS + (nesvnic->qp_nic_index[nic_count]*0x200));
1147 nesvnic->endnode_ipv4_tcp_retransmits += u32temp;
1148 }
1149
1150 target_stat_values[4] = nesvnic->nesdev->mac_pause_frames_received;
1151 target_stat_values[5] = nesdev->nesadapter->nic_rx_eth_route_err;
1152 target_stat_values[6] = nesvnic->tx_sw_dropped;
1153 target_stat_values[7] = nesvnic->sq_locked;
1154 target_stat_values[8] = nesvnic->sq_full;
1155 target_stat_values[9] = nesvnic->segmented_tso_requests;
1156 target_stat_values[10] = nesvnic->nesdev->mac_rx_symbol_err_frames;
1157 target_stat_values[11] = nesvnic->nesdev->mac_rx_jabber_frames;
1158 target_stat_values[12] = nesvnic->nesdev->mac_rx_oversized_frames;
1159 target_stat_values[13] = nesvnic->nesdev->mac_rx_short_frames;
1160 target_stat_values[14] = nesvnic->endnode_nstat_rx_discard;
1161 target_stat_values[15] = nesvnic->endnode_nstat_rx_octets;
1162 target_stat_values[16] = nesvnic->endnode_nstat_rx_frames;
1163 target_stat_values[17] = nesvnic->endnode_nstat_tx_octets;
1164 target_stat_values[18] = nesvnic->endnode_nstat_tx_frames;
1165 target_stat_values[19] = mh_detected;
1166 target_stat_values[20] = mh_pauses_sent;
1167 target_stat_values[21] = nesvnic->endnode_ipv4_tcp_retransmits;
1168 target_stat_values[22] = atomic_read(&cm_connects);
1169 target_stat_values[23] = atomic_read(&cm_accepts);
1170 target_stat_values[24] = atomic_read(&cm_disconnects);
1171 target_stat_values[25] = atomic_read(&cm_connecteds);
1172 target_stat_values[26] = atomic_read(&cm_connect_reqs);
1173 target_stat_values[27] = atomic_read(&cm_rejects);
1174 target_stat_values[28] = atomic_read(&mod_qp_timouts);
1175 target_stat_values[29] = atomic_read(&qps_created);
1176 target_stat_values[30] = atomic_read(&sw_qps_destroyed);
1177 target_stat_values[31] = atomic_read(&qps_destroyed);
1178 target_stat_values[32] = atomic_read(&cm_closes);
1179 target_stat_values[33] = cm_packets_sent;
1180 target_stat_values[34] = cm_packets_bounced;
1181 target_stat_values[35] = cm_packets_created;
1182 target_stat_values[36] = cm_packets_received;
1183 target_stat_values[37] = cm_packets_dropped;
1184 target_stat_values[38] = cm_packets_retrans;
1185 target_stat_values[39] = cm_listens_created;
1186 target_stat_values[40] = cm_listens_destroyed;
1187 target_stat_values[41] = cm_backlog_drops;
1188 target_stat_values[42] = atomic_read(&cm_loopbacks);
1189 target_stat_values[43] = atomic_read(&cm_nodes_created);
1190 target_stat_values[44] = atomic_read(&cm_nodes_destroyed);
1191 target_stat_values[45] = atomic_read(&cm_accel_dropped_pkts);
1192 target_stat_values[46] = atomic_read(&cm_resets_recvd);
1193 target_stat_values[47] = int_mod_timer_init;
1194 target_stat_values[48] = int_mod_cq_depth_1;
1195 target_stat_values[49] = int_mod_cq_depth_4;
1196 target_stat_values[50] = int_mod_cq_depth_16;
1197 target_stat_values[51] = int_mod_cq_depth_24;
1198 target_stat_values[52] = int_mod_cq_depth_32;
1199 target_stat_values[53] = int_mod_cq_depth_128;
1200 target_stat_values[54] = int_mod_cq_depth_256;
1201
1202}
1203
1204
1205/**
1206 * nes_netdev_get_drvinfo
1207 */
1208static void nes_netdev_get_drvinfo(struct net_device *netdev,
1209 struct ethtool_drvinfo *drvinfo)
1210{
1211 struct nes_vnic *nesvnic = netdev_priv(netdev);
1212
1213 strcpy(drvinfo->driver, DRV_NAME);
1214 strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
1215 strcpy(drvinfo->fw_version, "TBD");
1216 strcpy(drvinfo->version, DRV_VERSION);
1217 drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
1218 drvinfo->testinfo_len = 0;
1219 drvinfo->eedump_len = 0;
1220 drvinfo->regdump_len = 0;
1221}
1222
1223
1224/**
1225 * nes_netdev_set_coalesce
1226 */
1227static int nes_netdev_set_coalesce(struct net_device *netdev,
1228 struct ethtool_coalesce *et_coalesce)
1229{
1230 struct nes_vnic *nesvnic = netdev_priv(netdev);
1231 struct nes_device *nesdev = nesvnic->nesdev;
1232 struct nes_adapter *nesadapter = nesdev->nesadapter;
1233 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
1234 unsigned long flags;
1235
1236 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
1237 if (et_coalesce->rx_max_coalesced_frames_low) {
1238 shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
1239 }
1240 if (et_coalesce->rx_max_coalesced_frames_irq) {
1241 shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
1242 }
1243 if (et_coalesce->rx_max_coalesced_frames_high) {
1244 shared_timer->threshold_high = et_coalesce->rx_max_coalesced_frames_high;
1245 }
1246 if (et_coalesce->rx_coalesce_usecs_low) {
1247 shared_timer->timer_in_use_min = et_coalesce->rx_coalesce_usecs_low;
1248 }
1249 if (et_coalesce->rx_coalesce_usecs_high) {
1250 shared_timer->timer_in_use_max = et_coalesce->rx_coalesce_usecs_high;
1251 }
1252 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
1253
1254 /* using this to drive total interrupt moderation */
1255 nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
1256 if (et_coalesce->use_adaptive_rx_coalesce) {
1257 nesadapter->et_use_adaptive_rx_coalesce = 1;
1258 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
1259 nesadapter->et_rx_coalesce_usecs_irq = 0;
1260 if (et_coalesce->pkt_rate_low) {
1261 nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
1262 }
1263 } else {
1264 nesadapter->et_use_adaptive_rx_coalesce = 0;
1265 nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
1266 if (nesadapter->et_rx_coalesce_usecs_irq) {
1267 nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
1268 0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
1269 }
1270 }
1271 return 0;
1272}
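
/*
 * For illustration (not part of the driver): the fields consumed above
 * map onto the standard ethtool coalescing parameters, so an
 * administrator could exercise this path with, e.g. (the interface
 * name is a placeholder):
 *
 *	ethtool -C eth2 adaptive-rx on pkt-rate-low 10000
 *	ethtool -C eth2 adaptive-rx off rx-usecs-irq 128
 *
 * The second form disables dynamic moderation and programs a fixed
 * interrupt period into NES_PERIODIC_CONTROL.
 */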
1273
1274
1275/**
1276 * nes_netdev_get_coalesce
1277 */
1278static int nes_netdev_get_coalesce(struct net_device *netdev,
1279 struct ethtool_coalesce *et_coalesce)
1280{
1281 struct nes_vnic *nesvnic = netdev_priv(netdev);
1282 struct nes_device *nesdev = nesvnic->nesdev;
1283 struct nes_adapter *nesadapter = nesdev->nesadapter;
1284 struct ethtool_coalesce temp_et_coalesce;
1285 struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
1286 unsigned long flags;
1287
1288 memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
1289 temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
1290 temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
1291 temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
1292 temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
1293 spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
1294 temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
1295 temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
1296 temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
1297 temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
1298 temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
1299 if (nesadapter->et_use_adaptive_rx_coalesce) {
1300 temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
1301 }
1302 spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
1303 memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
1304 return 0;
1305}
1306
1307
1308/**
1309 * nes_netdev_get_pauseparam
1310 */
1311static void nes_netdev_get_pauseparam(struct net_device *netdev,
1312 struct ethtool_pauseparam *et_pauseparam)
1313{
1314 struct nes_vnic *nesvnic = netdev_priv(netdev);
1315
1316 et_pauseparam->autoneg = 0;
1317 et_pauseparam->rx_pause = (nesvnic->nesdev->disable_rx_flow_control == 0) ? 1:0;
1318 et_pauseparam->tx_pause = (nesvnic->nesdev->disable_tx_flow_control == 0) ? 1:0;
1319}
1320
1321
1322/**
1323 * nes_netdev_set_pauseparam
1324 */
1325static int nes_netdev_set_pauseparam(struct net_device *netdev,
1326 struct ethtool_pauseparam *et_pauseparam)
1327{
1328 struct nes_vnic *nesvnic = netdev_priv(netdev);
1329 struct nes_device *nesdev = nesvnic->nesdev;
1330 u32 u32temp;
1331
1332 if (et_pauseparam->autoneg) {
1333 /* TODO: should return unsupported */
1334 return 0;
1335 }
1336 if ((et_pauseparam->tx_pause == 1) && (nesdev->disable_tx_flow_control == 1)) {
1337 u32temp = nes_read_indexed(nesdev,
1338 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1339 u32temp |= NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1340		nes_write_indexed(nesdev,
1341				NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1342 nesdev->disable_tx_flow_control = 0;
1343 } else if ((et_pauseparam->tx_pause == 0) && (nesdev->disable_tx_flow_control == 0)) {
1344 u32temp = nes_read_indexed(nesdev,
1345 NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200));
1346 u32temp &= ~NES_IDX_MAC_TX_CONFIG_ENABLE_PAUSE;
1347		nes_write_indexed(nesdev,
1348				NES_IDX_MAC_TX_CONFIG + (nesdev->mac_index*0x200), u32temp);
1349 nesdev->disable_tx_flow_control = 1;
1350 }
1351 if ((et_pauseparam->rx_pause == 1) && (nesdev->disable_rx_flow_control == 1)) {
1352 u32temp = nes_read_indexed(nesdev,
1353 NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
1354 u32temp &= ~NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
1355 nes_write_indexed(nesdev,
1356 NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
1357 nesdev->disable_rx_flow_control = 0;
1358 } else if ((et_pauseparam->rx_pause == 0) && (nesdev->disable_rx_flow_control == 0)) {
1359 u32temp = nes_read_indexed(nesdev,
1360 NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40));
1361 u32temp |= NES_IDX_MPP_DEBUG_PORT_DISABLE_PAUSE;
1362 nes_write_indexed(nesdev,
1363 NES_IDX_MPP_DEBUG + (nesdev->mac_index*0x40), u32temp);
1364 nesdev->disable_rx_flow_control = 1;
1365 }
1366
1367 return 0;
1368}
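
/*
 * For illustration (not part of the driver): this handler sits behind
 * the standard pause-parameter interface, e.g. (interface name is a
 * placeholder):
 *
 *	ethtool -A eth2 rx on tx off
 *
 * Autonegotiated pause is not supported; only the forced rx/tx
 * settings take effect.
 */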
1369
1370
1371/**
1372 * nes_netdev_get_settings
1373 */
1374static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
1375{
1376 struct nes_vnic *nesvnic = netdev_priv(netdev);
1377 struct nes_device *nesdev = nesvnic->nesdev;
1378 struct nes_adapter *nesadapter = nesdev->nesadapter;
1379 u16 phy_data;
1380
1381 et_cmd->duplex = DUPLEX_FULL;
1382 et_cmd->port = PORT_MII;
1383 if (nesadapter->OneG_Mode) {
1384 et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
1385 et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
1386 et_cmd->speed = SPEED_1000;
1387 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
1388 &phy_data);
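		/* register 0 is the MII control register (BMCR); bit 12
		 * (0x1000) is the autonegotiation-enable bit.
		 */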
1389 if (phy_data&0x1000) {
1390 et_cmd->autoneg = AUTONEG_ENABLE;
1391 } else {
1392 et_cmd->autoneg = AUTONEG_DISABLE;
1393 }
1394 et_cmd->transceiver = XCVR_EXTERNAL;
1395 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1396 } else {
1397 if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
1398 et_cmd->transceiver = XCVR_EXTERNAL;
1399 et_cmd->port = PORT_FIBRE;
1400 et_cmd->supported = SUPPORTED_FIBRE;
1401 et_cmd->advertising = ADVERTISED_FIBRE;
1402 et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
1403 } else {
1404 et_cmd->transceiver = XCVR_INTERNAL;
1405 et_cmd->supported = SUPPORTED_10000baseT_Full;
1406 et_cmd->advertising = ADVERTISED_10000baseT_Full;
1407 et_cmd->phy_address = nesdev->mac_index;
1408 }
1409 et_cmd->speed = SPEED_10000;
1410 et_cmd->autoneg = AUTONEG_DISABLE;
1411 }
1412 et_cmd->maxtxpkt = 511;
1413 et_cmd->maxrxpkt = 511;
1414 return 0;
1415}
1416
1417
1418/**
1419 * nes_netdev_set_settings
1420 */
1421static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd)
1422{
1423 struct nes_vnic *nesvnic = netdev_priv(netdev);
1424 struct nes_device *nesdev = nesvnic->nesdev;
1425 struct nes_adapter *nesadapter = nesdev->nesadapter;
1426 u16 phy_data;
1427
1428 if (nesadapter->OneG_Mode) {
1429 nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
1430 &phy_data);
1431 if (et_cmd->autoneg) {
1432 /* Turn on Full duplex, Autoneg, and restart autonegotiation */
1433 phy_data |= 0x1300;
1434 } else {
1435			/* Turn off autoneg */
1436 phy_data &= ~0x1000;
1437 }
1438 nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
1439 phy_data);
1440 }
1441
1442 return 0;
1443}
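
/*
 * For illustration (not part of the driver): on a 1G port this handler
 * is reached through the standard speed/duplex interface, e.g.
 * (interface name is a placeholder):
 *
 *	ethtool -s eth2 autoneg on
 *
 * 10G ports ignore the request, as their speed and duplex are fixed.
 */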
1444
1445
1446static struct ethtool_ops nes_ethtool_ops = {
1447 .get_link = ethtool_op_get_link,
1448 .get_settings = nes_netdev_get_settings,
1449 .set_settings = nes_netdev_set_settings,
1450 .get_tx_csum = ethtool_op_get_tx_csum,
1451 .get_rx_csum = nes_netdev_get_rx_csum,
1452 .get_sg = ethtool_op_get_sg,
1453 .get_strings = nes_netdev_get_strings,
1454 .get_stats_count = nes_netdev_get_stats_count,
1455 .get_ethtool_stats = nes_netdev_get_ethtool_stats,
1456 .get_drvinfo = nes_netdev_get_drvinfo,
1457 .get_coalesce = nes_netdev_get_coalesce,
1458 .set_coalesce = nes_netdev_set_coalesce,
1459 .get_pauseparam = nes_netdev_get_pauseparam,
1460 .set_pauseparam = nes_netdev_set_pauseparam,
1461 .set_tx_csum = ethtool_op_set_tx_csum,
1462 .set_rx_csum = nes_netdev_set_rx_csum,
1463 .set_sg = ethtool_op_set_sg,
1464 .get_tso = ethtool_op_get_tso,
1465 .set_tso = ethtool_op_set_tso,
1466};
1467
1468
1469static void nes_netdev_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1470{
1471 struct nes_vnic *nesvnic = netdev_priv(netdev);
1472 struct nes_device *nesdev = nesvnic->nesdev;
1473 u32 u32temp;
1474
1475 nesvnic->vlan_grp = grp;
1476
1477 /* Enable/Disable VLAN Stripping */
1478 u32temp = nes_read_indexed(nesdev, NES_IDX_PCIX_DIAG);
1479 if (grp)
1480 u32temp &= 0xfdffffff;
1481 else
1482 u32temp |= 0x02000000;
1483
1484 nes_write_indexed(nesdev, NES_IDX_PCIX_DIAG, u32temp);
1485}
1486
1487
1488/**
1489 * nes_netdev_init - initialize network device
1490 */
1491struct net_device *nes_netdev_init(struct nes_device *nesdev,
1492 void __iomem *mmio_addr)
1493{
1494 u64 u64temp;
1495 struct nes_vnic *nesvnic = NULL;
1496 struct net_device *netdev;
1497 struct nic_qp_map *curr_qp_map;
1498 u32 u32temp;
1499 u16 phy_data;
1500 u16 temp_phy_data;
1501
1502 netdev = alloc_etherdev(sizeof(struct nes_vnic));
1503 if (!netdev) {
1504		printk(KERN_ERR PFX "nesvnic etherdev alloc failed\n");
1505 return NULL;
1506 }
1507
1508 nes_debug(NES_DBG_INIT, "netdev = %p, %s\n", netdev, netdev->name);
1509
1510 SET_NETDEV_DEV(netdev, &nesdev->pcidev->dev);
1511
1512 nesvnic = netdev_priv(netdev);
1513 memset(nesvnic, 0, sizeof(*nesvnic));
1514
1515 netdev->open = nes_netdev_open;
1516 netdev->stop = nes_netdev_stop;
1517 netdev->hard_start_xmit = nes_netdev_start_xmit;
1518 netdev->get_stats = nes_netdev_get_stats;
1519 netdev->tx_timeout = nes_netdev_tx_timeout;
1520 netdev->set_mac_address = nes_netdev_set_mac_address;
1521 netdev->set_multicast_list = nes_netdev_set_multicast_list;
1522 netdev->change_mtu = nes_netdev_change_mtu;
1523 netdev->watchdog_timeo = NES_TX_TIMEOUT;
1524 netdev->irq = nesdev->pcidev->irq;
1525 netdev->mtu = ETH_DATA_LEN;
1526 netdev->hard_header_len = ETH_HLEN;
1527 netdev->addr_len = ETH_ALEN;
1528 netdev->type = ARPHRD_ETHER;
1529 netdev->features = NETIF_F_HIGHDMA;
1530 netdev->ethtool_ops = &nes_ethtool_ops;
1531 netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
1532 nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
1533 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1534 netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
1535 netdev->features |= NETIF_F_LLTX;
1536
1537 /* Fill in the port structure */
1538 nesvnic->netdev = netdev;
1539 nesvnic->nesdev = nesdev;
1540 nesvnic->msg_enable = netif_msg_init(debug, default_msg);
1541 nesvnic->netdev_index = nesdev->netdev_count;
1542 nesvnic->perfect_filter_index = nesdev->nesadapter->netdev_count;
1543 nesvnic->max_frame_size = netdev->mtu+netdev->hard_header_len;
1544
1545 curr_qp_map = nic_qp_mapping_per_function[PCI_FUNC(nesdev->pcidev->devfn)];
1546 nesvnic->nic.qp_id = curr_qp_map[nesdev->netdev_count].qpid;
1547 nesvnic->nic_index = curr_qp_map[nesdev->netdev_count].nic_index;
1548 nesvnic->logical_port = curr_qp_map[nesdev->netdev_count].logical_port;
1549
1550	/* Set up the burned-in MAC address */
1551 u64temp = (u64)nesdev->nesadapter->mac_addr_low;
1552 u64temp += ((u64)nesdev->nesadapter->mac_addr_high) << 32;
1553 u64temp += nesvnic->nic_index;
1554 netdev->dev_addr[0] = (u8)(u64temp>>40);
1555 netdev->dev_addr[1] = (u8)(u64temp>>32);
1556 netdev->dev_addr[2] = (u8)(u64temp>>24);
1557 netdev->dev_addr[3] = (u8)(u64temp>>16);
1558 netdev->dev_addr[4] = (u8)(u64temp>>8);
1559 netdev->dev_addr[5] = (u8)u64temp;
1560 memcpy(netdev->perm_addr, netdev->dev_addr, 6);
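	/*
	 * Worked example (illustrative values): with a burned-in base
	 * address of 00:12:34:56:78:00 and nic_index 2, the arithmetic
	 * above yields the station address 00:12:34:56:78:02.
	 */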
1561
1562 if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) {
1563		netdev->features |= NETIF_F_GSO | NETIF_F_TSO |
1564				NETIF_F_SG | NETIF_F_IP_CSUM;
1565 } else {
1566 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1567 }
1568
1569 nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
1570 " nic_index = %d, logical_port = %d, mac_index = %d.\n",
1571 nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
1572 nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
1573
1574 if (nesvnic->nesdev->nesadapter->port_count == 1) {
1575 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1576 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
1577 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
1578 nesvnic->qp_nic_index[2] = 0xf;
1579 nesvnic->qp_nic_index[3] = 0xf;
1580 } else {
1581 nesvnic->qp_nic_index[2] = nesvnic->nic_index + 2;
1582 nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
1583 }
1584 } else {
1585 if (nesvnic->nesdev->nesadapter->port_count == 2) {
1586 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1587 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2;
1588 nesvnic->qp_nic_index[2] = 0xf;
1589 nesvnic->qp_nic_index[3] = 0xf;
1590 } else {
1591 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1592 nesvnic->qp_nic_index[1] = 0xf;
1593 nesvnic->qp_nic_index[2] = 0xf;
1594 nesvnic->qp_nic_index[3] = 0xf;
1595 }
1596 }
1597 nesvnic->next_qp_nic_index = 0;
1598
1599 if (nesdev->netdev_count == 0) {
1600 nesvnic->rdma_enabled = 1;
1601 } else {
1602 nesvnic->rdma_enabled = 0;
1603 }
1604 nesvnic->nic_cq.cq_number = nesvnic->nic.qp_id;
1605 spin_lock_init(&nesvnic->tx_lock);
1606 nesdev->netdev[nesdev->netdev_count] = netdev;
1607
1608 nes_debug(NES_DBG_INIT, "Adding nesvnic (%p) to the adapters nesvnic_list for MAC%d.\n",
1609 nesvnic, nesdev->mac_index);
1610 list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
1611
1612 if ((nesdev->netdev_count == 0) &&
1613 (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
1614 nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
1615 NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
1616 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1617 (0x200*(nesvnic->logical_port&1)));
1618 u32temp |= 0x00200000;
1619 nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1620 (0x200*(nesvnic->logical_port&1)), u32temp);
1621 u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
1622 (0x200*(nesvnic->logical_port&1)) );
1623 if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
1624 if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
1625 nes_init_phy(nesdev);
1626 nes_read_10G_phy_reg(nesdev, 1,
1627 nesdev->nesadapter->phy_index[nesvnic->logical_port]);
1628 temp_phy_data = (u16)nes_read_indexed(nesdev,
1629 NES_IDX_MAC_MDIO_CONTROL);
1630 u32temp = 20;
1631 do {
1632 nes_read_10G_phy_reg(nesdev, 1,
1633 nesdev->nesadapter->phy_index[nesvnic->logical_port]);
1634 phy_data = (u16)nes_read_indexed(nesdev,
1635 NES_IDX_MAC_MDIO_CONTROL);
1636 if ((phy_data == temp_phy_data) || (!(--u32temp)))
1637 break;
1638 temp_phy_data = phy_data;
1639 } while (1);
1640 if (phy_data & 4) {
1641					nes_debug(NES_DBG_INIT, "The Link is UP!\n");
1642 nesvnic->linkup = 1;
1643 } else {
1644					nes_debug(NES_DBG_INIT, "The Link is DOWN!\n");
1645 }
1646 } else {
1647				nes_debug(NES_DBG_INIT, "The Link is UP!\n");
1648 nesvnic->linkup = 1;
1649 }
1650 }
1651 nes_debug(NES_DBG_INIT, "Setting up MAC interrupt mask.\n");
1652 /* clear the MAC interrupt status, assumes direct logical to physical mapping */
1653 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port));
1654		nes_debug(NES_DBG_INIT, "MAC interrupt status = 0x%X.\n", u32temp);
1655 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS+(0x200*nesvnic->logical_port), u32temp);
1656
1657 if (nesdev->nesadapter->phy_type[nesvnic->logical_port] != NES_PHY_TYPE_IRIS)
1658 nes_init_phy(nesdev);
1659
1660 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesvnic->logical_port),
1661 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
1662 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
1663 }
1664
1665 return netdev;
1666}
1667
1668
1669/**
1670 * nes_netdev_destroy - destroy network device structure
1671 */
1672void nes_netdev_destroy(struct net_device *netdev)
1673{
1674 struct nes_vnic *nesvnic = netdev_priv(netdev);
1675
1676	/* the 'stop' method is invoked by the Linux network stack before unregister */
1677 /* nes_netdev_stop(netdev); */
1678
1679 list_del(&nesvnic->list);
1680
1681 if (nesvnic->of_device_registered) {
1682 nes_destroy_ofa_device(nesvnic->nesibdev);
1683 }
1684
1685 free_netdev(netdev);
1686}
1687
1688
1689/**
1690 * nes_nic_cm_xmit -- CM calls this to send out pkts
1691 */
1692int nes_nic_cm_xmit(struct sk_buff *skb, struct net_device *netdev)
1693{
1694 int ret;
1695
1696 skb->dev = netdev;
1697 ret = dev_queue_xmit(skb);
1698 if (ret) {
1699 nes_debug(NES_DBG_CM, "Bad return code from dev_queue_xmit %d\n", ret);
1700 }
1701
1702 return ret;
1703}
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
new file mode 100644
index 000000000000..e64306bce80b
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -0,0 +1,112 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect. All rights reserved.
3 * Copyright (c) 2005 Topspin Communications. All rights reserved.
4 * Copyright (c) 2005 Cisco Systems. All rights reserved.
5 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 */
36
37#ifndef NES_USER_H
38#define NES_USER_H
39
40#include <linux/types.h>
41
42#define NES_ABI_USERSPACE_VER 1
43#define NES_ABI_KERNEL_VER 1
44
45/*
46 * Make sure that all structs defined in this file remain laid out so
47 * that they pack the same way on 32-bit and 64-bit architectures (to
48 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
49 * In particular do not use pointer types -- pass pointers in __u64
50 * instead.
51 */
52
53struct nes_alloc_ucontext_req {
54 __u32 reserved32;
55 __u8 userspace_ver;
56 __u8 reserved8[3];
57};
58
59struct nes_alloc_ucontext_resp {
60 __u32 max_pds; /* maximum pds allowed for this user process */
61 __u32 max_qps; /* maximum qps allowed for this user process */
62 __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
63 __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
64 __u8 kernel_ver;
65 __u8 reserved[2];
66};
67
68struct nes_alloc_pd_resp {
69 __u32 pd_id;
70 __u32 mmap_db_index;
71};
72
73struct nes_create_cq_req {
74 __u64 user_cq_buffer;
75 __u32 mcrqf;
76 __u8 reserved[4];
77};
78
79struct nes_create_qp_req {
80 __u64 user_wqe_buffers;
81};
82
83enum iwnes_memreg_type {
84 IWNES_MEMREG_TYPE_MEM = 0x0000,
85 IWNES_MEMREG_TYPE_QP = 0x0001,
86 IWNES_MEMREG_TYPE_CQ = 0x0002,
87 IWNES_MEMREG_TYPE_MW = 0x0003,
88 IWNES_MEMREG_TYPE_FMR = 0x0004,
89};
90
91struct nes_mem_reg_req {
92 __u32 reg_type; /* indicates if id is memory, QP or CQ */
93 __u32 reserved;
94};
95
96struct nes_create_cq_resp {
97 __u32 cq_id;
98 __u32 cq_size;
99 __u32 mmap_db_index;
100 __u32 reserved;
101};
102
103struct nes_create_qp_resp {
104 __u32 qp_id;
105 __u32 actual_sq_size;
106 __u32 actual_rq_size;
107 __u32 mmap_sq_db_index;
108 __u32 mmap_rq_db_index;
109 __u32 nes_drv_opt;
110};
111
112#endif /* NES_USER_H */
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
new file mode 100644
index 000000000000..c4ec6ac63461
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -0,0 +1,917 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/if_vlan.h>
41#include <linux/crc32.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#include <linux/init.h>
46
47#include <asm/io.h>
48#include <asm/irq.h>
49#include <asm/byteorder.h>
50
51#include "nes.h"
52
53
54
55static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
56
57u32 mh_detected;
58u32 mh_pauses_sent;
59
60/**
61 * nes_read_eeprom_values - read the adapter configuration from the serial EEPROM
62 */
63int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesadapter)
64{
65 u32 mac_addr_low;
66 u16 mac_addr_high;
67 u16 eeprom_data;
68 u16 eeprom_offset;
69 u16 next_section_address;
70 u16 sw_section_ver;
71 u8 major_ver = 0;
72 u8 minor_ver = 0;
73
74 /* TODO: deal with EEPROM endian issues */
75 if (nesadapter->firmware_eeprom_offset == 0) {
76 /* Read the EEPROM Parameters */
77 eeprom_data = nes_read16_eeprom(nesdev->regs, 0);
78 nes_debug(NES_DBG_HW, "EEPROM Offset 0 = 0x%04X\n", eeprom_data);
79 eeprom_offset = 2 + (((eeprom_data & 0x007f) << 3) <<
80 ((eeprom_data & 0x0080) >> 7));
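		/* The header word appears to encode the section length in
		 * 8-byte units in its low seven bits, with bit 7 doubling
		 * the scale; the "+ 2" skips the header word itself.
		 */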
81 nes_debug(NES_DBG_HW, "Firmware Offset = 0x%04X\n", eeprom_offset);
82 nesadapter->firmware_eeprom_offset = eeprom_offset;
83 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
84 if (eeprom_data != 0x5746) {
85 nes_debug(NES_DBG_HW, "Not a valid Firmware Image = 0x%04X\n", eeprom_data);
86 return -1;
87 }
88
89 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
90 nes_debug(NES_DBG_HW, "EEPROM Offset %u = 0x%04X\n",
91 eeprom_offset + 2, eeprom_data);
92 eeprom_offset += ((eeprom_data & 0x00ff) << 3) << ((eeprom_data & 0x0100) >> 8);
93 nes_debug(NES_DBG_HW, "Software Offset = 0x%04X\n", eeprom_offset);
94 nesadapter->software_eeprom_offset = eeprom_offset;
95 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 4);
96 if (eeprom_data != 0x5753) {
97			printk(KERN_ERR PFX "Not a valid Software Image = 0x%04X\n", eeprom_data);
98 return -1;
99 }
100 sw_section_ver = nes_read16_eeprom(nesdev->regs, nesadapter->software_eeprom_offset + 6);
101 nes_debug(NES_DBG_HW, "Software section version number = 0x%04X\n",
102 sw_section_ver);
103
104 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
105 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
106 eeprom_offset + 2, eeprom_data);
107 next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
108 ((eeprom_data & 0x0100) >> 8));
109 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
110 if (eeprom_data != 0x414d) {
111 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
112 eeprom_data);
113 goto no_fw_rev;
114 }
115 eeprom_offset = next_section_address;
116
117 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
118 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
119 eeprom_offset + 2, eeprom_data);
120 next_section_address = eeprom_offset + (((eeprom_data & 0x00ff) << 3) <<
121 ((eeprom_data & 0x0100) >> 8));
122 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
123 if (eeprom_data != 0x4f52) {
124 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x4f52 but was 0x%04X\n",
125 eeprom_data);
126 goto no_fw_rev;
127 }
128 eeprom_offset = next_section_address;
129
130 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
131 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
132 eeprom_offset + 2, eeprom_data);
133 next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
134 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
135 if (eeprom_data != 0x5746) {
136 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5746 but was 0x%04X\n",
137 eeprom_data);
138 goto no_fw_rev;
139 }
140 eeprom_offset = next_section_address;
141
142 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
143 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
144 eeprom_offset + 2, eeprom_data);
145 next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
146 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
147 if (eeprom_data != 0x5753) {
148 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x5753 but was 0x%04X\n",
149 eeprom_data);
150 goto no_fw_rev;
151 }
152 eeprom_offset = next_section_address;
153
154 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
155 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
156 eeprom_offset + 2, eeprom_data);
157 next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
158 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
159 if (eeprom_data != 0x414d) {
160 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x414d but was 0x%04X\n",
161 eeprom_data);
162 goto no_fw_rev;
163 }
164 eeprom_offset = next_section_address;
165
166 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset + 2);
167 nes_debug(NES_DBG_HW, "EEPROM Offset %u (next section) = 0x%04X\n",
168 eeprom_offset + 2, eeprom_data);
169 next_section_address = eeprom_offset + ((eeprom_data & 0x00ff) << 3);
170 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 4);
171 if (eeprom_data != 0x464e) {
172 nes_debug(NES_DBG_HW, "EEPROM Changed offset should be 0x464e but was 0x%04X\n",
173 eeprom_data);
174 goto no_fw_rev;
175 }
176 eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 8);
177		printk(KERN_INFO PFX "Firmware version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data);
178 major_ver = (u8)(eeprom_data >> 8);
179 minor_ver = (u8)(eeprom_data);
180
181 if (nes_drv_opt & NES_DRV_OPT_DISABLE_VIRT_WQ) {
182 nes_debug(NES_DBG_HW, "Virtual WQs have been disabled\n");
183 } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
184 nesadapter->virtwq = 1;
185 }
186 nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
187 (u32)((u8)eeprom_data);
188
189no_fw_rev:
190	/* even without a firmware revision section, the software section layout below is valid */
191 eeprom_offset = nesadapter->software_eeprom_offset;
192 eeprom_offset += 8;
193 nesadapter->netdev_max = (u8)nes_read16_eeprom(nesdev->regs, eeprom_offset);
194 eeprom_offset += 2;
195 mac_addr_high = nes_read16_eeprom(nesdev->regs, eeprom_offset);
196 eeprom_offset += 2;
197 mac_addr_low = (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
198 eeprom_offset += 2;
199 mac_addr_low <<= 16;
200 mac_addr_low += (u32)nes_read16_eeprom(nesdev->regs, eeprom_offset);
201 nes_debug(NES_DBG_HW, "Base MAC Address = 0x%04X%08X\n",
202 mac_addr_high, mac_addr_low);
203 nes_debug(NES_DBG_HW, "MAC Address count = %u\n", nesadapter->netdev_max);
204
205 nesadapter->mac_addr_low = mac_addr_low;
206 nesadapter->mac_addr_high = mac_addr_high;
207
208 /* Read the Phy Type array */
209 eeprom_offset += 10;
210 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
211 nesadapter->phy_type[0] = (u8)(eeprom_data >> 8);
212 nesadapter->phy_type[1] = (u8)eeprom_data;
213
214 /* Read the port array */
215 eeprom_offset += 2;
216 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
217 nesadapter->phy_type[2] = (u8)(eeprom_data >> 8);
218 nesadapter->phy_type[3] = (u8)eeprom_data;
219 /* port_count is set by soft reset reg */
220 nes_debug(NES_DBG_HW, "port_count = %u, port 0 -> %u, port 1 -> %u,"
221 " port 2 -> %u, port 3 -> %u\n",
222 nesadapter->port_count,
223 nesadapter->phy_type[0], nesadapter->phy_type[1],
224 nesadapter->phy_type[2], nesadapter->phy_type[3]);
225
226 /* Read PD config array */
227 eeprom_offset += 10;
228 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
229 nesadapter->pd_config_size[0] = eeprom_data;
230 eeprom_offset += 2;
231 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
232 nesadapter->pd_config_base[0] = eeprom_data;
233 nes_debug(NES_DBG_HW, "PD0 config, size=0x%04x, base=0x%04x\n",
234 nesadapter->pd_config_size[0], nesadapter->pd_config_base[0]);
235
236 eeprom_offset += 2;
237 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
238 nesadapter->pd_config_size[1] = eeprom_data;
239 eeprom_offset += 2;
240 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
241 nesadapter->pd_config_base[1] = eeprom_data;
242 nes_debug(NES_DBG_HW, "PD1 config, size=0x%04x, base=0x%04x\n",
243 nesadapter->pd_config_size[1], nesadapter->pd_config_base[1]);
244
245 eeprom_offset += 2;
246 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
247 nesadapter->pd_config_size[2] = eeprom_data;
248 eeprom_offset += 2;
249 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
250 nesadapter->pd_config_base[2] = eeprom_data;
251 nes_debug(NES_DBG_HW, "PD2 config, size=0x%04x, base=0x%04x\n",
252 nesadapter->pd_config_size[2], nesadapter->pd_config_base[2]);
253
254 eeprom_offset += 2;
255 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
256 nesadapter->pd_config_size[3] = eeprom_data;
257 eeprom_offset += 2;
258 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
259 nesadapter->pd_config_base[3] = eeprom_data;
260 nes_debug(NES_DBG_HW, "PD3 config, size=0x%04x, base=0x%04x\n",
261 nesadapter->pd_config_size[3], nesadapter->pd_config_base[3]);
262
263 /* Read Rx Pool Size */
264 eeprom_offset += 22; /* 46 */
265 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
266 eeprom_offset += 2;
267 nesadapter->rx_pool_size = (((u32)eeprom_data) << 16) +
268 nes_read16_eeprom(nesdev->regs, eeprom_offset);
269 nes_debug(NES_DBG_HW, "rx_pool_size = 0x%08X\n", nesadapter->rx_pool_size);
270
271 eeprom_offset += 2;
272 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
273 eeprom_offset += 2;
274 nesadapter->tx_pool_size = (((u32)eeprom_data) << 16) +
275 nes_read16_eeprom(nesdev->regs, eeprom_offset);
276 nes_debug(NES_DBG_HW, "tx_pool_size = 0x%08X\n", nesadapter->tx_pool_size);
277
278 eeprom_offset += 2;
279 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
280 eeprom_offset += 2;
281 nesadapter->rx_threshold = (((u32)eeprom_data) << 16) +
282 nes_read16_eeprom(nesdev->regs, eeprom_offset);
283 nes_debug(NES_DBG_HW, "rx_threshold = 0x%08X\n", nesadapter->rx_threshold);
284
285 eeprom_offset += 2;
286 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
287 eeprom_offset += 2;
288 nesadapter->tcp_timer_core_clk_divisor = (((u32)eeprom_data) << 16) +
289 nes_read16_eeprom(nesdev->regs, eeprom_offset);
290 nes_debug(NES_DBG_HW, "tcp_timer_core_clk_divisor = 0x%08X\n",
291 nesadapter->tcp_timer_core_clk_divisor);
292
293 eeprom_offset += 2;
294 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
295 eeprom_offset += 2;
296 nesadapter->iwarp_config = (((u32)eeprom_data) << 16) +
297 nes_read16_eeprom(nesdev->regs, eeprom_offset);
298 nes_debug(NES_DBG_HW, "iwarp_config = 0x%08X\n", nesadapter->iwarp_config);
299
300 eeprom_offset += 2;
301 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
302 eeprom_offset += 2;
303 nesadapter->cm_config = (((u32)eeprom_data) << 16) +
304 nes_read16_eeprom(nesdev->regs, eeprom_offset);
305 nes_debug(NES_DBG_HW, "cm_config = 0x%08X\n", nesadapter->cm_config);
306
307 eeprom_offset += 2;
308 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
309 eeprom_offset += 2;
310 nesadapter->sws_timer_config = (((u32)eeprom_data) << 16) +
311 nes_read16_eeprom(nesdev->regs, eeprom_offset);
312 nes_debug(NES_DBG_HW, "sws_timer_config = 0x%08X\n", nesadapter->sws_timer_config);
313
314 eeprom_offset += 2;
315 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
316 eeprom_offset += 2;
317 nesadapter->tcp_config1 = (((u32)eeprom_data) << 16) +
318 nes_read16_eeprom(nesdev->regs, eeprom_offset);
319 nes_debug(NES_DBG_HW, "tcp_config1 = 0x%08X\n", nesadapter->tcp_config1);
320
321 eeprom_offset += 2;
322 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
323 eeprom_offset += 2;
324 nesadapter->wqm_wat = (((u32)eeprom_data) << 16) +
325 nes_read16_eeprom(nesdev->regs, eeprom_offset);
326 nes_debug(NES_DBG_HW, "wqm_wat = 0x%08X\n", nesadapter->wqm_wat);
327
328 eeprom_offset += 2;
329 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
330 eeprom_offset += 2;
331 nesadapter->core_clock = (((u32)eeprom_data) << 16) +
332 nes_read16_eeprom(nesdev->regs, eeprom_offset);
333 nes_debug(NES_DBG_HW, "core_clock = 0x%08X\n", nesadapter->core_clock);
334
335 if ((sw_section_ver) && (nesadapter->hw_rev != NE020_REV)) {
336 eeprom_offset += 2;
337 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
338 nesadapter->phy_index[0] = (eeprom_data & 0xff00)>>8;
339 nesadapter->phy_index[1] = eeprom_data & 0x00ff;
340 eeprom_offset += 2;
341 eeprom_data = nes_read16_eeprom(nesdev->regs, eeprom_offset);
342 nesadapter->phy_index[2] = (eeprom_data & 0xff00)>>8;
343 nesadapter->phy_index[3] = eeprom_data & 0x00ff;
344 } else {
345 nesadapter->phy_index[0] = 4;
346 nesadapter->phy_index[1] = 5;
347 nesadapter->phy_index[2] = 6;
348 nesadapter->phy_index[3] = 7;
349 }
350 nes_debug(NES_DBG_HW, "Phy address map = 0 > %u, 1 > %u, 2 > %u, 3 > %u\n",
351			nesadapter->phy_index[0], nesadapter->phy_index[1],
352			nesadapter->phy_index[2], nesadapter->phy_index[3]);
353 }
354
355 return 0;
356}
357
358
359/**
360 * nes_read16_eeprom - read one 16-bit word from the serial EEPROM
361 */
362static u16 nes_read16_eeprom(void __iomem *addr, u16 offset)
363{
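	/* The EEPROM is addressed in 16-bit words while callers pass byte
	 * offsets, hence the shift right by one before issuing the read.
	 */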
364 writel(NES_EEPROM_READ_REQUEST + (offset >> 1),
365 (void __iomem *)addr + NES_EEPROM_COMMAND);
366
367	while (readl((void __iomem *)addr + NES_EEPROM_COMMAND) &
368			NES_EEPROM_READ_REQUEST)
369		cpu_relax();
370
371 return readw((void __iomem *)addr + NES_EEPROM_DATA);
372}
373
374
375/**
376 * nes_write_1G_phy_reg - write a 1G PHY register through the MDIO interface
377 */
378void nes_write_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 data)
379{
380 struct nes_adapter *nesadapter = nesdev->nesadapter;
381 u32 u32temp;
382 u32 counter;
383 unsigned long flags;
384
385 spin_lock_irqsave(&nesadapter->phy_lock, flags);
386
387 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
388 0x50020000 | data | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
389 for (counter = 0; counter < 100 ; counter++) {
390 udelay(30);
391 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
392 if (u32temp & 1) {
393 /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
394 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
395 break;
396 }
397 }
398 if (!(u32temp & 1))
399		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
400 u32temp);
401
402 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
403}
404
405
406/**
407 * nes_read_1G_phy_reg
408 * This routine only issues the read, the data must be read
409 * separately.
410 */
411void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16 *data)
412{
413 struct nes_adapter *nesadapter = nesdev->nesadapter;
414 u32 u32temp;
415 u32 counter;
416 unsigned long flags;
417
418 /* nes_debug(NES_DBG_PHY, "phy addr = %d, mac_index = %d\n",
419 phy_addr, nesdev->mac_index); */
420 spin_lock_irqsave(&nesadapter->phy_lock, flags);
421
422 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
423 0x60020000 | ((u32)phy_reg << 18) | ((u32)phy_addr << 23));
424 for (counter = 0; counter < 100 ; counter++) {
425 udelay(30);
426 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
427 if (u32temp & 1) {
428 /* nes_debug(NES_DBG_PHY, "Phy interrupt status = 0x%X.\n", u32temp); */
429 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
430 break;
431 }
432 }
433 if (!(u32temp & 1)) {
434		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
435 u32temp);
436 *data = 0xffff;
437 } else {
438 *data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
439 }
440 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
441}
442
443
444/**
445 * nes_write_10G_phy_reg - write a 10G PHY register (MDIO address cycle, then write cycle)
446 */
447void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
448 u8 phy_addr, u16 data)
449{
450 u32 dev_addr;
451 u32 port_addr;
452 u32 u32temp;
453 u32 counter;
454
455 dev_addr = 1;
456 port_addr = phy_addr;
457
458 /* set address */
459 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
460 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
461 for (counter = 0; counter < 100 ; counter++) {
462 udelay(30);
463 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
464 if (u32temp & 1) {
465 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
466 break;
467 }
468 }
469 if (!(u32temp & 1))
470		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
471 u32temp);
472
473 /* set data */
474 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
475 0x10020000 | (u32)data | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
476 for (counter = 0; counter < 100 ; counter++) {
477 udelay(30);
478 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
479 if (u32temp & 1) {
480 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
481 break;
482 }
483 }
484 if (!(u32temp & 1))
485		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
486 u32temp);
487}
488
489
490/**
491 * nes_read_10G_phy_reg
492 * This routine only issues the read, the data must be read
493 * separately.
494 */
495void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr)
496{
497 u32 dev_addr;
498 u32 port_addr;
499 u32 u32temp;
500 u32 counter;
501
502 dev_addr = 1;
503 port_addr = phy_addr;
504
505 /* set address */
506 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
507 0x00020000 | (u32)phy_reg | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
508 for (counter = 0; counter < 100 ; counter++) {
509 udelay(30);
510 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
511 if (u32temp & 1) {
512 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
513 break;
514 }
515 }
516 if (!(u32temp & 1))
517		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
518 u32temp);
519
520 /* issue read */
521 nes_write_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL,
522 0x30020000 | (((u32)dev_addr) << 18) | (((u32)port_addr) << 23));
523 for (counter = 0; counter < 100 ; counter++) {
524 udelay(30);
525 u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS);
526 if (u32temp & 1) {
527 nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS, 1);
528 break;
529 }
530 }
531 if (!(u32temp & 1))
532		nes_debug(NES_DBG_PHY, "Phy is not responding. Interrupt status = 0x%X.\n",
533 u32temp);
534}
535
536
537/**
538 * nes_get_cqp_request - get a free CQP request, or allocate one dynamically
539 */
540struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
541{
542 unsigned long flags;
543 struct nes_cqp_request *cqp_request = NULL;
544
545	spin_lock_irqsave(&nesdev->cqp.lock, flags);	/* list test must be made under the lock */
546	if (!list_empty(&nesdev->cqp_avail_reqs)) {
547		cqp_request = list_entry(nesdev->cqp_avail_reqs.next,
548				struct nes_cqp_request, list);
549		list_del_init(&cqp_request->list);
550	}
551	spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
552	if (cqp_request == NULL) {
553		cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
554		if (cqp_request) {
555			cqp_request->dynamic = 1;
556			INIT_LIST_HEAD(&cqp_request->list);
557		}
558	}
558
559 if (cqp_request) {
560 init_waitqueue_head(&cqp_request->waitq);
561 cqp_request->waiting = 0;
562 cqp_request->request_done = 0;
563 cqp_request->callback = 0;
565		nes_debug(NES_DBG_CQP, "Got cqp request %p\n",
566 cqp_request);
567 } else
568		printk(KERN_ERR PFX "%s: Could not allocate a CQP request.\n",
569				__func__);
570
571 return cqp_request;
572}
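
/*
 * Typical caller pattern (an illustrative sketch; the CQP users
 * elsewhere in this driver follow it):
 *
 *	cqp_request = nes_get_cqp_request(nesdev);
 *	if (cqp_request == NULL)
 *		return -ENOMEM;
 *	cqp_request->waiting = 1;
 *	cqp_wqe = &cqp_request->cqp_wqe;
 *	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
 *	... set the opcode and operands ...
 *	atomic_set(&cqp_request->refcount, 2);
 *	nes_post_cqp_request(nesdev, cqp_request, 1);
 *	wait_event_timeout(cqp_request->waitq,
 *			cqp_request->request_done != 0, HZ);
 *
 * The final argument of 1 asks for the CQP doorbell to be rung
 * immediately.
 */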
573
574
575/**
576 * nes_post_cqp_request - put a CQP request on the CQP SQ, or queue it while the SQ is full
577 */
578void nes_post_cqp_request(struct nes_device *nesdev,
579 struct nes_cqp_request *cqp_request, int ring_doorbell)
580{
581 struct nes_hw_cqp_wqe *cqp_wqe;
582 unsigned long flags;
583 u32 cqp_head;
584 u64 u64temp;
585
586 spin_lock_irqsave(&nesdev->cqp.lock, flags);
587
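	/* The masked difference below is (sq_tail - sq_head) mod sq_size,
	 * kept positive by the 2*sq_size bias; it equals 1 exactly when
	 * the producer has caught up to one slot behind the consumer,
	 * i.e. the SQ is full, so the request is deferred to the pending
	 * list in that case (or when older requests are already queued).
	 */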
588 if (((((nesdev->cqp.sq_tail+(nesdev->cqp.sq_size*2))-nesdev->cqp.sq_head) &
589 (nesdev->cqp.sq_size - 1)) != 1)
590 && (list_empty(&nesdev->cqp_pending_reqs))) {
591 cqp_head = nesdev->cqp.sq_head++;
592 nesdev->cqp.sq_head &= nesdev->cqp.sq_size-1;
593 cqp_wqe = &nesdev->cqp.sq_vbase[cqp_head];
594 memcpy(cqp_wqe, &cqp_request->cqp_wqe, sizeof(*cqp_wqe));
595 barrier();
596 u64temp = (unsigned long)cqp_request;
597 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_COMP_SCRATCH_LOW_IDX,
598 u64temp);
599 nes_debug(NES_DBG_CQP, "CQP request (opcode 0x%02X), line 1 = 0x%08X put on CQPs SQ,"
600 " request = %p, cqp_head = %u, cqp_tail = %u, cqp_size = %u,"
601 " waiting = %d, refcount = %d.\n",
602 le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
603 le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX]), cqp_request,
604 nesdev->cqp.sq_head, nesdev->cqp.sq_tail, nesdev->cqp.sq_size,
605 cqp_request->waiting, atomic_read(&cqp_request->refcount));
606 barrier();
607 if (ring_doorbell) {
608 /* Ring doorbell (1 WQEs) */
609 nes_write32(nesdev->regs+NES_WQE_ALLOC, 0x01800000 | nesdev->cqp.qp_id);
610 }
611
612 barrier();
613 } else {
614 nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X), line 1 = 0x%08X"
615 " put on the pending queue.\n",
616 cqp_request,
617 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f,
618 le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_ID_IDX]));
619 list_add_tail(&cqp_request->list, &nesdev->cqp_pending_reqs);
620 }
621
622 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
623
624 return;
625}
626
627
628/**
629 * nes_arp_table - add, delete, or resolve an entry in the software ARP table
630 */
631int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 action)
632{
633 struct nes_adapter *nesadapter = nesdev->nesadapter;
634 int arp_index;
635 int err = 0;
636
637 for (arp_index = 0; (u32) arp_index < nesadapter->arp_table_size; arp_index++) {
638 if (nesadapter->arp_table[arp_index].ip_addr == ip_addr)
639 break;
640 }
641
642 if (action == NES_ARP_ADD) {
643 if (arp_index != nesadapter->arp_table_size) {
644 return -1;
645 }
646
647 arp_index = 0;
648 err = nes_alloc_resource(nesadapter, nesadapter->allocated_arps,
649 nesadapter->arp_table_size, (u32 *)&arp_index, &nesadapter->next_arp_index);
650 if (err) {
651 nes_debug(NES_DBG_NETDEV, "nes_alloc_resource returned error = %u\n", err);
652 return err;
653 }
654 nes_debug(NES_DBG_NETDEV, "ADD, arp_index=%d\n", arp_index);
655
656 nesadapter->arp_table[arp_index].ip_addr = ip_addr;
657 memcpy(nesadapter->arp_table[arp_index].mac_addr, mac_addr, ETH_ALEN);
658 return arp_index;
659 }
660
661 /* DELETE or RESOLVE */
662 if (arp_index == nesadapter->arp_table_size) {
663 nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
664 return -1;
665 }
666
667 if (action == NES_ARP_RESOLVE) {
668 nes_debug(NES_DBG_NETDEV, "RESOLVE, arp_index=%d\n", arp_index);
669 return arp_index;
670 }
671
672 if (action == NES_ARP_DELETE) {
673 nes_debug(NES_DBG_NETDEV, "DELETE, arp_index=%d\n", arp_index);
674 nesadapter->arp_table[arp_index].ip_addr = 0;
675 memset(nesadapter->arp_table[arp_index].mac_addr, 0x00, ETH_ALEN);
676 nes_free_resource(nesadapter, nesadapter->allocated_arps, arp_index);
677 return arp_index;
678 }
679
680 return -1;
681}
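
/*
 * Usage sketch (illustrative): callers pass one of the NES_ARP_*
 * actions and receive the table index, or a negative value on failure:
 *
 *	arp_index = nes_arp_table(nesdev, ip_addr, mac_addr, NES_ARP_ADD);
 *	arp_index = nes_arp_table(nesdev, ip_addr, NULL, NES_ARP_RESOLVE);
 *
 * mac_addr is only dereferenced for NES_ARP_ADD.
 */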
682
683
684/**
685 * nes_mh_fix - periodic timer that detects a stalled MAC transmitter and resets the MAC/SerDes to recover
686 */
687void nes_mh_fix(unsigned long parm)
688{
689 unsigned long flags;
690 struct nes_device *nesdev = (struct nes_device *)parm;
691 struct nes_adapter *nesadapter = nesdev->nesadapter;
692 struct nes_vnic *nesvnic;
693 u32 used_chunks_tx;
694 u32 temp_used_chunks_tx;
695 u32 temp_last_used_chunks_tx;
696 u32 used_chunks_mask;
697 u32 mac_tx_frames_low;
698 u32 mac_tx_frames_high;
699 u32 mac_tx_pauses;
700 u32 serdes_status;
701 u32 reset_value;
702 u32 tx_control;
703 u32 tx_config;
704 u32 tx_pause_quanta;
705 u32 rx_control;
706 u32 rx_config;
707 u32 mac_exact_match;
708 u32 mpp_debug;
709 u32 i=0;
710 u32 chunks_tx_progress = 0;
711
712 spin_lock_irqsave(&nesadapter->phy_lock, flags);
713 if ((nesadapter->mac_sw_state[0] != NES_MAC_SW_IDLE) || (nesadapter->mac_link_down[0])) {
714 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
715 goto no_mh_work;
716 }
717 nesadapter->mac_sw_state[0] = NES_MAC_SW_MH;
718 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
719 do {
720 mac_tx_frames_low = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_LOW);
721 mac_tx_frames_high = nes_read_indexed(nesdev, NES_IDX_MAC_TX_FRAMES_HIGH);
722 mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
723 used_chunks_tx = nes_read_indexed(nesdev, NES_IDX_USED_CHUNKS_TX);
724 nesdev->mac_pause_frames_sent += mac_tx_pauses;
725 used_chunks_mask = 0;
726 temp_used_chunks_tx = used_chunks_tx;
727 temp_last_used_chunks_tx = nesdev->last_used_chunks_tx;
728
729 if (nesdev->netdev[0]) {
730 nesvnic = netdev_priv(nesdev->netdev[0]);
731 } else {
732 break;
733 }
734
735 for (i=0; i<4; i++) {
736 used_chunks_mask <<= 8;
737 if (nesvnic->qp_nic_index[i] != 0xff) {
738 used_chunks_mask |= 0xff;
739 if ((temp_used_chunks_tx&0xff)<(temp_last_used_chunks_tx&0xff)) {
740 chunks_tx_progress = 1;
741 }
742 }
743 temp_used_chunks_tx >>= 8;
744 temp_last_used_chunks_tx >>= 8;
745 }
746 if ((mac_tx_frames_low) || (mac_tx_frames_high) ||
747 (!(used_chunks_tx&used_chunks_mask)) ||
748 (!(nesdev->last_used_chunks_tx&used_chunks_mask)) ||
749 (chunks_tx_progress) ) {
750 nesdev->last_used_chunks_tx = used_chunks_tx;
751 break;
752 }
753 nesdev->last_used_chunks_tx = used_chunks_tx;
754 barrier();
755
756 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000005);
757 mh_pauses_sent++;
758 mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
759 if (mac_tx_pauses) {
760 nesdev->mac_pause_frames_sent += mac_tx_pauses;
761 break;
762 }
763
764 tx_control = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONTROL);
765 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
766 tx_pause_quanta = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA);
767 rx_control = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONTROL);
768 rx_config = nes_read_indexed(nesdev, NES_IDX_MAC_RX_CONFIG);
769 mac_exact_match = nes_read_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM);
770 mpp_debug = nes_read_indexed(nesdev, NES_IDX_MPP_DEBUG);
771
772 /* one last ditch effort to avoid a false positive */
773 mac_tx_pauses = nes_read_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_FRAMES);
774 if (mac_tx_pauses) {
775 nesdev->last_mac_tx_pauses = nesdev->mac_pause_frames_sent;
776 nes_debug(NES_DBG_HW, "failsafe caught slow outbound pause\n");
777 break;
778 }
779 mh_detected++;
780
781 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, 0x00000000);
782 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, 0x00000000);
783 reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET);
784
785 nes_write32(nesdev->regs+NES_SOFTWARE_RESET, reset_value | 0x0000001d);
786
787 while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET)
788 & 0x00000040) != 0x00000040) && (i++ < 5000)) {
789 /* mdelay(1); */
790 }
791
792 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
793 serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0);
794
795 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x000bdef7);
796 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_DRIVE0, 0x9ce73000);
797 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_MODE0, 0x0ff00000);
798 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_SIGDET0, 0x00000000);
799 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_BYPASS0, 0x00000000);
800 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_LOOPBACK_CONTROL0, 0x00000000);
801 if (nesadapter->OneG_Mode) {
802 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0182222);
803 } else {
804 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_CONTROL0, 0xf0042222);
805 }
806 serdes_status = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_RX_EQ_STATUS0);
807 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000ff);
808
809 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONTROL, tx_control);
810 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
811 nes_write_indexed(nesdev, NES_IDX_MAC_TX_PAUSE_QUANTA, tx_pause_quanta);
812 nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONTROL, rx_control);
813 nes_write_indexed(nesdev, NES_IDX_MAC_RX_CONFIG, rx_config);
814 nes_write_indexed(nesdev, NES_IDX_MAC_EXACT_MATCH_BOTTOM, mac_exact_match);
815 nes_write_indexed(nesdev, NES_IDX_MPP_DEBUG, mpp_debug);
816
817 } while (0);
818
819 nesadapter->mac_sw_state[0] = NES_MAC_SW_IDLE;
820no_mh_work:
821 nesdev->nesadapter->mh_timer.expires = jiffies + (HZ/5);
822 add_timer(&nesdev->nesadapter->mh_timer);
823}
824
825/**
826 * nes_clc - hourly timer that clears the per-port link interrupt counters
827 */
828void nes_clc(unsigned long parm)
829{
830 unsigned long flags;
831 struct nes_device *nesdev = (struct nes_device *)parm;
832 struct nes_adapter *nesadapter = nesdev->nesadapter;
833
834 spin_lock_irqsave(&nesadapter->phy_lock, flags);
835 nesadapter->link_interrupt_count[0] = 0;
836 nesadapter->link_interrupt_count[1] = 0;
837 nesadapter->link_interrupt_count[2] = 0;
838 nesadapter->link_interrupt_count[3] = 0;
839 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
840
841 nesadapter->lc_timer.expires = jiffies + 3600 * HZ; /* 1 hour */
842 add_timer(&nesadapter->lc_timer);
843}
844
845
846/**
847 * nes_dump_mem - hex/ASCII dump of a memory region to the debug log
848 */
849void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
850{
851 char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
852 'a', 'b', 'c', 'd', 'e', 'f'};
853 char *ptr;
854 char hex_buf[80];
855 char ascii_buf[20];
856 int num_char;
857 int num_ascii;
858 int num_hex;
859
860 if (!(nes_debug_level & dump_debug_level)) {
861 return;
862 }
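
	/* Each emitted line has the form (illustrative):
	 *
	 *   00 01 02 03 04 05 06 07 - 08 09 0a 0b 0c 0d 0e 0f  | ........ ........
	 */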
863
864 ptr = addr;
865 if (length > 0x100) {
866 nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
867 length = 0x100;
868 }
869 nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
870
871 memset(ascii_buf, 0, 20);
872 memset(hex_buf, 0, 80);
873
874 num_ascii = 0;
875 num_hex = 0;
876 for (num_char = 0; num_char < length; num_char++) {
877 if (num_ascii == 8) {
878 ascii_buf[num_ascii++] = ' ';
879 hex_buf[num_hex++] = '-';
880 hex_buf[num_hex++] = ' ';
881 }
882
883 if (*ptr < 0x20 || *ptr > 0x7e)
884 ascii_buf[num_ascii++] = '.';
885 else
886 ascii_buf[num_ascii++] = *ptr;
887 hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
888 hex_buf[num_hex++] = xlate[*ptr & 0x0f];
889 hex_buf[num_hex++] = ' ';
890 ptr++;
891
892 if (num_ascii >= 17) {
893 /* output line and reset */
894 nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
895 memset(ascii_buf, 0, 20);
896 memset(hex_buf, 0, 80);
897 num_ascii = 0;
898 num_hex = 0;
899 }
900 }
901
902 /* output the rest */
903 if (num_ascii) {
904 while (num_ascii < 17) {
905 if (num_ascii == 8) {
906 hex_buf[num_hex++] = ' ';
907 hex_buf[num_hex++] = ' ';
908 }
909 hex_buf[num_hex++] = ' ';
910 hex_buf[num_hex++] = ' ';
911 hex_buf[num_hex++] = ' ';
912 num_ascii++;
913 }
914
915 nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
916 }
917}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
new file mode 100644
index 000000000000..ffd4b425567f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -0,0 +1,3917 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/module.h>
35#include <linux/moduleparam.h>
36#include <linux/random.h>
37#include <linux/highmem.h>
38#include <asm/byteorder.h>
39
40#include <rdma/ib_verbs.h>
41#include <rdma/iw_cm.h>
42#include <rdma/ib_user_verbs.h>
43
44#include "nes.h"
45
46#include <rdma/ib_umem.h>
47
48atomic_t mod_qp_timouts;
49atomic_t qps_created;
50atomic_t sw_qps_destroyed;
51
52
53/**
54 * nes_alloc_mw - allocate a memory window by posting an Allocate STag request to the CQP
55 */
56static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {
57 unsigned long flags;
58 struct nes_pd *nespd = to_nespd(ibpd);
59 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
60 struct nes_device *nesdev = nesvnic->nesdev;
61 struct nes_adapter *nesadapter = nesdev->nesadapter;
62 struct nes_cqp_request *cqp_request;
63 struct nes_mr *nesmr;
64 struct ib_mw *ibmw;
65 struct nes_hw_cqp_wqe *cqp_wqe;
66 int ret;
67 u32 stag;
68 u32 stag_index = 0;
69 u32 next_stag_index = 0;
70 u32 driver_key = 0;
71 u8 stag_key = 0;
72
73 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
74 stag_key = (u8)next_stag_index;
75
76 driver_key = 0;
77
78 next_stag_index >>= 8;
79 next_stag_index %= nesadapter->max_mr;
80
81 ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
82 nesadapter->max_mr, &stag_index, &next_stag_index);
83 if (ret) {
84 return ERR_PTR(ret);
85 }
86
87 nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
88 if (!nesmr) {
89 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
90 return ERR_PTR(-ENOMEM);
91 }
92
93 stag = stag_index << 8;
94 stag |= driver_key;
95 stag += (u32)stag_key;
96
97 nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
98 stag, stag_index);
99
100 /* Register the region with the adapter */
101 cqp_request = nes_get_cqp_request(nesdev);
102 if (cqp_request == NULL) {
103 kfree(nesmr);
104 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
105 return ERR_PTR(-ENOMEM);
106 }
107
108 cqp_request->waiting = 1;
109 cqp_wqe = &cqp_request->cqp_wqe;
110
111 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
112 cpu_to_le32(NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_RIGHTS_REMOTE_READ |
113 NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_VA_TO |
114 NES_CQP_STAG_REM_ACC_EN);
115
116 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
117 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
118 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
119
120 atomic_set(&cqp_request->refcount, 2);
121 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
122
123 /* Wait for CQP */
124 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
125 NES_EVENT_TIMEOUT);
126 nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
127 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
128 stag, ret, cqp_request->major_code, cqp_request->minor_code);
129 if ((!ret) || (cqp_request->major_code)) {
130 if (atomic_dec_and_test(&cqp_request->refcount)) {
131 if (cqp_request->dynamic) {
132 kfree(cqp_request);
133 } else {
134 spin_lock_irqsave(&nesdev->cqp.lock, flags);
135 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
136 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
137 }
138 }
139 kfree(nesmr);
140 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
141 if (!ret) {
142 return ERR_PTR(-ETIME);
143 } else {
144 return ERR_PTR(-ENOMEM);
145 }
146 } else {
147 if (atomic_dec_and_test(&cqp_request->refcount)) {
148 if (cqp_request->dynamic) {
149 kfree(cqp_request);
150 } else {
151 spin_lock_irqsave(&nesdev->cqp.lock, flags);
152 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
153 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
154 }
155 }
156 }
157
158 nesmr->ibmw.rkey = stag;
159 nesmr->mode = IWNES_MEMREG_TYPE_MW;
160 ibmw = &nesmr->ibmw;
161 nesmr->pbl_4k = 0;
162 nesmr->pbls_used = 0;
163
164 return ibmw;
165}
166
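/*
 * A minimal sketch (not part of the driver) of the STag packing used in
 * nes_alloc_mw() above: the resource index from nes_alloc_resource() lands
 * in bits 8..31, driver_key is zero, and the random 8-bit consumer key
 * occupies bits 0..7, which is why nes_dealloc_mw() below can recover the
 * index from the rkey.
 */
static inline u32 nes_stag_pack_sketch(u32 stag_index, u8 stag_key)
{
	return (stag_index << 8) + (u32)stag_key;
}

static inline u32 nes_stag_index_sketch(u32 stag)
{
	return (stag & 0x0fffff00) >> 8;
}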
167
168/**
169 * nes_dealloc_mw
170 */
171static int nes_dealloc_mw(struct ib_mw *ibmw)
172{
173 struct nes_mr *nesmr = to_nesmw(ibmw);
174 struct nes_vnic *nesvnic = to_nesvnic(ibmw->device);
175 struct nes_device *nesdev = nesvnic->nesdev;
176 struct nes_adapter *nesadapter = nesdev->nesadapter;
177 struct nes_hw_cqp_wqe *cqp_wqe;
178 struct nes_cqp_request *cqp_request;
179 int err = 0;
180 unsigned long flags;
181 int ret;
182
183 /* Deallocate the window with the adapter */
184 cqp_request = nes_get_cqp_request(nesdev);
185 if (cqp_request == NULL) {
186 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
187 return -ENOMEM;
188 }
189 cqp_request->waiting = 1;
190 cqp_wqe = &cqp_request->cqp_wqe;
191 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
192 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, NES_CQP_DEALLOCATE_STAG);
193 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ibmw->rkey);
194
195 atomic_set(&cqp_request->refcount, 2);
196 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
197
198 /* Wait for CQP */
199 nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X to complete.\n",
200 ibmw->rkey);
201 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
202 NES_EVENT_TIMEOUT);
203 nes_debug(NES_DBG_MR, "Deallocate STag completed, wait_event_timeout ret = %u,"
204 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
205 ret, cqp_request->major_code, cqp_request->minor_code);
206 if ((!ret) || (cqp_request->major_code)) {
207 if (atomic_dec_and_test(&cqp_request->refcount)) {
208 if (cqp_request->dynamic) {
209 kfree(cqp_request);
210 } else {
211 spin_lock_irqsave(&nesdev->cqp.lock, flags);
212 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
213 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
214 }
215 }
216 if (!ret) {
217 err = -ETIME;
218 } else {
219 err = -EIO;
220 }
221 } else {
222 if (atomic_dec_and_test(&cqp_request->refcount)) {
223 if (cqp_request->dynamic) {
224 kfree(cqp_request);
225 } else {
226 spin_lock_irqsave(&nesdev->cqp.lock, flags);
227 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
228 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
229 }
230 }
231 }
232
233 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
234 (ibmw->rkey & 0x0fffff00) >> 8);
235 kfree(nesmr);
236
237 return err;
238}
239
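/*
 * The refcount-drop sequence above is repeated after every CQP request in
 * this file; this hypothetical helper restates the pattern in one place.
 * The refcount is primed to 2 so the poster and the CQP completion path
 * each drop one reference, and whoever drops the last one recycles the
 * request.
 */
static void nes_put_cqp_request_sketch(struct nes_device *nesdev,
		struct nes_cqp_request *cqp_request)
{
	unsigned long flags;

	if (!atomic_dec_and_test(&cqp_request->refcount))
		return;

	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		spin_lock_irqsave(&nesdev->cqp.lock, flags);
		list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
	}
}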
240
241/**
242 * nes_bind_mw - post a bind WQE for the window on the QP's send queue
243 */
244static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
245 struct ib_mw_bind *ibmw_bind)
246{
247 u64 u64temp;
248 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
249 struct nes_device *nesdev = nesvnic->nesdev;
250 /* struct nes_mr *nesmr = to_nesmw(ibmw); */
251 struct nes_qp *nesqp = to_nesqp(ibqp);
252 struct nes_hw_qp_wqe *wqe;
253 unsigned long flags = 0;
254 u32 head;
255 u32 wqe_misc = 0;
256 u32 qsize;
257
258 if (nesqp->ibqp_state > IB_QPS_RTS)
259 return -EINVAL;
260
261 spin_lock_irqsave(&nesqp->lock, flags);
262
263 head = nesqp->hwqp.sq_head;
264 qsize = nesqp->hwqp.sq_size; /* ring size used by the wrap check below */
265
266 /* Check for SQ overflow */
267 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
268 spin_unlock_irqrestore(&nesqp->lock, flags);
269 return -EINVAL;
270 }
271
272 wqe = &nesqp->hwqp.sq_vbase[head];
273 /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
274 nes_fill_init_qp_wqe(wqe, nesqp, head);
275 u64temp = ibmw_bind->wr_id;
276 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
277 wqe_misc = NES_IWARP_SQ_OP_BIND;
278
279 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
280
281 if (ibmw_bind->send_flags & IB_SEND_SIGNALED)
282 wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
283
284 if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) {
285 wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE;
286 }
287 if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) {
288 wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ;
289 }
290
291 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
292 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey);
293 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
294 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX,
295 ibmw_bind->length);
296 wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0;
297 u64temp = (u64)ibmw_bind->addr;
298 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);
299
300 head++;
301 if (head >= qsize)
302 head = 0;
303
304 nesqp->hwqp.sq_head = head;
305 barrier();
306
307 nes_write32(nesdev->regs+NES_WQE_ALLOC,
308 (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
309
310 spin_unlock_irqrestore(&nesqp->lock, flags);
311
312 return 0;
313}
314
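/*
 * A stand-alone restatement of the SQ-full test in nes_bind_mw(), with
 * qsize the ring size: adding 2 * qsize keeps the subtraction
 * non-negative before the modulo, and the ring counts as full while only
 * one free slot remains.  Illustrative helper only.
 */
static inline int nes_sq_is_full_sketch(u32 head, u32 tail, u32 qsize)
{
	return ((head + (2 * qsize) - tail) % qsize) == (qsize - 1);
}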
315
316/**
317 * nes_alloc_fmr
318 */
319static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
320 int ibmr_access_flags,
321 struct ib_fmr_attr *ibfmr_attr)
322{
323 unsigned long flags;
324 struct nes_pd *nespd = to_nespd(ibpd);
325 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
326 struct nes_device *nesdev = nesvnic->nesdev;
327 struct nes_adapter *nesadapter = nesdev->nesadapter;
328 struct nes_fmr *nesfmr;
329 struct nes_cqp_request *cqp_request;
330 struct nes_hw_cqp_wqe *cqp_wqe;
331 int ret;
332 u32 stag;
333 u32 stag_index = 0;
334 u32 next_stag_index = 0;
335 u32 driver_key = 0;
336 u32 opcode = 0;
337 u8 stag_key = 0;
338 int i = 0;
339 struct nes_vpbl vpbl;
340
341 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
342 stag_key = (u8)next_stag_index;
343
344 driver_key = 0;
345
346 next_stag_index >>= 8;
347 next_stag_index %= nesadapter->max_mr;
348
349 ret = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
350 nesadapter->max_mr, &stag_index, &next_stag_index);
351 if (ret) {
352 goto failed_resource_alloc;
353 }
354
355 nesfmr = kzalloc(sizeof(*nesfmr), GFP_KERNEL);
356 if (!nesfmr) {
357 ret = -ENOMEM;
358 goto failed_fmr_alloc;
359 }
360
361 nesfmr->nesmr.mode = IWNES_MEMREG_TYPE_FMR;
362 if (ibfmr_attr->max_pages == 1) {
363 /* use zero length PBL */
364 nesfmr->nesmr.pbl_4k = 0;
365 nesfmr->nesmr.pbls_used = 0;
366 } else if (ibfmr_attr->max_pages <= 32) {
367 /* use PBL 256 */
368 nesfmr->nesmr.pbl_4k = 0;
369 nesfmr->nesmr.pbls_used = 1;
370 } else if (ibfmr_attr->max_pages <= 512) {
371 /* use 4K PBLs */
372 nesfmr->nesmr.pbl_4k = 1;
373 nesfmr->nesmr.pbls_used = 1;
374 } else {
375 /* use two level 4K PBLs */
376 /* add support for two level 256B PBLs */
377 nesfmr->nesmr.pbl_4k = 1;
378 nesfmr->nesmr.pbls_used = 1 + (ibfmr_attr->max_pages >> 9) +
379 ((ibfmr_attr->max_pages & 511) ? 1 : 0);
380 }
381 /* Register the region with the adapter */
382 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
383
384 /* track PBL resources */
385 if (nesfmr->nesmr.pbls_used != 0) {
386 if (nesfmr->nesmr.pbl_4k) {
387 if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
388 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
389 ret = -ENOMEM;
390 goto failed_vpbl_alloc;
391 } else {
392 nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
393 }
394 } else {
395 if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
396 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
397 ret = -ENOMEM;
398 goto failed_vpbl_alloc;
399 } else {
400 nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
401 }
402 }
403 }
404
405 /* one level pbl */
406 if (nesfmr->nesmr.pbls_used == 0) {
407 nesfmr->root_vpbl.pbl_vbase = NULL;
408 nes_debug(NES_DBG_MR, "zero level pbl \n");
409 } else if (nesfmr->nesmr.pbls_used == 1) {
410 /* can change it to kmalloc & dma_map_single */
411 nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
412 &nesfmr->root_vpbl.pbl_pbase);
413 if (!nesfmr->root_vpbl.pbl_vbase) {
414 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
415 ret = -ENOMEM;
416 goto failed_vpbl_alloc;
417 }
418 nesfmr->leaf_pbl_cnt = 0;
419 nes_debug(NES_DBG_MR, "one level pbl, root_vpbl.pbl_vbase=%p \n",
420 nesfmr->root_vpbl.pbl_vbase);
421 }
422 /* two level pbl */
423 else {
424 nesfmr->root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
425 &nesfmr->root_vpbl.pbl_pbase);
426 if (!nesfmr->root_vpbl.pbl_vbase) {
427 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
428 ret = -ENOMEM;
429 goto failed_vpbl_alloc;
430 }
431
432 nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
433 if (!nesfmr->root_vpbl.leaf_vpbl) {
434 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
435 ret = -ENOMEM;
436 goto failed_leaf_vpbl_alloc;
437 }
438
439 nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
440 nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
441 " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
442 nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
443
444 for (i=0; i<nesfmr->leaf_pbl_cnt; i++)
445 nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase = NULL;
446
447 for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
448 vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
449 &vpbl.pbl_pbase);
450
451 if (!vpbl.pbl_vbase) {
452 ret = -ENOMEM;
453 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
454 goto failed_leaf_vpbl_pages_alloc;
455 }
456
457 nesfmr->root_vpbl.pbl_vbase[i].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
458 nesfmr->root_vpbl.pbl_vbase[i].pa_high = cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
459 nesfmr->root_vpbl.leaf_vpbl[i] = vpbl;
460
461 nes_debug(NES_DBG_MR, "pbase_low=0x%x, pbase_high=0x%x, vpbl=%p\n",
462 nesfmr->root_vpbl.pbl_vbase[i].pa_low,
463 nesfmr->root_vpbl.pbl_vbase[i].pa_high,
464 &nesfmr->root_vpbl.leaf_vpbl[i]);
465 }
466 }
467 nesfmr->ib_qp = NULL;
468 nesfmr->access_rights = 0;
469
470 stag = stag_index << 8;
471 stag |= driver_key;
472 stag += (u32)stag_key;
473
474 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
475 cqp_request = nes_get_cqp_request(nesdev);
476 if (cqp_request == NULL) {
477 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
478 ret = -ENOMEM;
479 goto failed_leaf_vpbl_pages_alloc;
480 }
481 cqp_request->waiting = 1;
482 cqp_wqe = &cqp_request->cqp_wqe;
483
484 nes_debug(NES_DBG_MR, "Registering STag 0x%08X, index = 0x%08X\n",
485 stag, stag_index);
486
487 opcode = NES_CQP_ALLOCATE_STAG | NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
488
489 if (nesfmr->nesmr.pbl_4k == 1)
490 opcode |= NES_CQP_STAG_PBL_BLK_SIZE;
491
492 if (ibmr_access_flags & IB_ACCESS_REMOTE_WRITE) {
493 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE |
494 NES_CQP_STAG_RIGHTS_LOCAL_WRITE | NES_CQP_STAG_REM_ACC_EN;
495 nesfmr->access_rights |=
496 NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_RIGHTS_LOCAL_WRITE |
497 NES_CQP_STAG_REM_ACC_EN;
498 }
499
500 if (ibmr_access_flags & IB_ACCESS_REMOTE_READ) {
501 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ |
502 NES_CQP_STAG_RIGHTS_LOCAL_READ | NES_CQP_STAG_REM_ACC_EN;
503 nesfmr->access_rights |=
504 NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_RIGHTS_LOCAL_READ |
505 NES_CQP_STAG_REM_ACC_EN;
506 }
507
508 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
509 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
510 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX, (nespd->pd_id & 0x00007fff));
511 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
512
513 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX] =
514 cpu_to_le32((nesfmr->nesmr.pbls_used>1) ?
515 (nesfmr->nesmr.pbls_used-1) : nesfmr->nesmr.pbls_used);
516
517 atomic_set(&cqp_request->refcount, 2);
518 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
519
520 /* Wait for CQP */
521 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
522 NES_EVENT_TIMEOUT);
523 nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
524 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
525 stag, ret, cqp_request->major_code, cqp_request->minor_code);
526
527 if ((!ret) || (cqp_request->major_code)) {
528 if (atomic_dec_and_test(&cqp_request->refcount)) {
529 if (cqp_request->dynamic) {
530 kfree(cqp_request);
531 } else {
532 spin_lock_irqsave(&nesdev->cqp.lock, flags);
533 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
534 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
535 }
536 }
537 ret = (!ret) ? -ETIME : -EIO;
538 goto failed_leaf_vpbl_pages_alloc;
539 } else {
540 if (atomic_dec_and_test(&cqp_request->refcount)) {
541 if (cqp_request->dynamic) {
542 kfree(cqp_request);
543 } else {
544 spin_lock_irqsave(&nesdev->cqp.lock, flags);
545 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
546 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
547 }
548 }
549 }
550
551 nesfmr->nesmr.ibfmr.lkey = stag;
552 nesfmr->nesmr.ibfmr.rkey = stag;
553 nesfmr->attr = *ibfmr_attr;
554
555 return &nesfmr->nesmr.ibfmr;
556
557 failed_leaf_vpbl_pages_alloc:
558 /* unroll all allocated pages */
559 for (i=0; i<nesfmr->leaf_pbl_cnt; i++) {
560 if (nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase) {
561 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
562 nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
563 }
564 }
565 /* kfree() is a no-op on NULL, so no check is needed */
566 kfree(nesfmr->root_vpbl.leaf_vpbl);
567
568 failed_leaf_vpbl_alloc:
569 if (nesfmr->leaf_pbl_cnt == 0) {
570 if (nesfmr->root_vpbl.pbl_vbase)
571 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
572 nesfmr->root_vpbl.pbl_pbase);
573 } else
574 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
575 nesfmr->root_vpbl.pbl_pbase);
576
577 failed_vpbl_alloc:
578 kfree(nesfmr);
579
580 failed_fmr_alloc:
581 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
582
583 failed_resource_alloc:
584 return ERR_PTR(ret);
585}
586
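/*
 * The PBL sizing policy of nes_alloc_fmr(), restated as a hypothetical
 * helper: a 256-byte PBL holds 32 eight-byte entries and a 4 KB PBL holds
 * 512, so anything above 512 pages needs a two-level tree with one root
 * page plus one 4 KB leaf per 512 pages.
 */
static inline u32 nes_fmr_pbls_used_sketch(u32 max_pages)
{
	if (max_pages == 1)
		return 0;	/* zero-length PBL */
	if (max_pages <= 512)
		return 1;	/* one 256-byte or one 4 KB PBL */
	return 1 + (max_pages >> 9) + ((max_pages & 511) ? 1 : 0);
}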
587
588/**
589 * nes_dealloc_fmr
590 */
591static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
592{
593 struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
594 struct nes_fmr *nesfmr = to_nesfmr(nesmr);
595 struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
596 struct nes_device *nesdev = nesvnic->nesdev;
597 struct nes_mr temp_nesmr = *nesmr;
598 int i = 0;
599
600 temp_nesmr.ibmw.device = ibfmr->device;
601 temp_nesmr.ibmw.pd = ibfmr->pd;
602 temp_nesmr.ibmw.rkey = ibfmr->rkey;
603 temp_nesmr.ibmw.uobject = NULL;
604
605 /* free the resources */
606 if (nesfmr->leaf_pbl_cnt == 0) {
607 /* single PBL case */
608 if (nesfmr->root_vpbl.pbl_vbase)
609 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.pbl_vbase,
610 nesfmr->root_vpbl.pbl_pbase);
611 } else {
612 for (i = 0; i < nesfmr->leaf_pbl_cnt; i++) {
613 pci_free_consistent(nesdev->pcidev, 4096, nesfmr->root_vpbl.leaf_vpbl[i].pbl_vbase,
614 nesfmr->root_vpbl.leaf_vpbl[i].pbl_pbase);
615 }
616 kfree(nesfmr->root_vpbl.leaf_vpbl);
617 pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
618 nesfmr->root_vpbl.pbl_pbase);
619 }
620
621 return nes_dealloc_mw(&temp_nesmr.ibmw);
622}
623
624
625/**
626 * nes_map_phys_fmr
627 */
628static int nes_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
629 int list_len, u64 iova)
630{
631 return 0;
632}
633
634
635/**
636 * nes_unmap_fmr
637 */
638static int nes_unmap_fmr(struct list_head *ibfmr_list)
639{
640 return 0;
641}
642
643
644
645/**
646 * nes_query_device
647 */
648static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
649{
650 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
651 struct nes_device *nesdev = nesvnic->nesdev;
652 struct nes_ib_device *nesibdev = nesvnic->nesibdev;
653
654 memset(props, 0, sizeof(*props));
655 memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6);
656
657 props->fw_ver = nesdev->nesadapter->fw_ver;
658 props->device_cap_flags = nesdev->nesadapter->device_cap_flags;
659 props->vendor_id = nesdev->nesadapter->vendor_id;
660 props->vendor_part_id = nesdev->nesadapter->vendor_part_id;
661 props->hw_ver = nesdev->nesadapter->hw_rev;
662 props->max_mr_size = 0x80000000;
663 props->max_qp = nesibdev->max_qp;
664 props->max_qp_wr = nesdev->nesadapter->max_qp_wr - 2;
665 props->max_sge = nesdev->nesadapter->max_sge;
666 props->max_cq = nesibdev->max_cq;
667 props->max_cqe = nesdev->nesadapter->max_cqe - 1;
668 props->max_mr = nesibdev->max_mr;
669 props->max_mw = nesibdev->max_mr;
670 props->max_pd = nesibdev->max_pd;
671 props->max_sge_rd = 1;
672 switch (nesdev->nesadapter->max_irrq_wr) {
673 case 0:
674 props->max_qp_rd_atom = 1;
675 break;
676 case 1:
677 props->max_qp_rd_atom = 4;
678 break;
679 case 2:
680 props->max_qp_rd_atom = 16;
681 break;
682 case 3:
683 props->max_qp_rd_atom = 32;
684 break;
685 default:
686 props->max_qp_rd_atom = 0;
687 }
688 props->max_qp_init_rd_atom = props->max_qp_wr;
689 props->atomic_cap = IB_ATOMIC_NONE;
690 props->max_map_per_fmr = 1;
691
692 return 0;
693}
694
695
696/**
697 * nes_query_port
698 */
699static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
700{
701 memset(props, 0, sizeof(*props));
702
703 props->max_mtu = IB_MTU_2048;
704 props->active_mtu = IB_MTU_2048;
705 props->lid = 1;
706 props->lmc = 0;
707 props->sm_lid = 0;
708 props->sm_sl = 0;
709 props->state = IB_PORT_ACTIVE;
710 props->phys_state = 0;
711 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
712 IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
713 props->gid_tbl_len = 1;
714 props->pkey_tbl_len = 1;
715 props->qkey_viol_cntr = 0;
716 props->active_width = IB_WIDTH_4X;
717 props->active_speed = 1;
718 props->max_msg_sz = 0x80000000;
719
720 return 0;
721}
722
723
724/**
725 * nes_modify_port
726 */
727static int nes_modify_port(struct ib_device *ibdev, u8 port,
728 int port_modify_mask, struct ib_port_modify *props)
729{
730 return 0;
731}
732
733
734/**
735 * nes_query_pkey
736 */
737static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
738{
739 *pkey = 0;
740 return 0;
741}
742
743
744/**
745 * nes_query_gid
746 */
747static int nes_query_gid(struct ib_device *ibdev, u8 port,
748 int index, union ib_gid *gid)
749{
750 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
751
752 memset(&(gid->raw[0]), 0, sizeof(gid->raw));
753 memcpy(&(gid->raw[0]), nesvnic->netdev->dev_addr, 6);
754
755 return 0;
756}
757
758
759/**
760 * nes_alloc_ucontext - Allocate the user context data structure. This keeps track
761 * of all objects associated with a particular user-mode client.
762 */
763static struct ib_ucontext *nes_alloc_ucontext(struct ib_device *ibdev,
764 struct ib_udata *udata)
765{
766 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
767 struct nes_device *nesdev = nesvnic->nesdev;
768 struct nes_adapter *nesadapter = nesdev->nesadapter;
769 struct nes_alloc_ucontext_req req;
770 struct nes_alloc_ucontext_resp uresp;
771 struct nes_ucontext *nes_ucontext;
772 struct nes_ib_device *nesibdev = nesvnic->nesibdev;
773
774
775 if (ib_copy_from_udata(&req, udata, sizeof(struct nes_alloc_ucontext_req))) {
776 printk(KERN_ERR PFX "Invalid structure size on allocate user context.\n");
777 return ERR_PTR(-EINVAL);
778 }
779
780 if (req.userspace_ver != NES_ABI_USERSPACE_VER) {
781 printk(KERN_ERR PFX "Invalid userspace driver version detected. Detected version %d, should be %d\n",
782 req.userspace_ver, NES_ABI_USERSPACE_VER);
783 return ERR_PTR(-EINVAL);
784 }
785
786
787 memset(&uresp, 0, sizeof uresp);
788
789 uresp.max_qps = nesibdev->max_qp;
790 uresp.max_pds = nesibdev->max_pd;
791 uresp.wq_size = nesdev->nesadapter->max_qp_wr * 2;
792 uresp.virtwq = nesadapter->virtwq;
793 uresp.kernel_ver = NES_ABI_KERNEL_VER;
794
795 nes_ucontext = kzalloc(sizeof *nes_ucontext, GFP_KERNEL);
796 if (!nes_ucontext)
797 return ERR_PTR(-ENOMEM);
798
799 nes_ucontext->nesdev = nesdev;
800 nes_ucontext->mmap_wq_offset = uresp.max_pds;
801 nes_ucontext->mmap_cq_offset = nes_ucontext->mmap_wq_offset +
802 ((sizeof(struct nes_hw_qp_wqe) * uresp.max_qps * 2) + PAGE_SIZE-1) /
803 PAGE_SIZE;
804
805
806 if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
807 kfree(nes_ucontext);
808 return ERR_PTR(-EFAULT);
809 }
810
811 INIT_LIST_HEAD(&nes_ucontext->cq_reg_mem_list);
812 INIT_LIST_HEAD(&nes_ucontext->qp_reg_mem_list);
813 atomic_set(&nes_ucontext->usecnt, 1);
814 return &nes_ucontext->ibucontext;
815}
816
817
818/**
819 * nes_dealloc_ucontext
820 */
821static int nes_dealloc_ucontext(struct ib_ucontext *context)
822{
823 /* struct nes_vnic *nesvnic = to_nesvnic(context->device); */
824 /* struct nes_device *nesdev = nesvnic->nesdev; */
825 struct nes_ucontext *nes_ucontext = to_nesucontext(context);
826
827 if (!atomic_dec_and_test(&nes_ucontext->usecnt))
828 return 0;
829 kfree(nes_ucontext);
830 return 0;
831}
832
833
834/**
835 * nes_mmap
836 */
837static int nes_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
838{
839 unsigned long index;
840 struct nes_vnic *nesvnic = to_nesvnic(context->device);
841 struct nes_device *nesdev = nesvnic->nesdev;
842 /* struct nes_adapter *nesadapter = nesdev->nesadapter; */
843 struct nes_ucontext *nes_ucontext;
844 struct nes_qp *nesqp;
845
846 nes_ucontext = to_nesucontext(context);
847
848
849 if (vma->vm_pgoff >= nes_ucontext->mmap_wq_offset) {
850 index = (vma->vm_pgoff - nes_ucontext->mmap_wq_offset) * PAGE_SIZE;
851 index /= ((sizeof(struct nes_hw_qp_wqe) * nesdev->nesadapter->max_qp_wr * 2) +
852 PAGE_SIZE-1) & (~(PAGE_SIZE-1));
853 if (!test_bit(index, nes_ucontext->allocated_wqs)) {
854 nes_debug(NES_DBG_MMAP, "wq %lu not allocated\n", index);
855 return -EFAULT;
856 }
857 nesqp = nes_ucontext->mmap_nesqp[index];
858 if (nesqp == NULL) {
859 nes_debug(NES_DBG_MMAP, "wq %lu has a NULL QP base.\n", index);
860 return -EFAULT;
861 }
862 if (remap_pfn_range(vma, vma->vm_start,
863 virt_to_phys(nesqp->hwqp.sq_vbase) >> PAGE_SHIFT,
864 vma->vm_end - vma->vm_start,
865 vma->vm_page_prot)) {
866 nes_debug(NES_DBG_MMAP, "remap_pfn_range failed.\n");
867 return -EAGAIN;
868 }
869 vma->vm_private_data = nesqp;
870 return 0;
871 } else {
872 index = vma->vm_pgoff;
873 if (!test_bit(index, nes_ucontext->allocated_doorbells))
874 return -EFAULT;
875
876 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
877 if (io_remap_pfn_range(vma, vma->vm_start,
878 (nesdev->doorbell_start +
879 ((nes_ucontext->mmap_db_index[index] - nesdev->base_doorbell_index) * 4096))
880 >> PAGE_SHIFT, PAGE_SIZE, vma->vm_page_prot))
881 return -EAGAIN;
882 vma->vm_private_data = nes_ucontext;
883 return 0;
884 }
885
886 return -ENOSYS;
887}
888
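/*
 * A sketch of how nes_mmap() splits the page-offset namespace: offsets
 * below mmap_wq_offset select a doorbell page directly by index, while
 * larger offsets are scaled down by the page-rounded size of one WQ
 * allocation to recover the QP's WQ index.  Helper name and parameters
 * are illustrative, not part of the driver.
 */
static unsigned long nes_mmap_wq_index_sketch(struct nes_ucontext *uctx,
		unsigned long pgoff, u32 max_qp_wr)
{
	unsigned long wq_bytes = ((sizeof(struct nes_hw_qp_wqe) * max_qp_wr * 2) +
			PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	return ((pgoff - uctx->mmap_wq_offset) * PAGE_SIZE) / wq_bytes;
}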
889
890/**
891 * nes_alloc_pd
892 */
893static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
894 struct ib_ucontext *context, struct ib_udata *udata)
895{
896 struct nes_pd *nespd;
897 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
898 struct nes_device *nesdev = nesvnic->nesdev;
899 struct nes_adapter *nesadapter = nesdev->nesadapter;
900 struct nes_ucontext *nesucontext;
901 struct nes_alloc_pd_resp uresp;
902 u32 pd_num = 0;
903 int err;
904
905 nes_debug(NES_DBG_PD, "nesvnic=%p, netdev=%p %s, ibdev=%p, context=%p, netdev refcnt=%u\n",
906 nesvnic, nesdev->netdev[0], nesdev->netdev[0]->name, ibdev, context,
907 atomic_read(&nesvnic->netdev->refcnt));
908
909 err = nes_alloc_resource(nesadapter, nesadapter->allocated_pds,
910 nesadapter->max_pd, &pd_num, &nesadapter->next_pd);
911 if (err) {
912 return ERR_PTR(err);
913 }
914
915 nespd = kzalloc(sizeof (struct nes_pd), GFP_KERNEL);
916 if (!nespd) {
917 nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
918 return ERR_PTR(-ENOMEM);
919 }
920
921 nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
922 nespd, nesvnic->nesibdev->ibdev.name);
923
924 nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
925
926 if (context) {
927 nesucontext = to_nesucontext(context);
928 nespd->mmap_db_index = find_next_zero_bit(nesucontext->allocated_doorbells,
929 NES_MAX_USER_DB_REGIONS, nesucontext->first_free_db);
930 nes_debug(NES_DBG_PD, "find_first_zero_biton doorbells returned %u, mapping pd_id %u.\n",
931 nespd->mmap_db_index, nespd->pd_id);
932 if (nespd->mmap_db_index >= NES_MAX_USER_DB_REGIONS) {
933 nes_debug(NES_DBG_PD, "mmap_db_index > MAX\n");
934 nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
935 kfree(nespd);
936 return ERR_PTR(-ENOMEM);
937 }
938
939 uresp.pd_id = nespd->pd_id;
940 uresp.mmap_db_index = nespd->mmap_db_index;
941 if (ib_copy_to_udata(udata, &uresp, sizeof (struct nes_alloc_pd_resp))) {
942 nes_free_resource(nesadapter, nesadapter->allocated_pds, pd_num);
943 kfree(nespd);
944 return ERR_PTR(-EFAULT);
945 }
946
947 set_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
948 nesucontext->mmap_db_index[nespd->mmap_db_index] = nespd->pd_id;
949 nesucontext->first_free_db = nespd->mmap_db_index + 1;
950 }
951
952 nes_debug(NES_DBG_PD, "PD%u structure located @%p.\n", nespd->pd_id, nespd);
953 return &nespd->ibpd;
954}
955
956
957/**
958 * nes_dealloc_pd
959 */
960static int nes_dealloc_pd(struct ib_pd *ibpd)
961{
962 struct nes_ucontext *nesucontext;
963 struct nes_pd *nespd = to_nespd(ibpd);
964 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
965 struct nes_device *nesdev = nesvnic->nesdev;
966 struct nes_adapter *nesadapter = nesdev->nesadapter;
967
968 if ((ibpd->uobject) && (ibpd->uobject->context)) {
969 nesucontext = to_nesucontext(ibpd->uobject->context);
970 nes_debug(NES_DBG_PD, "Clearing bit %u from allocated doorbells\n",
971 nespd->mmap_db_index);
972 clear_bit(nespd->mmap_db_index, nesucontext->allocated_doorbells);
973 nesucontext->mmap_db_index[nespd->mmap_db_index] = 0;
974 if (nesucontext->first_free_db > nespd->mmap_db_index) {
975 nesucontext->first_free_db = nespd->mmap_db_index;
976 }
977 }
978
979 nes_debug(NES_DBG_PD, "Deallocating PD%u structure located @%p.\n",
980 nespd->pd_id, nespd);
981 nes_free_resource(nesadapter, nesadapter->allocated_pds,
982 (nespd->pd_id-nesadapter->base_pd)>>(PAGE_SHIFT-12));
983 kfree(nespd);
984
985 return 0;
986}
987
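/*
 * The PD id mapping used by nes_alloc_pd()/nes_dealloc_pd(), shown in
 * isolation (hypothetical helpers): with 4 KB pages PAGE_SHIFT - 12 is
 * zero and the id is simply the allocated index plus base_pd; on larger
 * page sizes the index is scaled so each PD keeps its own doorbell page.
 */
static inline u32 nes_pd_num_to_id_sketch(u32 pd_num, u32 base_pd)
{
	return (pd_num << (PAGE_SHIFT - 12)) + base_pd;
}

static inline u32 nes_pd_id_to_num_sketch(u32 pd_id, u32 base_pd)
{
	return (pd_id - base_pd) >> (PAGE_SHIFT - 12);
}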
988
989/**
990 * nes_create_ah
991 */
992static struct ib_ah *nes_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
993{
994 return ERR_PTR(-ENOSYS);
995}
996
997
998/**
999 * nes_destroy_ah
1000 */
1001static int nes_destroy_ah(struct ib_ah *ah)
1002{
1003 return -ENOSYS;
1004}
1005
1006
1007/**
1008 * nes_get_encoded_size - round *size up to 32, 128 or 512 WQEs and return the HW encoding (0 means too large)
1009 */
1010static inline u8 nes_get_encoded_size(int *size)
1011{
1012 u8 encoded_size = 0;
1013 if (*size <= 32) {
1014 *size = 32;
1015 encoded_size = 1;
1016 } else if (*size <= 128) {
1017 *size = 128;
1018 encoded_size = 2;
1019 } else if (*size <= 512) {
1020 *size = 512;
1021 encoded_size = 3;
1022 }
1023 return encoded_size;
1024}
1025
1026
1027
1028/**
1029 * nes_setup_virt_qp
1030 */
1031static int nes_setup_virt_qp(struct nes_qp *nesqp, struct nes_pbl *nespbl,
1032 struct nes_vnic *nesvnic, int sq_size, int rq_size)
1033{
1034 unsigned long flags;
1035 void *mem;
1036 __le64 *pbl = NULL;
1037 __le64 *tpbl;
1038 __le64 *pblbuffer;
1039 struct nes_device *nesdev = nesvnic->nesdev;
1040 struct nes_adapter *nesadapter = nesdev->nesadapter;
1041 u32 pbl_entries;
1042 u8 rq_pbl_entries;
1043 u8 sq_pbl_entries;
1044
1045 pbl_entries = nespbl->pbl_size >> 3;
1046 nes_debug(NES_DBG_QP, "Userspace PBL, pbl_size=%u, pbl_entries = %d pbl_vbase=%p, pbl_pbase=%p\n",
1047 nespbl->pbl_size, pbl_entries,
1048 (void *)nespbl->pbl_vbase,
1049 (void *)nespbl->pbl_pbase);
1050 pbl = (__le64 *) nespbl->pbl_vbase; /* points to first pbl entry */
1051 /* now let's set the sq_vbase as well as rq_vbase addrs; the first */
1052 /* pbl entries describe the sq, and the rq entries follow them... */
1053 rq_pbl_entries = (rq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
1054 sq_pbl_entries = (sq_size * sizeof(struct nes_hw_qp_wqe)) >> 12;
1055 nesqp->hwqp.sq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
1056 if (!nespbl->page) {
1057 nes_debug(NES_DBG_QP, "QP nespbl->page is NULL \n");
1058 kfree(nespbl);
1059 return -ENOMEM;
1060 }
1061
1062 nesqp->hwqp.sq_vbase = kmap(nespbl->page);
1063 nesqp->page = nespbl->page;
1064 if (!nesqp->hwqp.sq_vbase) {
1065 nes_debug(NES_DBG_QP, "QP sq_vbase kmap failed\n");
1066 kfree(nespbl);
1067 return -ENOMEM;
1068 }
1069
1070 /* Now to get to the rq.. we need to skip over the */
1071 /* PBL entries that were used by the sq.. */
1072 pbl += sq_pbl_entries;
1073 nesqp->hwqp.rq_pbase = (le32_to_cpu(((__le32 *)pbl)[0])) | ((u64)((le32_to_cpu(((__le32 *)pbl)[1]))) << 32);
1074 /* nesqp->hwqp.rq_vbase = bus_to_virt(*pbl); */
1075 /*nesqp->hwqp.rq_vbase = phys_to_virt(*pbl); */
1076
1077 nes_debug(NES_DBG_QP, "QP sq_vbase= %p sq_pbase=%p rq_vbase=%p rq_pbase=%p\n",
1078 nesqp->hwqp.sq_vbase, (void *)nesqp->hwqp.sq_pbase,
1079 nesqp->hwqp.rq_vbase, (void *)nesqp->hwqp.rq_pbase);
1080 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1081 if (!nesadapter->free_256pbl) {
1082 pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
1083 nespbl->pbl_pbase);
1084 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1085 kunmap(nesqp->page);
1086 kfree(nespbl);
1087 return -ENOMEM;
1088 }
1089 nesadapter->free_256pbl--;
1090 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1091
1092 nesqp->pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 256, &nesqp->pbl_pbase);
1093 pblbuffer = nesqp->pbl_vbase;
1094 if (!nesqp->pbl_vbase) {
1095 /* memory allocated during nes_reg_user_mr() */
1096 pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
1097 nespbl->pbl_pbase);
1098 kfree(nespbl);
1099 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1100 nesadapter->free_256pbl++;
1101 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1102 kunmap(nesqp->page);
1103 return -ENOMEM;
1104 }
1105 memset(nesqp->pbl_vbase, 0, 256);
1106 /* fill in the page address in the pbl buffer.. */
1107 tpbl = pblbuffer + 16;
1108 pbl = (__le64 *)nespbl->pbl_vbase;
1109 while (sq_pbl_entries--)
1110 *tpbl++ = *pbl++;
1111 tpbl = pblbuffer;
1112 while (rq_pbl_entries--)
1113 *tpbl++ = *pbl++;
1114
1115 /* done with memory allocated during nes_reg_user_mr() */
1116 pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
1117 nespbl->pbl_pbase);
1118 kfree(nespbl);
1119
1120 nesqp->qp_mem_size =
1121 max((u32)sizeof(struct nes_qp_context), ((u32)256)) + 256; /* this is Q2 */
1122 /* Round up to a multiple of a page */
1123 nesqp->qp_mem_size += PAGE_SIZE - 1;
1124 nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
1125
1126 mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
1127 &nesqp->hwqp.q2_pbase);
1128
1129 if (!mem) {
1130 pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase);
1131 nesqp->pbl_vbase = NULL;
1132 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1133 nesadapter->free_256pbl++;
1134 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1135 kunmap(nesqp->page);
1136 return -ENOMEM;
1137 }
1138 nesqp->hwqp.q2_vbase = mem;
1139 mem += 256;
1140 memset(nesqp->hwqp.q2_vbase, 0, 256);
1141 nesqp->nesqp_context = mem;
1142 memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
1143 nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
1144
1145 return 0;
1146}
1147
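/*
 * Layout of the 256-byte root PBL that nes_setup_virt_qp() builds above:
 * each half holds 16 eight-byte page addresses, and the copy loops fill
 * the SQ half first (at pblbuffer + 16) and the RQ half second.
 *
 *   pbl_vbase[ 0..15]  RQ page addresses
 *   pbl_vbase[16..31]  SQ page addresses
 */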
1148
1149/**
1150 * nes_setup_mmap_qp
1151 */
1152static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
1153 int sq_size, int rq_size)
1154{
1155 void *mem;
1156 struct nes_device *nesdev = nesvnic->nesdev;
1157
1158 nesqp->qp_mem_size = (sizeof(struct nes_hw_qp_wqe) * sq_size) +
1159 (sizeof(struct nes_hw_qp_wqe) * rq_size) +
1160 max((u32)sizeof(struct nes_qp_context), ((u32)256)) +
1161 256; /* this is Q2 */
1162 /* Round up to a multiple of a page */
1163 nesqp->qp_mem_size += PAGE_SIZE - 1;
1164 nesqp->qp_mem_size &= ~(PAGE_SIZE - 1);
1165
1166 mem = pci_alloc_consistent(nesdev->pcidev, nesqp->qp_mem_size,
1167 &nesqp->hwqp.sq_pbase);
1168 if (!mem)
1169 return -ENOMEM;
1170 nes_debug(NES_DBG_QP, "PCI consistent memory for "
1171 "host descriptor rings located @ %p (pa = 0x%08lX.) size = %u.\n",
1172 mem, (unsigned long)nesqp->hwqp.sq_pbase, nesqp->qp_mem_size);
1173
1174 memset(mem, 0, nesqp->qp_mem_size);
1175
1176 nesqp->hwqp.sq_vbase = mem;
1177 mem += sizeof(struct nes_hw_qp_wqe) * sq_size;
1178
1179 nesqp->hwqp.rq_vbase = mem;
1180 nesqp->hwqp.rq_pbase = nesqp->hwqp.sq_pbase +
1181 sizeof(struct nes_hw_qp_wqe) * sq_size;
1182 mem += sizeof(struct nes_hw_qp_wqe) * rq_size;
1183
1184 nesqp->hwqp.q2_vbase = mem;
1185 nesqp->hwqp.q2_pbase = nesqp->hwqp.rq_pbase +
1186 sizeof(struct nes_hw_qp_wqe) * rq_size;
1187 mem += 256;
1188 memset(nesqp->hwqp.q2_vbase, 0, 256);
1189
1190 nesqp->nesqp_context = mem;
1191 nesqp->nesqp_context_pbase = nesqp->hwqp.q2_pbase + 256;
1192 memset(nesqp->nesqp_context, 0, sizeof(*nesqp->nesqp_context));
1193 return 0;
1194}
1195
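/*
 * Layout of the single pci_alloc_consistent() block that
 * nes_setup_mmap_qp() carves up above (total rounded up to a whole
 * number of pages):
 *
 *   sq_vbase       sq_size * sizeof(struct nes_hw_qp_wqe)
 *   rq_vbase       rq_size * sizeof(struct nes_hw_qp_wqe)
 *   q2_vbase       256 bytes, zeroed
 *   nesqp_context  struct nes_qp_context
 */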
1196
1197/**
1198 * nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
1199 */
1200static inline void nes_free_qp_mem(struct nes_device *nesdev,
1201 struct nes_qp *nesqp, int virt_wqs)
1202{
1203 unsigned long flags;
1204 struct nes_adapter *nesadapter = nesdev->nesadapter;
1205 if (!virt_wqs) {
1206 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size,
1207 nesqp->hwqp.sq_vbase, nesqp->hwqp.sq_pbase);
1208 } else {
1209 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1210 nesadapter->free_256pbl++;
1211 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1212 pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase);
1213 pci_free_consistent(nesdev->pcidev, 256, nesqp->pbl_vbase, nesqp->pbl_pbase );
1214 nesqp->pbl_vbase = NULL;
1215 kunmap(nesqp->page);
1216 }
1217}
1218
1219
1220/**
1221 * nes_create_qp
1222 */
1223static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
1224 struct ib_qp_init_attr *init_attr, struct ib_udata *udata)
1225{
1226 u64 u64temp = 0;
1227 u64 u64nesqp = 0;
1228 struct nes_pd *nespd = to_nespd(ibpd);
1229 struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
1230 struct nes_device *nesdev = nesvnic->nesdev;
1231 struct nes_adapter *nesadapter = nesdev->nesadapter;
1232 struct nes_qp *nesqp;
1233 struct nes_cq *nescq;
1234 struct nes_ucontext *nes_ucontext;
1235 struct nes_hw_cqp_wqe *cqp_wqe;
1236 struct nes_cqp_request *cqp_request;
1237 struct nes_create_qp_req req;
1238 struct nes_create_qp_resp uresp;
1239 struct nes_pbl *nespbl = NULL;
1240 u32 qp_num = 0;
1241 u32 opcode = 0;
1242 /* u32 counter = 0; */
1243 void *mem;
1244 unsigned long flags;
1245 int ret;
1246 int err;
1247 int virt_wqs = 0;
1248 int sq_size;
1249 int rq_size;
1250 u8 sq_encoded_size;
1251 u8 rq_encoded_size;
1252 /* int counter; */
1253
1254 atomic_inc(&qps_created);
1255 switch (init_attr->qp_type) {
1256 case IB_QPT_RC:
1257 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
1258 init_attr->cap.max_inline_data = 0;
1259 } else {
1260 init_attr->cap.max_inline_data = 64;
1261 }
1262 sq_size = init_attr->cap.max_send_wr;
1263 rq_size = init_attr->cap.max_recv_wr;
1264
1265 /* check if the encoded sizes are OK or not... */
1266 sq_encoded_size = nes_get_encoded_size(&sq_size);
1267 rq_encoded_size = nes_get_encoded_size(&rq_size);
1268
1269 if ((!sq_encoded_size) || (!rq_encoded_size)) {
1270 nes_debug(NES_DBG_QP, "ERROR bad rq (%u) or sq (%u) size\n",
1271 rq_size, sq_size);
1272 return ERR_PTR(-EINVAL);
1273 }
1274
1275 init_attr->cap.max_send_wr = sq_size - 2;
1276 init_attr->cap.max_recv_wr = rq_size - 1;
1277 nes_debug(NES_DBG_QP, "RQ size=%u, SQ Size=%u\n", rq_size, sq_size);
1278
1279 ret = nes_alloc_resource(nesadapter, nesadapter->allocated_qps,
1280 nesadapter->max_qp, &qp_num, &nesadapter->next_qp);
1281 if (ret) {
1282 return ERR_PTR(ret);
1283 }
1284
1285 /* Need 512 (actually now 1024) byte alignment on this structure */
1286 mem = kzalloc(sizeof(*nesqp)+NES_SW_CONTEXT_ALIGN-1, GFP_KERNEL);
1287 if (!mem) {
1288 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1289 nes_debug(NES_DBG_QP, "Unable to allocate QP\n");
1290 return ERR_PTR(-ENOMEM);
1291 }
1292 u64nesqp = (unsigned long)mem;
1293 u64nesqp += ((u64)NES_SW_CONTEXT_ALIGN) - 1;
1294 u64temp = ((u64)NES_SW_CONTEXT_ALIGN) - 1;
1295 u64nesqp &= ~u64temp;
1296 nesqp = (struct nes_qp *)(unsigned long)u64nesqp;
1297 /* nes_debug(NES_DBG_QP, "nesqp=%p, allocated buffer=%p. Rounded to closest %u\n",
1298 nesqp, mem, NES_SW_CONTEXT_ALIGN); */
1299 nesqp->allocated_buffer = mem;
1300
1301 if (udata) {
1302 if (ib_copy_from_udata(&req, udata, sizeof(struct nes_create_qp_req))) {
1303 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1304 kfree(nesqp->allocated_buffer);
1305 nes_debug(NES_DBG_QP, "ib_copy_from_udata() failed\n");
1306 return ERR_PTR(-EFAULT);
1307 }
1308 if (req.user_wqe_buffers) {
1309 virt_wqs = 1;
1310 }
1311 if ((ibpd->uobject) && (ibpd->uobject->context)) {
1312 nesqp->user_mode = 1;
1313 nes_ucontext = to_nesucontext(ibpd->uobject->context);
1314 if (virt_wqs) {
1315 err = 1;
1316 list_for_each_entry(nespbl, &nes_ucontext->qp_reg_mem_list, list) {
1317 if (nespbl->user_base == (unsigned long)req.user_wqe_buffers) {
1318 list_del(&nespbl->list);
1319 err = 0;
1320 nes_debug(NES_DBG_QP, "Found PBL for virtual QP. nespbl=%p. user_base=0x%lx\n",
1321 nespbl, nespbl->user_base);
1322 break;
1323 }
1324 }
1325 if (err) {
1326 nes_debug(NES_DBG_QP, "Didn't Find PBL for virtual QP. address = %llx.\n",
1327 (long long unsigned int)req.user_wqe_buffers);
1328 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1329 kfree(nesqp->allocated_buffer);
1330 return ERR_PTR(-ENOMEM);
1331 }
1332 }
1333
1334 nes_ucontext = to_nesucontext(ibpd->uobject->context);
1335 nesqp->mmap_sq_db_index =
1336 find_next_zero_bit(nes_ucontext->allocated_wqs,
1337 NES_MAX_USER_WQ_REGIONS, nes_ucontext->first_free_wq);
1338 /* nes_debug(NES_DBG_QP, "find_first_zero_biton wqs returned %u\n",
1339 nespd->mmap_db_index); */
1340 if (nesqp->mmap_sq_db_index >= NES_MAX_USER_WQ_REGIONS) {
1341 nes_debug(NES_DBG_QP,
1342 "db index > max user regions, failing create QP\n");
1343 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1344 if (virt_wqs) {
1345 pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
1346 nespbl->pbl_pbase);
1347 kfree(nespbl);
1348 }
1349 kfree(nesqp->allocated_buffer);
1350 return ERR_PTR(-ENOMEM);
1351 }
1352 set_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
1353 nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = nesqp;
1354 nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index + 1;
1355 } else {
1356 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1357 kfree(nesqp->allocated_buffer);
1358 return ERR_PTR(-EFAULT);
1359 }
1360 }
1361 err = (!virt_wqs) ? nes_setup_mmap_qp(nesqp, nesvnic, sq_size, rq_size) :
1362 nes_setup_virt_qp(nesqp, nespbl, nesvnic, sq_size, rq_size);
1363 if (err) {
1364 nes_debug(NES_DBG_QP,
1365 "error geting qp mem code = %d\n", err);
1366 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1367 kfree(nesqp->allocated_buffer);
1368 return ERR_PTR(-ENOMEM);
1369 }
1370
1371 nesqp->hwqp.sq_size = sq_size;
1372 nesqp->hwqp.sq_encoded_size = sq_encoded_size;
1373 nesqp->hwqp.sq_head = 1;
1374 nesqp->hwqp.rq_size = rq_size;
1375 nesqp->hwqp.rq_encoded_size = rq_encoded_size;
1376 /* nes_debug(NES_DBG_QP, "nesqp->nesqp_context_pbase = %p\n",
1377 (void *)nesqp->nesqp_context_pbase);
1378 */
1379 nesqp->hwqp.qp_id = qp_num;
1380 nesqp->ibqp.qp_num = nesqp->hwqp.qp_id;
1381 nesqp->nespd = nespd;
1382
1383 nescq = to_nescq(init_attr->send_cq);
1384 nesqp->nesscq = nescq;
1385 nescq = to_nescq(init_attr->recv_cq);
1386 nesqp->nesrcq = nescq;
1387
1388 nesqp->nesqp_context->misc |= cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) <<
1389 NES_QPCONTEXT_MISC_PCI_FCN_SHIFT);
1390 nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.rq_encoded_size <<
1391 NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
1392 nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
1393 NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
1394 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
1395 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
1396 nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
1397 ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
1398 u64temp = (u64)nesqp->hwqp.sq_pbase;
1399 nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
1400 nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
1401
1402
1403 if (!virt_wqs) {
1404 u64temp = (u64)nesqp->hwqp.sq_pbase;
1405 nesqp->nesqp_context->sq_addr_low = cpu_to_le32((u32)u64temp);
1406 nesqp->nesqp_context->sq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
1407 u64temp = (u64)nesqp->hwqp.rq_pbase;
1408 nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
1409 nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
1410 } else {
1411 u64temp = (u64)nesqp->pbl_pbase;
1412 nesqp->nesqp_context->rq_addr_low = cpu_to_le32((u32)u64temp);
1413 nesqp->nesqp_context->rq_addr_high = cpu_to_le32((u32)(u64temp >> 32));
1414 }
1415
1416 /* nes_debug(NES_DBG_QP, "next_qp_nic_index=%u, using nic_index=%d\n",
1417 nesvnic->next_qp_nic_index,
1418 nesvnic->qp_nic_index[nesvnic->next_qp_nic_index]); */
1419 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1420 nesqp->nesqp_context->misc2 |= cpu_to_le32(
1421 (u32)nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] <<
1422 NES_QPCONTEXT_MISC2_NIC_INDEX_SHIFT);
1423 nesvnic->next_qp_nic_index++;
1424 if ((nesvnic->next_qp_nic_index > 3) ||
1425 (nesvnic->qp_nic_index[nesvnic->next_qp_nic_index] == 0xf)) {
1426 nesvnic->next_qp_nic_index = 0;
1427 }
1428 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1429
1430 nesqp->nesqp_context->pd_index_wscale |= cpu_to_le32((u32)nesqp->nespd->pd_id << 16);
1431 u64temp = (u64)nesqp->hwqp.q2_pbase;
1432 nesqp->nesqp_context->q2_addr_low = cpu_to_le32((u32)u64temp);
1433 nesqp->nesqp_context->q2_addr_high = cpu_to_le32((u32)(u64temp >> 32));
1434 nesqp->nesqp_context->aeq_token_low = cpu_to_le32((u32)((unsigned long)(nesqp)));
1435 nesqp->nesqp_context->aeq_token_high = cpu_to_le32((u32)(upper_32_bits((unsigned long)(nesqp))));
1436 nesqp->nesqp_context->ird_ord_sizes = cpu_to_le32(NES_QPCONTEXT_ORDIRD_ALSMM |
1437 ((((u32)nesadapter->max_irrq_wr) <<
1438 NES_QPCONTEXT_ORDIRD_IRDSIZE_SHIFT) & NES_QPCONTEXT_ORDIRD_IRDSIZE_MASK));
1439 if (disable_mpa_crc) {
1440 nes_debug(NES_DBG_QP, "Disabling MPA crc checking due to module option.\n");
1441 nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(NES_QPCONTEXT_ORDIRD_RNMC);
1442 }
1443
1444
1445 /* Create the QP */
1446 cqp_request = nes_get_cqp_request(nesdev);
1447 if (cqp_request == NULL) {
1448 nes_debug(NES_DBG_QP, "Failed to get a cqp_request\n");
1449 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1450 nes_free_qp_mem(nesdev, nesqp, virt_wqs);
1451 kfree(nesqp->allocated_buffer);
1452 return ERR_PTR(-ENOMEM);
1453 }
1454 cqp_request->waiting = 1;
1455 cqp_wqe = &cqp_request->cqp_wqe;
1456
1457 if (!virt_wqs) {
1458 opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP |
1459 NES_CQP_QP_IWARP_STATE_IDLE;
1460 } else {
1461 opcode = NES_CQP_CREATE_QP | NES_CQP_QP_TYPE_IWARP | NES_CQP_QP_VIRT_WQS |
1462 NES_CQP_QP_IWARP_STATE_IDLE;
1463 }
1464 opcode |= NES_CQP_QP_CQS_VALID;
1465 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1466 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
1467 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
1468
1469 u64temp = (u64)nesqp->nesqp_context_pbase;
1470 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, u64temp);
1471
1472 atomic_set(&cqp_request->refcount, 2);
1473 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
1474
1475 /* Wait for CQP */
1476 nes_debug(NES_DBG_QP, "Waiting for create iWARP QP%u to complete.\n",
1477 nesqp->hwqp.qp_id);
1478 ret = wait_event_timeout(cqp_request->waitq,
1479 (cqp_request->request_done != 0), NES_EVENT_TIMEOUT);
1480 nes_debug(NES_DBG_QP, "Create iwarp QP%u completed, wait_event_timeout ret=%u,"
1481 " nesdev->cqp_head = %u, nesdev->cqp.sq_tail = %u,"
1482 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
1483 nesqp->hwqp.qp_id, ret, nesdev->cqp.sq_head, nesdev->cqp.sq_tail,
1484 cqp_request->major_code, cqp_request->minor_code);
1485 if ((!ret) || (cqp_request->major_code)) {
1486 if (atomic_dec_and_test(&cqp_request->refcount)) {
1487 if (cqp_request->dynamic) {
1488 kfree(cqp_request);
1489 } else {
1490 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1491 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1492 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1493 }
1494 }
1495 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1496 nes_free_qp_mem(nesdev, nesqp, virt_wqs);
1497 kfree(nesqp->allocated_buffer);
1498 if (!ret) {
1499 return ERR_PTR(-ETIME);
1500 } else {
1501 return ERR_PTR(-EIO);
1502 }
1503 } else {
1504 if (atomic_dec_and_test(&cqp_request->refcount)) {
1505 if (cqp_request->dynamic) {
1506 kfree(cqp_request);
1507 } else {
1508 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1509 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1510 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1511 }
1512 }
1513 }
1514
1515 if (ibpd->uobject) {
1516 uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
1517 uresp.actual_sq_size = sq_size;
1518 uresp.actual_rq_size = rq_size;
1519 uresp.qp_id = nesqp->hwqp.qp_id;
1520 uresp.nes_drv_opt = nes_drv_opt;
1521 if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
1522 nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
1523 nes_free_qp_mem(nesdev, nesqp, virt_wqs);
1524 kfree(nesqp->allocated_buffer);
1525 return ERR_PTR(-EFAULT);
1526 }
1527 }
1528
1529 nes_debug(NES_DBG_QP, "QP%u structure located @%p.Size = %u.\n",
1530 nesqp->hwqp.qp_id, nesqp, (u32)sizeof(*nesqp));
1531 spin_lock_init(&nesqp->lock);
1532 init_waitqueue_head(&nesqp->state_waitq);
1533 init_waitqueue_head(&nesqp->kick_waitq);
1534 nes_add_ref(&nesqp->ibqp);
1535 break;
1536 default:
1537 nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
1538 return ERR_PTR(-EINVAL);
1539 break;
1540 }
1541
1542 /* update the QP table */
1543 nesdev->nesadapter->qp_table[nesqp->hwqp.qp_id-NES_FIRST_QPN] = nesqp;
1544 nes_debug(NES_DBG_QP, "netdev refcnt=%u\n",
1545 atomic_read(&nesvnic->netdev->refcnt));
1546
1547 return &nesqp->ibqp;
1548}
1549
1550
1551/**
1552 * nes_destroy_qp
1553 */
1554static int nes_destroy_qp(struct ib_qp *ibqp)
1555{
1556 struct nes_qp *nesqp = to_nesqp(ibqp);
1557 /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
1558 struct nes_ucontext *nes_ucontext;
1559 struct ib_qp_attr attr;
1560 struct iw_cm_id *cm_id;
1561 struct iw_cm_event cm_event;
1562 int ret;
1563
1564 atomic_inc(&sw_qps_destroyed);
1565 nesqp->destroyed = 1;
1566
1567 /* Blow away the connection if it exists. */
1568 if (nesqp->ibqp_state >= IB_QPS_INIT && nesqp->ibqp_state <= IB_QPS_RTS) {
1569 /* if (nesqp->ibqp_state == IB_QPS_RTS) { */
1570 attr.qp_state = IB_QPS_ERR;
1571 nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
1572 }
1573
1574 if (((nesqp->ibqp_state == IB_QPS_INIT) ||
1575 (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
1576 cm_id = nesqp->cm_id;
1577 cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
1578 cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT;
1579 cm_event.local_addr = cm_id->local_addr;
1580 cm_event.remote_addr = cm_id->remote_addr;
1581 cm_event.private_data = NULL;
1582 cm_event.private_data_len = 0;
1583
1584 nes_debug(NES_DBG_QP, "Generating a CM Timeout Event for "
1585 "QP%u. cm_id = %p, refcount = %u. \n",
1586 nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount));
1587
1588 cm_id->rem_ref(cm_id);
1589 ret = cm_id->event_handler(cm_id, &cm_event);
1590 if (ret)
1591 nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
1592 }
1593
1594
1595 if (nesqp->user_mode) {
1596 if ((ibqp->uobject)&&(ibqp->uobject->context)) {
1597 nes_ucontext = to_nesucontext(ibqp->uobject->context);
1598 clear_bit(nesqp->mmap_sq_db_index, nes_ucontext->allocated_wqs);
1599 nes_ucontext->mmap_nesqp[nesqp->mmap_sq_db_index] = NULL;
1600 if (nes_ucontext->first_free_wq > nesqp->mmap_sq_db_index) {
1601 nes_ucontext->first_free_wq = nesqp->mmap_sq_db_index;
1602 }
1603 }
1604 if (nesqp->pbl_pbase)
1605 kunmap(nesqp->page);
1606 }
1607
1608 nes_rem_ref(&nesqp->ibqp);
1609 return 0;
1610}
1611
1612
1613/**
1614 * nes_create_cq
1615 */
1616static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
1617 int comp_vector,
1618 struct ib_ucontext *context, struct ib_udata *udata)
1619{
1620 u64 u64temp;
1621 struct nes_vnic *nesvnic = to_nesvnic(ibdev);
1622 struct nes_device *nesdev = nesvnic->nesdev;
1623 struct nes_adapter *nesadapter = nesdev->nesadapter;
1624 struct nes_cq *nescq;
1625 struct nes_ucontext *nes_ucontext = NULL;
1626 struct nes_cqp_request *cqp_request;
1627 void *mem = NULL;
1628 struct nes_hw_cqp_wqe *cqp_wqe;
1629 struct nes_pbl *nespbl = NULL;
1630 struct nes_create_cq_req req;
1631 struct nes_create_cq_resp resp;
1632 u32 cq_num = 0;
1633 u32 opcode = 0;
1634 u32 pbl_entries = 1;
1635 int err;
1636 unsigned long flags;
1637 int ret;
1638
1639 err = nes_alloc_resource(nesadapter, nesadapter->allocated_cqs,
1640 nesadapter->max_cq, &cq_num, &nesadapter->next_cq);
1641 if (err) {
1642 return ERR_PTR(err);
1643 }
1644
1645 nescq = kzalloc(sizeof(struct nes_cq), GFP_KERNEL);
1646 if (!nescq) {
1647 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1648 nes_debug(NES_DBG_CQ, "Unable to allocate nes_cq struct\n");
1649 return ERR_PTR(-ENOMEM);
1650 }
1651
1652 nescq->hw_cq.cq_size = max(entries + 1, 5);
1653 nescq->hw_cq.cq_number = cq_num;
1654 nescq->ibcq.cqe = nescq->hw_cq.cq_size - 1;
1655
1656
1657 if (context) {
1658 nes_ucontext = to_nesucontext(context);
1659 if (ib_copy_from_udata(&req, udata, sizeof (struct nes_create_cq_req))) {
1660 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1661 kfree(nescq);
1662 return ERR_PTR(-EFAULT);
1663 }
1664 nesvnic->mcrq_ucontext = nes_ucontext;
1665 nes_ucontext->mcrqf = req.mcrqf;
1666 if (nes_ucontext->mcrqf) {
1667 if (nes_ucontext->mcrqf & 0x80000000)
1668 nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
1669 else if (nes_ucontext->mcrqf & 0x40000000)
1670 nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
1671 else
1672 nescq->hw_cq.cq_number = nesvnic->mcrq_qp_id + nes_ucontext->mcrqf-1;
1673 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1674 }
1675 nes_debug(NES_DBG_CQ, "CQ Virtual Address = %08lX, size = %u.\n",
1676 (unsigned long)req.user_cq_buffer, entries);
1677 list_for_each_entry(nespbl, &nes_ucontext->cq_reg_mem_list, list) {
1678 if (nespbl->user_base == (unsigned long)req.user_cq_buffer) {
1679 list_del(&nespbl->list);
1680 err = 0;
1681 nes_debug(NES_DBG_CQ, "Found PBL for virtual CQ. nespbl=%p.\n",
1682 nespbl);
1683 break;
1684 }
1685 }
1686 if (&nespbl->list == &nes_ucontext->cq_reg_mem_list) { /* no PBL matched req.user_cq_buffer */
1687 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1688 kfree(nescq);
1689 return ERR_PTR(-EFAULT);
1690 }
1691
1692 pbl_entries = nespbl->pbl_size >> 3;
1693 nescq->cq_mem_size = 0;
1694 } else {
1695 nescq->cq_mem_size = nescq->hw_cq.cq_size * sizeof(struct nes_hw_cqe);
1696 nes_debug(NES_DBG_CQ, "Attempting to allocate pci memory (%u entries, %u bytes) for CQ%u.\n",
1697 entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
1698
1699 /* allocate the physical buffer space */
1700 mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
1701 &nescq->hw_cq.cq_pbase);
1702 if (!mem) {
1703 printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
1704 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1705 kfree(nescq);
1706 return ERR_PTR(-ENOMEM);
1707 }
1708
1709 memset(mem, 0, nescq->cq_mem_size);
1710 nescq->hw_cq.cq_vbase = mem;
1711 nescq->hw_cq.cq_head = 0;
1712 nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
1713 nescq->hw_cq.cq_number, nescq->hw_cq.cq_vbase,
1714 (u32)nescq->hw_cq.cq_pbase);
1715 }
1716
1717 nescq->hw_cq.ce_handler = nes_iwarp_ce_handler;
1718 spin_lock_init(&nescq->lock);
1719
1720 /* send CreateCQ request to CQP */
1721 cqp_request = nes_get_cqp_request(nesdev);
1722 if (cqp_request == NULL) {
1723 nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
1724 if (!context)
1725 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
1726 nescq->hw_cq.cq_pbase);
1727 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1728 kfree(nescq);
1729 return ERR_PTR(-ENOMEM);
1730 }
1731 cqp_request->waiting = 1;
1732 cqp_wqe = &cqp_request->cqp_wqe;
1733
1734 opcode = NES_CQP_CREATE_CQ | NES_CQP_CQ_CEQ_VALID |
1735 NES_CQP_CQ_CHK_OVERFLOW |
1736 NES_CQP_CQ_CEQE_MASK | ((u32)nescq->hw_cq.cq_size << 16);
1737
1738 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1739
1740 if (pbl_entries != 1) {
1741 if (pbl_entries > 32) {
1742 /* use 4k pbl */
1743 nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 4k PBL\n", pbl_entries);
1744 if (nesadapter->free_4kpbl == 0) {
1745 if (cqp_request->dynamic) {
1746 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1747 kfree(cqp_request);
1748 } else {
1749 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1750 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1751 }
1752 if (!context)
1753 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
1754 nescq->hw_cq.cq_pbase);
1755 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1756 kfree(nescq);
1757 return ERR_PTR(-ENOMEM);
1758 } else {
1759 opcode |= (NES_CQP_CQ_VIRT | NES_CQP_CQ_4KB_CHUNK);
1760 nescq->virtual_cq = 2;
1761 nesadapter->free_4kpbl--;
1762 }
1763 } else {
1764 /* use 256 byte pbl */
1765 nes_debug(NES_DBG_CQ, "pbl_entries=%u, use a 256 byte PBL\n", pbl_entries);
1766 if (nesadapter->free_256pbl == 0) {
1767 if (cqp_request->dynamic) {
1768 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1769 kfree(cqp_request);
1770 } else {
1771 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1772 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1773 }
1774 if (!context)
1775 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
1776 nescq->hw_cq.cq_pbase);
1777 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1778 kfree(nescq);
1779 return ERR_PTR(-ENOMEM);
1780 } else {
1781 opcode |= NES_CQP_CQ_VIRT;
1782 nescq->virtual_cq = 1;
1783 nesadapter->free_256pbl--;
1784 }
1785 }
1786 }
1787
1788 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1789
1790 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1791 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
1792 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
1793 (nescq->hw_cq.cq_number | ((u32)nesdev->ceq_index << 16)));
1794
1795 if (context) {
1796 if (pbl_entries != 1)
1797 u64temp = (u64)nespbl->pbl_pbase;
1798 else
1799 u64temp = le64_to_cpu(nespbl->pbl_vbase[0]);
1800 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX,
1801 nes_ucontext->mmap_db_index[0]);
1802 } else {
1803 u64temp = (u64)nescq->hw_cq.cq_pbase;
1804 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_DOORBELL_INDEX_HIGH_IDX] = 0;
1805 }
1806 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_CQ_WQE_PBL_LOW_IDX, u64temp);
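	/*
	 * The 64-bit CQ context pointer is stored shifted right by one bit,
	 * split across the low/high context words with the top bit masked
	 * off; the freed bits are presumably flag bits consumed by the
	 * hardware, so the pointer must be at least 2-byte aligned.
	 */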
1808 u64temp = (u64)(unsigned long)&nescq->hw_cq;
1809 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_LOW_IDX] =
1810 cpu_to_le32((u32)(u64temp >> 1));
1811 cqp_wqe->wqe_words[NES_CQP_CQ_WQE_CQ_CONTEXT_HIGH_IDX] =
1812 cpu_to_le32(((u32)((u64temp) >> 33)) & 0x7FFFFFFF);
1813
1814 atomic_set(&cqp_request->refcount, 2);
1815 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
1816
1817 /* Wait for CQP */
1818 nes_debug(NES_DBG_CQ, "Waiting for create iWARP CQ%u to complete.\n",
1819 nescq->hw_cq.cq_number);
1820 ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
1821 NES_EVENT_TIMEOUT * 2);
1822 nes_debug(NES_DBG_CQ, "Create iWARP CQ%u completed, wait_event_timeout ret = %d.\n",
1823 nescq->hw_cq.cq_number, ret);
1824 if ((!ret) || (cqp_request->major_code)) {
1825 if (atomic_dec_and_test(&cqp_request->refcount)) {
1826 if (cqp_request->dynamic) {
1827 kfree(cqp_request);
1828 } else {
1829 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1830 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1831 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1832 }
1833 }
1834 nes_debug(NES_DBG_CQ, "iWARP CQ%u create timeout expired, major code = 0x%04X,"
1835 " minor code = 0x%04X\n",
1836 nescq->hw_cq.cq_number, cqp_request->major_code, cqp_request->minor_code);
1837 if (!context)
1838 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, mem,
1839 nescq->hw_cq.cq_pbase);
1840 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1841 kfree(nescq);
1842 return ERR_PTR(-EIO);
1843 } else {
1844 if (atomic_dec_and_test(&cqp_request->refcount)) {
1845 if (cqp_request->dynamic) {
1846 kfree(cqp_request);
1847 } else {
1848 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1849 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1850 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1851 }
1852 }
1853 }
1854
1855 if (context) {
1856 /* free the nespbl */
1857 pci_free_consistent(nesdev->pcidev, nespbl->pbl_size, nespbl->pbl_vbase,
1858 nespbl->pbl_pbase);
1859 kfree(nespbl);
1860 resp.cq_id = nescq->hw_cq.cq_number;
1861 resp.cq_size = nescq->hw_cq.cq_size;
1862 resp.mmap_db_index = 0;
1863 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
1864 nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
1865 kfree(nescq);
1866 return ERR_PTR(-EFAULT);
1867 }
1868 }
1869
1870 return &nescq->ibcq;
1871}
1872
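/*
 * The drop-the-reference-and-recycle sequence that ends every CQP wait in
 * this file is open-coded each time.  A minimal sketch of a shared helper
 * (hypothetical -- nes_put_cqp_request() is not part of this driver as
 * posted) that would replace those blocks:
 */
static void nes_put_cqp_request(struct nes_device *nesdev,
		struct nes_cqp_request *cqp_request)
{
	unsigned long flags;

	/* last reference: either free a dynamic request or recycle it */
	if (!atomic_dec_and_test(&cqp_request->refcount))
		return;
	if (cqp_request->dynamic) {
		kfree(cqp_request);
	} else {
		spin_lock_irqsave(&nesdev->cqp.lock, flags);
		list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
	}
}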
1873
1874/**
1875 * nes_destroy_cq
1876 */
1877static int nes_destroy_cq(struct ib_cq *ib_cq)
1878{
1879 struct nes_cq *nescq;
1880 struct nes_device *nesdev;
1881 struct nes_vnic *nesvnic;
1882 struct nes_adapter *nesadapter;
1883 struct nes_hw_cqp_wqe *cqp_wqe;
1884 struct nes_cqp_request *cqp_request;
1885 unsigned long flags;
1886 u32 opcode = 0;
1887 int ret;
1888
1889 if (ib_cq == NULL)
1890 return 0;
1891
1892 nescq = to_nescq(ib_cq);
1893 nesvnic = to_nesvnic(ib_cq->device);
1894 nesdev = nesvnic->nesdev;
1895 nesadapter = nesdev->nesadapter;
1896
1897 nes_debug(NES_DBG_CQ, "Destroy CQ%u\n", nescq->hw_cq.cq_number);
1898
1899 /* Send DestroyCQ request to CQP */
1900 cqp_request = nes_get_cqp_request(nesdev);
1901 if (cqp_request == NULL) {
1902 nes_debug(NES_DBG_CQ, "Failed to get a cqp_request.\n");
1903 return -ENOMEM;
1904 }
1905 cqp_request->waiting = 1;
1906 cqp_wqe = &cqp_request->cqp_wqe;
1907 opcode = NES_CQP_DESTROY_CQ | (nescq->hw_cq.cq_size << 16);
1908 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
1909 if (nescq->virtual_cq == 1) {
1910 nesadapter->free_256pbl++;
1911 if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
1912 printk(KERN_ERR PFX "%s: free 256B PBLs(%u) has exceeded the max(%u)\n",
				__func__, nesadapter->free_256pbl, nesadapter->max_256pbl);
1914 }
1915 } else if (nescq->virtual_cq == 2) {
1916 nesadapter->free_4kpbl++;
1917 if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
1918 printk(KERN_ERR PFX "%s: free 4K PBLs(%u) has exceeded the max(%u)\n",
				__func__, nesadapter->free_4kpbl, nesadapter->max_4kpbl);
1920 }
1921 opcode |= NES_CQP_CQ_4KB_CHUNK;
1922 }
1923
1924 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
1925
1926 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
1927 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
1928 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX,
1929 (nescq->hw_cq.cq_number | ((u32)PCI_FUNC(nesdev->pcidev->devfn) << 16)));
1930 nes_free_resource(nesadapter, nesadapter->allocated_cqs, nescq->hw_cq.cq_number);
1931 atomic_set(&cqp_request->refcount, 2);
1932 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
1933
1934 /* Wait for CQP */
1935 nes_debug(NES_DBG_CQ, "Waiting for destroy iWARP CQ%u to complete.\n",
1936 nescq->hw_cq.cq_number);
1937 ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
1938 NES_EVENT_TIMEOUT);
1939 nes_debug(NES_DBG_CQ, "Destroy iWARP CQ%u completed, wait_event_timeout ret = %u,"
1940 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
1941 nescq->hw_cq.cq_number, ret, cqp_request->major_code,
1942 cqp_request->minor_code);
1943 if ((!ret) || (cqp_request->major_code)) {
1944 if (atomic_dec_and_test(&cqp_request->refcount)) {
1945 if (cqp_request->dynamic) {
1946 kfree(cqp_request);
1947 } else {
1948 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1949 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1950 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1951 }
1952 }
1953 if (!ret) {
1954 nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy timeout expired\n",
1955 nescq->hw_cq.cq_number);
1956 ret = -ETIME;
1957 } else {
1958 nes_debug(NES_DBG_CQ, "iWARP CQ%u destroy failed\n",
1959 nescq->hw_cq.cq_number);
1960 ret = -EIO;
1961 }
1962 } else {
1963 ret = 0;
1964 if (atomic_dec_and_test(&cqp_request->refcount)) {
1965 if (cqp_request->dynamic) {
1966 kfree(cqp_request);
1967 } else {
1968 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1969 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
1970 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
1971 }
1972 }
1973 }
1974
1975 if (nescq->cq_mem_size)
1976 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
1977 (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
1978 kfree(nescq);
1979
1980 return ret;
1981}
1982
1983
1984/**
1985 * nes_reg_mr
1986 */
1987static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd,
1988 u32 stag, u64 region_length, struct nes_root_vpbl *root_vpbl,
1989 dma_addr_t single_buffer, u16 pbl_count, u16 residual_page_count,
1990 int acc, u64 *iova_start)
1991{
1992 struct nes_hw_cqp_wqe *cqp_wqe;
1993 struct nes_cqp_request *cqp_request;
1994 unsigned long flags;
1995 int ret;
1996 struct nes_adapter *nesadapter = nesdev->nesadapter;
1997 /* int count; */
1998 u32 opcode = 0;
1999 u16 major_code;
2000
2001 /* Register the region with the adapter */
2002 cqp_request = nes_get_cqp_request(nesdev);
2003 if (cqp_request == NULL) {
2004 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
2005 return -ENOMEM;
2006 }
2007 cqp_request->waiting = 1;
2008 cqp_wqe = &cqp_request->cqp_wqe;
2009
2010 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2011 /* track PBL resources */
2012 if (pbl_count != 0) {
2013 if (pbl_count > 1) {
2014 /* Two level PBL */
2015 if ((pbl_count+1) > nesadapter->free_4kpbl) {
2016 nes_debug(NES_DBG_MR, "Out of 4KB Pbls for two level request.\n");
2017 if (cqp_request->dynamic) {
2018 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2019 kfree(cqp_request);
2020 } else {
2021 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2022 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2023 }
2024 return -ENOMEM;
2025 } else {
2026 nesadapter->free_4kpbl -= pbl_count+1;
2027 }
2028 } else if (residual_page_count > 32) {
2029 if (pbl_count > nesadapter->free_4kpbl) {
2030 nes_debug(NES_DBG_MR, "Out of 4KB Pbls.\n");
2031 if (cqp_request->dynamic) {
2032 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2033 kfree(cqp_request);
2034 } else {
2035 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2036 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2037 }
2038 return -ENOMEM;
2039 } else {
2040 nesadapter->free_4kpbl -= pbl_count;
2041 }
2042 } else {
2043 if (pbl_count > nesadapter->free_256pbl) {
2044 nes_debug(NES_DBG_MR, "Out of 256B Pbls.\n");
2045 if (cqp_request->dynamic) {
2046 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2047 kfree(cqp_request);
2048 } else {
2049 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2050 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2051 }
2052 return -ENOMEM;
2053 } else {
2054 nesadapter->free_256pbl -= pbl_count;
2055 }
2056 }
2057 }
2058
2059 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2060
2061 opcode = NES_CQP_REGISTER_STAG | NES_CQP_STAG_RIGHTS_LOCAL_READ |
2062 NES_CQP_STAG_VA_TO | NES_CQP_STAG_MR;
2063 if (acc & IB_ACCESS_LOCAL_WRITE)
2064 opcode |= NES_CQP_STAG_RIGHTS_LOCAL_WRITE;
2065 if (acc & IB_ACCESS_REMOTE_WRITE)
2066 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE | NES_CQP_STAG_REM_ACC_EN;
2067 if (acc & IB_ACCESS_REMOTE_READ)
2068 opcode |= NES_CQP_STAG_RIGHTS_REMOTE_READ | NES_CQP_STAG_REM_ACC_EN;
2069 if (acc & IB_ACCESS_MW_BIND)
2070 opcode |= NES_CQP_STAG_RIGHTS_WINDOW_BIND | NES_CQP_STAG_REM_ACC_EN;
2071
2072 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
2073 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX, opcode);
2074 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_VA_LOW_IDX, *iova_start);
2075 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_LEN_LOW_IDX, region_length);
2076
2077 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] =
2078 cpu_to_le32((u32)(region_length >> 8) & 0xff000000);
2079 cqp_wqe->wqe_words[NES_CQP_STAG_WQE_LEN_HIGH_PD_IDX] |=
2080 cpu_to_le32(nespd->pd_id & 0x00007fff);
2081 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, stag);
2082
2083 if (pbl_count == 0) {
2084 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, single_buffer);
2085 } else {
2086 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PA_LOW_IDX, root_vpbl->pbl_pbase);
2087 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_BLK_COUNT_IDX, pbl_count);
2088 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_PBL_LEN_IDX,
2089 (((pbl_count - 1) * 4096) + (residual_page_count*8)));
2090
2091 if ((pbl_count > 1) || (residual_page_count > 32))
2092 cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_STAG_PBL_BLK_SIZE);
2093 }
2094 barrier();
2095
2096 atomic_set(&cqp_request->refcount, 2);
2097 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
2098
2099 /* Wait for CQP */
2100 ret = wait_event_timeout(cqp_request->waitq, (0 != cqp_request->request_done),
2101 NES_EVENT_TIMEOUT);
2102 nes_debug(NES_DBG_MR, "Register STag 0x%08X completed, wait_event_timeout ret = %u,"
2103 " CQP Major:Minor codes = 0x%04X:0x%04X.\n",
2104 stag, ret, cqp_request->major_code, cqp_request->minor_code);
2105 major_code = cqp_request->major_code;
2106 if (atomic_dec_and_test(&cqp_request->refcount)) {
2107 if (cqp_request->dynamic) {
2108 kfree(cqp_request);
2109 } else {
2110 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2111 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2112 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2113 }
2114 }
	if (!ret)
		return -ETIME;
	if (major_code)
		return -EIO;
	return 0;
2123}
2124
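/*
 * nes_reg_mr() above repeats the same unlock/recycle/-ENOMEM exit in each
 * of its three out-of-PBLs branches.  A sketch of a consolidated bail-out
 * (hypothetical helper, assuming pbl_lock is held on entry):
 */
static int nes_reg_mr_pbl_fail(struct nes_device *nesdev,
		struct nes_adapter *nesadapter,
		struct nes_cqp_request *cqp_request, unsigned long flags)
{
	if (cqp_request->dynamic) {
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
		kfree(cqp_request);
	} else {
		/* preserve the original ordering: requeue, then unlock */
		list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
	}
	return -ENOMEM;
}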
2125
2126/**
2127 * nes_reg_phys_mr
2128 */
2129static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
2130 struct ib_phys_buf *buffer_list, int num_phys_buf, int acc,
2131 u64 * iova_start)
2132{
2133 u64 region_length;
2134 struct nes_pd *nespd = to_nespd(ib_pd);
2135 struct nes_vnic *nesvnic = to_nesvnic(ib_pd->device);
2136 struct nes_device *nesdev = nesvnic->nesdev;
2137 struct nes_adapter *nesadapter = nesdev->nesadapter;
2138 struct nes_mr *nesmr;
2139 struct ib_mr *ibmr;
2140 struct nes_vpbl vpbl;
2141 struct nes_root_vpbl root_vpbl;
2142 u32 stag;
2143 u32 i;
2144 u32 stag_index = 0;
2145 u32 next_stag_index = 0;
2146 u32 driver_key = 0;
2147 u32 root_pbl_index = 0;
2148 u32 cur_pbl_index = 0;
2149 int err = 0, pbl_depth = 0;
2150 int ret = 0;
2151 u16 pbl_count = 0;
2152 u8 single_page = 1;
2153 u8 stag_key = 0;
2154
2155 pbl_depth = 0;
2156 region_length = 0;
2157 vpbl.pbl_vbase = NULL;
2158 root_vpbl.pbl_vbase = NULL;
2159 root_vpbl.pbl_pbase = 0;
2160
2161 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
2162 stag_key = (u8)next_stag_index;
2163
2164 driver_key = 0;
2165
2166 next_stag_index >>= 8;
2167 next_stag_index %= nesadapter->max_mr;
2168 if (num_phys_buf > (1024*512)) {
2169 return ERR_PTR(-E2BIG);
2170 }
2171
2172 err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs, nesadapter->max_mr,
2173 &stag_index, &next_stag_index);
2174 if (err) {
2175 return ERR_PTR(err);
2176 }
2177
2178 nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
2179 if (!nesmr) {
2180 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2181 return ERR_PTR(-ENOMEM);
2182 }
2183
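	/*
	 * Build the page-buffer lists 512 entries at a time: each 4 KB leaf
	 * PBL holds 512 64-bit page addresses (hence the "i & 0x01FF" test),
	 * and the moment a second leaf is needed an 8 KB root PBL is
	 * allocated that can point at up to 1024 leaves.
	 */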
2184 for (i = 0; i < num_phys_buf; i++) {
2185
2186 if ((i & 0x01FF) == 0) {
2187 if (root_pbl_index == 1) {
2188 /* Allocate the root PBL */
2189 root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 8192,
2190 &root_vpbl.pbl_pbase);
2191 nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
2192 root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
2193 if (!root_vpbl.pbl_vbase) {
2194 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2195 vpbl.pbl_pbase);
2196 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2197 kfree(nesmr);
2198 return ERR_PTR(-ENOMEM);
2199 }
2200 root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
2201 if (!root_vpbl.leaf_vpbl) {
2202 pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
2203 root_vpbl.pbl_pbase);
2204 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2205 vpbl.pbl_pbase);
2206 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2207 kfree(nesmr);
2208 return ERR_PTR(-ENOMEM);
2209 }
2210 root_vpbl.pbl_vbase[0].pa_low = cpu_to_le32((u32)vpbl.pbl_pbase);
2211 root_vpbl.pbl_vbase[0].pa_high =
2212 cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
2213 root_vpbl.leaf_vpbl[0] = vpbl;
2214 }
2215 /* Allocate a 4K buffer for the PBL */
2216 vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
2217 &vpbl.pbl_pbase);
2218 nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%016lX\n",
2219 vpbl.pbl_vbase, (unsigned long)vpbl.pbl_pbase);
2220 if (!vpbl.pbl_vbase) {
2221 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2222 ibmr = ERR_PTR(-ENOMEM);
2223 kfree(nesmr);
2224 goto reg_phys_err;
2225 }
2226 /* Fill in the root table */
2227 if (1 <= root_pbl_index) {
2228 root_vpbl.pbl_vbase[root_pbl_index].pa_low =
2229 cpu_to_le32((u32)vpbl.pbl_pbase);
2230 root_vpbl.pbl_vbase[root_pbl_index].pa_high =
2231 cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
2232 root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
2233 }
2234 root_pbl_index++;
2235 cur_pbl_index = 0;
2236 }
2237 if (buffer_list[i].addr & ~PAGE_MASK) {
2238 /* TODO: Unwind allocated buffers */
2239 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2240 nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
2241 (unsigned int) buffer_list[i].addr);
2242 ibmr = ERR_PTR(-EINVAL);
2243 kfree(nesmr);
2244 goto reg_phys_err;
2245 }
2246
2247 if (!buffer_list[i].size) {
2248 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2249 nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
2250 ibmr = ERR_PTR(-EINVAL);
2251 kfree(nesmr);
2252 goto reg_phys_err;
2253 }
2254
2255 region_length += buffer_list[i].size;
2256 if ((i != 0) && (single_page)) {
2257 if ((buffer_list[i-1].addr+PAGE_SIZE) != buffer_list[i].addr)
2258 single_page = 0;
2259 }
2260 vpbl.pbl_vbase[cur_pbl_index].pa_low = cpu_to_le32((u32)buffer_list[i].addr);
2261 vpbl.pbl_vbase[cur_pbl_index++].pa_high =
2262 cpu_to_le32((u32)((((u64)buffer_list[i].addr) >> 32)));
2263 }
2264
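	/*
	 * STag layout as composed here: the random 8-bit consumer key sits
	 * in bits 7:0, the allocator index above it (nes_dereg_mr() decodes
	 * it back with (rkey & 0x0fffff00) >> 8), and driver_key -- zero for
	 * physical registrations -- occupies the remaining high bits.
	 */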
2265 stag = stag_index << 8;
2266 stag |= driver_key;
2267 stag += (u32)stag_key;
2268
2269 nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%016lX,"
2270 " length = 0x%016lX, index = 0x%08X\n",
2271 stag, (unsigned long)*iova_start, (unsigned long)region_length, stag_index);
2272
	region_length -= (*iova_start) & ~PAGE_MASK;	/* subtract only the offset into the first page */
2274
2275 /* Make the leaf PBL the root if only one PBL */
2276 if (root_pbl_index == 1) {
2277 root_vpbl.pbl_pbase = vpbl.pbl_pbase;
2278 }
2279
2280 if (single_page) {
2281 pbl_count = 0;
2282 } else {
2283 pbl_count = root_pbl_index;
2284 }
2285 ret = nes_reg_mr(nesdev, nespd, stag, region_length, &root_vpbl,
2286 buffer_list[0].addr, pbl_count, (u16)cur_pbl_index, acc, iova_start);
2287
2288 if (ret == 0) {
2289 nesmr->ibmr.rkey = stag;
2290 nesmr->ibmr.lkey = stag;
2291 nesmr->mode = IWNES_MEMREG_TYPE_MEM;
2292 ibmr = &nesmr->ibmr;
2293 nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
2294 nesmr->pbls_used = pbl_count;
2295 if (pbl_count > 1) {
2296 nesmr->pbls_used++;
2297 }
2298 } else {
2299 kfree(nesmr);
		ibmr = ERR_PTR(ret);	/* propagate -ETIME/-EIO/-ENOMEM from nes_reg_mr() */
2301 }
2302
2303 reg_phys_err:
2304 /* free the resources */
2305 if (root_pbl_index == 1) {
2306 /* single PBL case */
2307 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase, vpbl.pbl_pbase);
2308 } else {
2309 for (i=0; i<root_pbl_index; i++) {
2310 pci_free_consistent(nesdev->pcidev, 4096, root_vpbl.leaf_vpbl[i].pbl_vbase,
2311 root_vpbl.leaf_vpbl[i].pbl_pbase);
2312 }
2313 kfree(root_vpbl.leaf_vpbl);
2314 pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
2315 root_vpbl.pbl_pbase);
2316 }
2317
2318 return ibmr;
2319}
2320
2321
2322/**
2323 * nes_get_dma_mr
2324 */
2325static struct ib_mr *nes_get_dma_mr(struct ib_pd *pd, int acc)
2326{
2327 struct ib_phys_buf bl;
2328 u64 kva = 0;
2329
2330 nes_debug(NES_DBG_MR, "\n");
2331
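	/* one ~1 TB pseudo-buffer (0xffffffffff bytes) at physical address 0
	 * stands in for the whole DMA-addressable range */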
2332 bl.size = (u64)0xffffffffffULL;
2333 bl.addr = 0;
2334 return nes_reg_phys_mr(pd, &bl, 1, acc, &kva);
2335}
2336
2337
2338/**
2339 * nes_reg_user_mr
2340 */
2341static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2342 u64 virt, int acc, struct ib_udata *udata)
2343{
2344 u64 iova_start;
2345 __le64 *pbl;
2346 u64 region_length;
2347 dma_addr_t last_dma_addr = 0;
2348 dma_addr_t first_dma_addr = 0;
2349 struct nes_pd *nespd = to_nespd(pd);
2350 struct nes_vnic *nesvnic = to_nesvnic(pd->device);
2351 struct nes_device *nesdev = nesvnic->nesdev;
2352 struct nes_adapter *nesadapter = nesdev->nesadapter;
2353 struct ib_mr *ibmr = ERR_PTR(-EINVAL);
2354 struct ib_umem_chunk *chunk;
2355 struct nes_ucontext *nes_ucontext;
2356 struct nes_pbl *nespbl;
2357 struct nes_mr *nesmr;
2358 struct ib_umem *region;
2359 struct nes_mem_reg_req req;
2360 struct nes_vpbl vpbl;
2361 struct nes_root_vpbl root_vpbl;
2362 int nmap_index, page_index;
2363 int page_count = 0;
2364 int err, pbl_depth = 0;
2365 int chunk_pages;
2366 int ret;
2367 u32 stag;
2368 u32 stag_index = 0;
2369 u32 next_stag_index;
2370 u32 driver_key;
2371 u32 root_pbl_index = 0;
2372 u32 cur_pbl_index = 0;
2373 u32 skip_pages;
2374 u16 pbl_count;
2375 u8 single_page = 1;
2376 u8 stag_key;
2377
2378 region = ib_umem_get(pd->uobject->context, start, length, acc);
2379 if (IS_ERR(region)) {
2380 return (struct ib_mr *)region;
2381 }
2382
2383 nes_debug(NES_DBG_MR, "User base = 0x%lX, Virt base = 0x%lX, length = %u,"
2384 " offset = %u, page size = %u.\n",
2385 (unsigned long int)start, (unsigned long int)virt, (u32)length,
2386 region->offset, region->page_size);
2387
2388 skip_pages = ((u32)region->offset) >> 12;
2389
	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);	/* don't leak the pinned region */
		return ERR_PTR(-EFAULT);
	}
2392 nes_debug(NES_DBG_MR, "Memory Registration type = %08X.\n", req.reg_type);
2393
2394 switch (req.reg_type) {
2395 case IWNES_MEMREG_TYPE_MEM:
2396 pbl_depth = 0;
2397 region_length = 0;
2398 vpbl.pbl_vbase = NULL;
2399 root_vpbl.pbl_vbase = NULL;
2400 root_vpbl.pbl_pbase = 0;
2401
2402 get_random_bytes(&next_stag_index, sizeof(next_stag_index));
2403 stag_key = (u8)next_stag_index;
2404
2405 driver_key = next_stag_index & 0x70000000;
2406
2407 next_stag_index >>= 8;
2408 next_stag_index %= nesadapter->max_mr;
2409
2410 err = nes_alloc_resource(nesadapter, nesadapter->allocated_mrs,
2411 nesadapter->max_mr, &stag_index, &next_stag_index);
2412 if (err) {
2413 ib_umem_release(region);
2414 return ERR_PTR(err);
2415 }
2416
2417 nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
2418 if (!nesmr) {
2419 ib_umem_release(region);
2420 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2421 return ERR_PTR(-ENOMEM);
2422 }
2423 nesmr->region = region;
2424
2425 list_for_each_entry(chunk, &region->chunk_list, list) {
2426 nes_debug(NES_DBG_MR, "Chunk: nents = %u, nmap = %u .\n",
2427 chunk->nents, chunk->nmap);
2428 for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
2429 if (sg_dma_address(&chunk->page_list[nmap_index]) & ~PAGE_MASK) {
2430 ib_umem_release(region);
2431 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2432 nes_debug(NES_DBG_MR, "Unaligned Memory Buffer: 0x%x\n",
2433 (unsigned int) sg_dma_address(&chunk->page_list[nmap_index]));
2434 ibmr = ERR_PTR(-EINVAL);
2435 kfree(nesmr);
2436 goto reg_user_mr_err;
2437 }
2438
2439 if (!sg_dma_len(&chunk->page_list[nmap_index])) {
2440 ib_umem_release(region);
2441 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2442 stag_index);
2443 nes_debug(NES_DBG_MR, "Invalid Buffer Size\n");
2444 ibmr = ERR_PTR(-EINVAL);
2445 kfree(nesmr);
2446 goto reg_user_mr_err;
2447 }
2448
2449 region_length += sg_dma_len(&chunk->page_list[nmap_index]);
2450 chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
2451 region_length -= skip_pages << 12;
2452 for (page_index=skip_pages; page_index < chunk_pages; page_index++) {
2453 skip_pages = 0;
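				/*
				 * Stop once enough whole pages have been
				 * listed to cover the region: bytes mapped so
				 * far, less the offset into the first page,
				 * reach region->length.
				 */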
				if ((page_count != 0) &&
						(((page_count << 12) - (region->offset & (4096 - 1))) >=
						region->length))
					goto enough_pages;
2456 if ((page_count&0x01FF) == 0) {
2457 if (page_count>(1024*512)) {
2458 ib_umem_release(region);
2459 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2460 vpbl.pbl_pbase);
2461 nes_free_resource(nesadapter,
2462 nesadapter->allocated_mrs, stag_index);
2463 kfree(nesmr);
2464 ibmr = ERR_PTR(-E2BIG);
2465 goto reg_user_mr_err;
2466 }
2467 if (root_pbl_index == 1) {
2468 root_vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev,
2469 8192, &root_vpbl.pbl_pbase);
2470 nes_debug(NES_DBG_MR, "Allocating root PBL, va = %p, pa = 0x%08X\n",
2471 root_vpbl.pbl_vbase, (unsigned int)root_vpbl.pbl_pbase);
2472 if (!root_vpbl.pbl_vbase) {
2473 ib_umem_release(region);
2474 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2475 vpbl.pbl_pbase);
2476 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2477 stag_index);
2478 kfree(nesmr);
2479 ibmr = ERR_PTR(-ENOMEM);
2480 goto reg_user_mr_err;
2481 }
2482 root_vpbl.leaf_vpbl = kzalloc(sizeof(*root_vpbl.leaf_vpbl)*1024,
2483 GFP_KERNEL);
2484 if (!root_vpbl.leaf_vpbl) {
2485 ib_umem_release(region);
2486 pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
2487 root_vpbl.pbl_pbase);
2488 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2489 vpbl.pbl_pbase);
2490 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2491 stag_index);
2492 kfree(nesmr);
2493 ibmr = ERR_PTR(-ENOMEM);
2494 goto reg_user_mr_err;
2495 }
2496 root_vpbl.pbl_vbase[0].pa_low =
2497 cpu_to_le32((u32)vpbl.pbl_pbase);
2498 root_vpbl.pbl_vbase[0].pa_high =
2499 cpu_to_le32((u32)((((u64)vpbl.pbl_pbase) >> 32)));
2500 root_vpbl.leaf_vpbl[0] = vpbl;
2501 }
2502 vpbl.pbl_vbase = pci_alloc_consistent(nesdev->pcidev, 4096,
2503 &vpbl.pbl_pbase);
2504 nes_debug(NES_DBG_MR, "Allocating leaf PBL, va = %p, pa = 0x%08X\n",
2505 vpbl.pbl_vbase, (unsigned int)vpbl.pbl_pbase);
2506 if (!vpbl.pbl_vbase) {
2507 ib_umem_release(region);
2508 nes_free_resource(nesadapter, nesadapter->allocated_mrs, stag_index);
2509 ibmr = ERR_PTR(-ENOMEM);
2510 kfree(nesmr);
2511 goto reg_user_mr_err;
2512 }
2513 if (1 <= root_pbl_index) {
2514 root_vpbl.pbl_vbase[root_pbl_index].pa_low =
2515 cpu_to_le32((u32)vpbl.pbl_pbase);
2516 root_vpbl.pbl_vbase[root_pbl_index].pa_high =
2517 cpu_to_le32((u32)((((u64)vpbl.pbl_pbase)>>32)));
2518 root_vpbl.leaf_vpbl[root_pbl_index] = vpbl;
2519 }
2520 root_pbl_index++;
2521 cur_pbl_index = 0;
2522 }
2523 if (single_page) {
2524 if (page_count != 0) {
2525 if ((last_dma_addr+4096) !=
2526 (sg_dma_address(&chunk->page_list[nmap_index])+
2527 (page_index*4096)))
2528 single_page = 0;
2529 last_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
2530 (page_index*4096);
2531 } else {
2532 first_dma_addr = sg_dma_address(&chunk->page_list[nmap_index])+
2533 (page_index*4096);
2534 last_dma_addr = first_dma_addr;
2535 }
2536 }
2537
2538 vpbl.pbl_vbase[cur_pbl_index].pa_low =
2539 cpu_to_le32((u32)(sg_dma_address(&chunk->page_list[nmap_index])+
2540 (page_index*4096)));
2541 vpbl.pbl_vbase[cur_pbl_index].pa_high =
2542 cpu_to_le32((u32)((((u64)(sg_dma_address(&chunk->page_list[nmap_index])+
2543 (page_index*4096))) >> 32)));
2544 cur_pbl_index++;
2545 page_count++;
2546 }
2547 }
2548 }
2549 enough_pages:
2550 nes_debug(NES_DBG_MR, "calculating stag, stag_index=0x%08x, driver_key=0x%08x,"
2551 " stag_key=0x%08x\n",
2552 stag_index, driver_key, stag_key);
2553 stag = stag_index << 8;
2554 stag |= driver_key;
2555 stag += (u32)stag_key;
2556 if (stag == 0) {
2557 stag = 1;
2558 }
2559
2560 iova_start = virt;
2561 /* Make the leaf PBL the root if only one PBL */
2562 if (root_pbl_index == 1) {
2563 root_vpbl.pbl_pbase = vpbl.pbl_pbase;
2564 }
2565
2566 if (single_page) {
2567 pbl_count = 0;
2568 } else {
2569 pbl_count = root_pbl_index;
2570 first_dma_addr = 0;
2571 }
2572 nes_debug(NES_DBG_MR, "Registering STag 0x%08X, VA = 0x%08X, length = 0x%08X,"
2573 " index = 0x%08X, region->length=0x%08llx, pbl_count = %u\n",
2574 stag, (unsigned int)iova_start,
2575 (unsigned int)region_length, stag_index,
2576 (unsigned long long)region->length, pbl_count);
2577 ret = nes_reg_mr( nesdev, nespd, stag, region->length, &root_vpbl,
2578 first_dma_addr, pbl_count, (u16)cur_pbl_index, acc, &iova_start);
2579
2580 nes_debug(NES_DBG_MR, "ret=%d\n", ret);
2581
2582 if (ret == 0) {
2583 nesmr->ibmr.rkey = stag;
2584 nesmr->ibmr.lkey = stag;
2585 nesmr->mode = IWNES_MEMREG_TYPE_MEM;
2586 ibmr = &nesmr->ibmr;
2587 nesmr->pbl_4k = ((pbl_count > 1) || (cur_pbl_index > 32)) ? 1 : 0;
2588 nesmr->pbls_used = pbl_count;
2589 if (pbl_count > 1) {
2590 nesmr->pbls_used++;
2591 }
2592 } else {
2593 ib_umem_release(region);
2594 kfree(nesmr);
			ibmr = ERR_PTR(ret);	/* propagate -ETIME/-EIO/-ENOMEM from nes_reg_mr() */
2596 }
2597
2598 reg_user_mr_err:
2599 /* free the resources */
2600 if (root_pbl_index == 1) {
2601 pci_free_consistent(nesdev->pcidev, 4096, vpbl.pbl_vbase,
2602 vpbl.pbl_pbase);
2603 } else {
2604 for (page_index=0; page_index<root_pbl_index; page_index++) {
2605 pci_free_consistent(nesdev->pcidev, 4096,
2606 root_vpbl.leaf_vpbl[page_index].pbl_vbase,
2607 root_vpbl.leaf_vpbl[page_index].pbl_pbase);
2608 }
2609 kfree(root_vpbl.leaf_vpbl);
2610 pci_free_consistent(nesdev->pcidev, 8192, root_vpbl.pbl_vbase,
2611 root_vpbl.pbl_pbase);
2612 }
2613
		nes_debug(NES_DBG_MR, "Leaving, ibmr=%p\n", ibmr);
2615
2616 return ibmr;
2618 case IWNES_MEMREG_TYPE_QP:
2619 case IWNES_MEMREG_TYPE_CQ:
2620 nespbl = kzalloc(sizeof(*nespbl), GFP_KERNEL);
2621 if (!nespbl) {
2622 nes_debug(NES_DBG_MR, "Unable to allocate PBL\n");
2623 ib_umem_release(region);
2624 return ERR_PTR(-ENOMEM);
2625 }
2626 nesmr = kzalloc(sizeof(*nesmr), GFP_KERNEL);
2627 if (!nesmr) {
2628 ib_umem_release(region);
2629 kfree(nespbl);
2630 nes_debug(NES_DBG_MR, "Unable to allocate nesmr\n");
2631 return ERR_PTR(-ENOMEM);
2632 }
2633 nesmr->region = region;
2634 nes_ucontext = to_nesucontext(pd->uobject->context);
2635 pbl_depth = region->length >> 12;
2636 pbl_depth += (region->length & (4096-1)) ? 1 : 0;
2637 nespbl->pbl_size = pbl_depth*sizeof(u64);
2638 if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
2639 nes_debug(NES_DBG_MR, "Attempting to allocate QP PBL memory");
2640 } else {
			nes_debug(NES_DBG_MR, "Attempting to allocate CQ PBL memory");
2642 }
2643
2644 nes_debug(NES_DBG_MR, " %u bytes, %u entries.\n",
2645 nespbl->pbl_size, pbl_depth);
2646 pbl = pci_alloc_consistent(nesdev->pcidev, nespbl->pbl_size,
2647 &nespbl->pbl_pbase);
2648 if (!pbl) {
2649 ib_umem_release(region);
2650 kfree(nesmr);
2651 kfree(nespbl);
2652 nes_debug(NES_DBG_MR, "Unable to allocate PBL memory\n");
2653 return ERR_PTR(-ENOMEM);
2654 }
2655
2656 nespbl->pbl_vbase = (u64 *)pbl;
2657 nespbl->user_base = start;
2658 nes_debug(NES_DBG_MR, "Allocated PBL memory, %u bytes, pbl_pbase=%p,"
2659 " pbl_vbase=%p user_base=0x%lx\n",
2660 nespbl->pbl_size, (void *)nespbl->pbl_pbase,
2661 (void*)nespbl->pbl_vbase, nespbl->user_base);
2662
2663 list_for_each_entry(chunk, &region->chunk_list, list) {
2664 for (nmap_index = 0; nmap_index < chunk->nmap; ++nmap_index) {
2665 chunk_pages = sg_dma_len(&chunk->page_list[nmap_index]) >> 12;
2666 chunk_pages += (sg_dma_len(&chunk->page_list[nmap_index]) & (4096-1)) ? 1 : 0;
2667 nespbl->page = sg_page(&chunk->page_list[0]);
2668 for (page_index=0; page_index<chunk_pages; page_index++) {
2669 ((__le32 *)pbl)[0] = cpu_to_le32((u32)
2670 (sg_dma_address(&chunk->page_list[nmap_index])+
2671 (page_index*4096)));
2672 ((__le32 *)pbl)[1] = cpu_to_le32(((u64)
2673 (sg_dma_address(&chunk->page_list[nmap_index])+
2674 (page_index*4096)))>>32);
2675 nes_debug(NES_DBG_MR, "pbl=%p, *pbl=0x%016llx, 0x%08x%08x\n", pbl,
2676 (unsigned long long)*pbl,
2677 le32_to_cpu(((__le32 *)pbl)[1]), le32_to_cpu(((__le32 *)pbl)[0]));
2678 pbl++;
2679 }
2680 }
2681 }
2682 if (req.reg_type == IWNES_MEMREG_TYPE_QP) {
2683 list_add_tail(&nespbl->list, &nes_ucontext->qp_reg_mem_list);
2684 } else {
2685 list_add_tail(&nespbl->list, &nes_ucontext->cq_reg_mem_list);
2686 }
2687 nesmr->ibmr.rkey = -1;
2688 nesmr->ibmr.lkey = -1;
2689 nesmr->mode = req.reg_type;
2690 return &nesmr->ibmr;
2692 }
2693
2694 return ERR_PTR(-ENOSYS);
2695}
2696
2697
2698/**
2699 * nes_dereg_mr
2700 */
2701static int nes_dereg_mr(struct ib_mr *ib_mr)
2702{
2703 struct nes_mr *nesmr = to_nesmr(ib_mr);
2704 struct nes_vnic *nesvnic = to_nesvnic(ib_mr->device);
2705 struct nes_device *nesdev = nesvnic->nesdev;
2706 struct nes_adapter *nesadapter = nesdev->nesadapter;
2707 struct nes_hw_cqp_wqe *cqp_wqe;
2708 struct nes_cqp_request *cqp_request;
2709 unsigned long flags;
2710 int ret;
2711 u16 major_code;
2712 u16 minor_code;
2713
2714 if (nesmr->region) {
2715 ib_umem_release(nesmr->region);
2716 }
2717 if (nesmr->mode != IWNES_MEMREG_TYPE_MEM) {
2718 kfree(nesmr);
2719 return 0;
2720 }
2721
2722 /* Deallocate the region with the adapter */
2723
2724 cqp_request = nes_get_cqp_request(nesdev);
2725 if (cqp_request == NULL) {
2726 nes_debug(NES_DBG_MR, "Failed to get a cqp_request.\n");
2727 return -ENOMEM;
2728 }
2729 cqp_request->waiting = 1;
2730 cqp_wqe = &cqp_request->cqp_wqe;
2731
2732 spin_lock_irqsave(&nesadapter->pbl_lock, flags);
2733 if (nesmr->pbls_used != 0) {
2734 if (nesmr->pbl_4k) {
2735 nesadapter->free_4kpbl += nesmr->pbls_used;
2736 if (nesadapter->free_4kpbl > nesadapter->max_4kpbl) {
2737 printk(KERN_ERR PFX "free 4KB PBLs(%u) has exceeded the max(%u)\n",
2738 nesadapter->free_4kpbl, nesadapter->max_4kpbl);
2739 }
2740 } else {
2741 nesadapter->free_256pbl += nesmr->pbls_used;
2742 if (nesadapter->free_256pbl > nesadapter->max_256pbl) {
2743 printk(KERN_ERR PFX "free 256B PBLs(%u) has exceeded the max(%u)\n",
2744 nesadapter->free_256pbl, nesadapter->max_256pbl);
2745 }
2746 }
2747 }
2748
2749 spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
2750 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
2751 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
2752 NES_CQP_DEALLOCATE_STAG | NES_CQP_STAG_VA_TO |
2753 NES_CQP_STAG_DEALLOC_PBLS | NES_CQP_STAG_MR);
2754 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_STAG_WQE_STAG_IDX, ib_mr->rkey);
2755
2756 atomic_set(&cqp_request->refcount, 2);
2757 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
2758
2759 /* Wait for CQP */
2760 nes_debug(NES_DBG_MR, "Waiting for deallocate STag 0x%08X completed\n", ib_mr->rkey);
2761 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
2762 NES_EVENT_TIMEOUT);
2763 nes_debug(NES_DBG_MR, "Deallocate STag 0x%08X completed, wait_event_timeout ret = %u,"
2764 " CQP Major:Minor codes = 0x%04X:0x%04X\n",
2765 ib_mr->rkey, ret, cqp_request->major_code, cqp_request->minor_code);
2766
2767 nes_free_resource(nesadapter, nesadapter->allocated_mrs,
2768 (ib_mr->rkey & 0x0fffff00) >> 8);
2769
2770 kfree(nesmr);
2771
2772 major_code = cqp_request->major_code;
2773 minor_code = cqp_request->minor_code;
2774 if (atomic_dec_and_test(&cqp_request->refcount)) {
2775 if (cqp_request->dynamic) {
2776 kfree(cqp_request);
2777 } else {
2778 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2779 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2780 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2781 }
2782 }
2783 if (!ret) {
2784 nes_debug(NES_DBG_MR, "Timeout waiting to destroy STag,"
2785 " ib_mr=%p, rkey = 0x%08X\n",
2786 ib_mr, ib_mr->rkey);
2787 return -ETIME;
2788 } else if (major_code) {
2789 nes_debug(NES_DBG_MR, "Error (0x%04X:0x%04X) while attempting"
2790 " to destroy STag, ib_mr=%p, rkey = 0x%08X\n",
2791 major_code, minor_code, ib_mr, ib_mr->rkey);
2792 return -EIO;
2793 } else
2794 return 0;
2795}
2796
2797
2798/**
2799 * show_rev
2800 */
2801static ssize_t show_rev(struct class_device *cdev, char *buf)
2802{
2803 struct nes_ib_device *nesibdev =
2804 container_of(cdev, struct nes_ib_device, ibdev.class_dev);
2805 struct nes_vnic *nesvnic = nesibdev->nesvnic;
2806
2807 nes_debug(NES_DBG_INIT, "\n");
2808 return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
2809}
2810
2811
2812/**
2813 * show_fw_ver
2814 */
2815static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
2816{
2817 struct nes_ib_device *nesibdev =
2818 container_of(cdev, struct nes_ib_device, ibdev.class_dev);
2819 struct nes_vnic *nesvnic = nesibdev->nesvnic;
2820
2821 nes_debug(NES_DBG_INIT, "\n");
2822 return sprintf(buf, "%x.%x.%x\n",
2823 (int)(nesvnic->nesdev->nesadapter->fw_ver >> 32),
2824 (int)(nesvnic->nesdev->nesadapter->fw_ver >> 16) & 0xffff,
2825 (int)(nesvnic->nesdev->nesadapter->fw_ver & 0xffff));
2826}
2827
2828
2829/**
2830 * show_hca
2831 */
2832static ssize_t show_hca(struct class_device *cdev, char *buf)
2833{
2834 nes_debug(NES_DBG_INIT, "\n");
2835 return sprintf(buf, "NES020\n");
2836}
2837
2838
2839/**
2840 * show_board
2841 */
2842static ssize_t show_board(struct class_device *cdev, char *buf)
2843{
2844 nes_debug(NES_DBG_INIT, "\n");
2845 return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
2846}
2847
2848
2849static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
2850static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
2851static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
2852static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
2853
2854static struct class_device_attribute *nes_class_attributes[] = {
2855 &class_device_attr_hw_rev,
2856 &class_device_attr_fw_ver,
2857 &class_device_attr_hca_type,
2858 &class_device_attr_board_id
2859};
2860
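/*
 * These read-only attributes surface under the device's sysfs class entry;
 * registration happens at device-add time via a loop of this shape (cf.
 * nes_register_device(), not shown in this excerpt):
 *
 *	for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i)
 *		class_device_create_file(&nesibdev->ibdev.class_dev,
 *					 nes_class_attributes[i]);
 */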
2861
2862/**
2863 * nes_query_qp
2864 */
2865static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2866 int attr_mask, struct ib_qp_init_attr *init_attr)
2867{
2868 struct nes_qp *nesqp = to_nesqp(ibqp);
2869
2870 nes_debug(NES_DBG_QP, "\n");
2871
2872 attr->qp_access_flags = 0;
2873 attr->cap.max_send_wr = nesqp->hwqp.sq_size;
2874 attr->cap.max_recv_wr = nesqp->hwqp.rq_size;
2875 attr->cap.max_recv_sge = 1;
2876 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
2877 init_attr->cap.max_inline_data = 0;
2878 } else {
2879 init_attr->cap.max_inline_data = 64;
2880 }
2881
2882 init_attr->event_handler = nesqp->ibqp.event_handler;
2883 init_attr->qp_context = nesqp->ibqp.qp_context;
2884 init_attr->send_cq = nesqp->ibqp.send_cq;
2885 init_attr->recv_cq = nesqp->ibqp.recv_cq;
	init_attr->srq = nesqp->ibqp.srq;
2887 init_attr->cap = attr->cap;
2888
2889 return 0;
2890}
2891
2892
2893/**
2894 * nes_hw_modify_qp
2895 */
2896int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
2897 u32 next_iwarp_state, u32 wait_completion)
2898{
2899 struct nes_hw_cqp_wqe *cqp_wqe;
2900 /* struct iw_cm_id *cm_id = nesqp->cm_id; */
2901 /* struct iw_cm_event cm_event; */
2902 struct nes_cqp_request *cqp_request;
2903 unsigned long flags;
2904 int ret;
2905 u16 major_code;
2906
2907 nes_debug(NES_DBG_MOD_QP, "QP%u, refcount=%d\n",
2908 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
2909
2910 cqp_request = nes_get_cqp_request(nesdev);
2911 if (cqp_request == NULL) {
2912 nes_debug(NES_DBG_MOD_QP, "Failed to get a cqp_request.\n");
2913 return -ENOMEM;
2914 }
2915 if (wait_completion) {
2916 cqp_request->waiting = 1;
2917 } else {
2918 cqp_request->waiting = 0;
2919 }
2920 cqp_wqe = &cqp_request->cqp_wqe;
2921
2922 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_OPCODE_IDX,
2923 NES_CQP_MODIFY_QP | NES_CQP_QP_TYPE_IWARP | next_iwarp_state);
2924 nes_debug(NES_DBG_MOD_QP, "using next_iwarp_state=%08x, wqe_words=%08x\n",
2925 next_iwarp_state, le32_to_cpu(cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX]));
2926 nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
2927 set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
2928 set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
2929
2930 atomic_set(&cqp_request->refcount, 2);
2931 nes_post_cqp_request(nesdev, cqp_request, NES_CQP_REQUEST_RING_DOORBELL);
2932
2933 /* Wait for CQP */
2934 if (wait_completion) {
2935 /* nes_debug(NES_DBG_MOD_QP, "Waiting for modify iWARP QP%u to complete.\n",
2936 nesqp->hwqp.qp_id); */
2937 ret = wait_event_timeout(cqp_request->waitq, (cqp_request->request_done != 0),
2938 NES_EVENT_TIMEOUT);
2939 nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u completed, wait_event_timeout ret=%u, "
2940 "CQP Major:Minor codes = 0x%04X:0x%04X.\n",
2941 nesqp->hwqp.qp_id, ret, cqp_request->major_code, cqp_request->minor_code);
2942 major_code = cqp_request->major_code;
2943 if (major_code) {
			nes_debug(NES_DBG_MOD_QP, "Modify iwarp QP%u failed, "
2945 "CQP Major:Minor codes = 0x%04X:0x%04X, intended next state = 0x%08X.\n",
2946 nesqp->hwqp.qp_id, cqp_request->major_code,
2947 cqp_request->minor_code, next_iwarp_state);
2948 }
2949 if (atomic_dec_and_test(&cqp_request->refcount)) {
2950 if (cqp_request->dynamic) {
2951 kfree(cqp_request);
2952 } else {
2953 spin_lock_irqsave(&nesdev->cqp.lock, flags);
2954 list_add_tail(&cqp_request->list, &nesdev->cqp_avail_reqs);
2955 spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
2956 }
2957 }
2958 if (!ret)
2959 return -ETIME;
2960 else if (major_code)
2961 return -EIO;
2962 else
2963 return 0;
2964 } else {
2965 return 0;
2966 }
2967}
2968
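/*
 * Minimal usage sketch (hypothetical caller, not part of this driver):
 * kick a QP into the error state without blocking on the CQP completion.
 * With wait_completion == 0 the request is posted fire-and-forget and the
 * function returns 0.
 */
static void nes_example_force_error(struct nes_device *nesdev,
		struct nes_qp *nesqp)
{
	nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0);
}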
2969
2970/**
2971 * nes_modify_qp
2972 */
2973int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2974 int attr_mask, struct ib_udata *udata)
2975{
2976 struct nes_qp *nesqp = to_nesqp(ibqp);
2977 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
2978 struct nes_device *nesdev = nesvnic->nesdev;
2979 /* u32 cqp_head; */
2980 /* u32 counter; */
2981 u32 next_iwarp_state = 0;
2982 int err;
2983 unsigned long qplockflags;
2984 int ret;
2985 u16 original_last_aeq;
2986 u8 issue_modify_qp = 0;
2987 u8 issue_disconnect = 0;
2988 u8 dont_wait = 0;
2989
2990 nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u,"
2991 " iwarp_state=0x%X, refcount=%d\n",
2992 nesqp->hwqp.qp_id, attr->qp_state, nesqp->ibqp_state,
2993 nesqp->iwarp_state, atomic_read(&nesqp->refcount));
2994
2995 nes_add_ref(&nesqp->ibqp);
2996 spin_lock_irqsave(&nesqp->lock, qplockflags);
2997
2998 nes_debug(NES_DBG_MOD_QP, "QP%u: hw_iwarp_state=0x%X, hw_tcp_state=0x%X,"
2999 " QP Access Flags=0x%X, attr_mask = 0x%0x\n",
3000 nesqp->hwqp.qp_id, nesqp->hw_iwarp_state,
3001 nesqp->hw_tcp_state, attr->qp_access_flags, attr_mask);
3002
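	/*
	 * Map the requested IB state onto the iWARP QP state machine:
	 * INIT and RTR both land on IDLE, RTS requires an attached cm_id,
	 * SQD maps to CLOSING, SQE to TERMINATE, and ERR/RESET to ERROR
	 * (optionally deleting the hash-table entry and resetting TCP).
	 */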
3003 if (attr_mask & IB_QP_STATE) {
3004 switch (attr->qp_state) {
3005 case IB_QPS_INIT:
3006 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = init\n",
3007 nesqp->hwqp.qp_id);
3008 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_IDLE) {
3009 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3010 nes_rem_ref(&nesqp->ibqp);
3011 return -EINVAL;
3012 }
3013 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
3014 issue_modify_qp = 1;
3015 break;
3016 case IB_QPS_RTR:
3017 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rtr\n",
3018 nesqp->hwqp.qp_id);
3019 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_IDLE) {
3020 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3021 nes_rem_ref(&nesqp->ibqp);
3022 return -EINVAL;
3023 }
3024 next_iwarp_state = NES_CQP_QP_IWARP_STATE_IDLE;
3025 issue_modify_qp = 1;
3026 break;
3027 case IB_QPS_RTS:
3028 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = rts\n",
3029 nesqp->hwqp.qp_id);
3030 if (nesqp->iwarp_state>(u32)NES_CQP_QP_IWARP_STATE_RTS) {
3031 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3032 nes_rem_ref(&nesqp->ibqp);
3033 return -EINVAL;
3034 }
3035 if (nesqp->cm_id == NULL) {
3036 nes_debug(NES_DBG_MOD_QP, "QP%u: Failing attempt to move QP to RTS without a CM_ID. \n",
3037 nesqp->hwqp.qp_id );
3038 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3039 nes_rem_ref(&nesqp->ibqp);
3040 return -EINVAL;
3041 }
3042 next_iwarp_state = NES_CQP_QP_IWARP_STATE_RTS;
3043 if (nesqp->iwarp_state != NES_CQP_QP_IWARP_STATE_RTS)
3044 next_iwarp_state |= NES_CQP_QP_CONTEXT_VALID |
3045 NES_CQP_QP_ARP_VALID | NES_CQP_QP_ORD_VALID;
3046 issue_modify_qp = 1;
3047 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_ESTABLISHED;
3048 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_RTS;
3049 nesqp->hte_added = 1;
3050 break;
3051 case IB_QPS_SQD:
3052 issue_modify_qp = 1;
3053 nes_debug(NES_DBG_MOD_QP, "QP%u: new state=closing. SQ head=%u, SQ tail=%u\n",
3054 nesqp->hwqp.qp_id, nesqp->hwqp.sq_head, nesqp->hwqp.sq_tail);
3055 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
3056 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3057 nes_rem_ref(&nesqp->ibqp);
3058 return 0;
3059 } else {
3060 if (nesqp->iwarp_state > (u32)NES_CQP_QP_IWARP_STATE_CLOSING) {
3061 nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
3062 " ignored due to current iWARP state\n",
3063 nesqp->hwqp.qp_id);
3064 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3065 nes_rem_ref(&nesqp->ibqp);
3066 return -EINVAL;
3067 }
3068 if (nesqp->hw_iwarp_state != NES_AEQE_IWARP_STATE_RTS) {
3069 nes_debug(NES_DBG_MOD_QP, "QP%u: State change to closing"
3070 " already done based on hw state.\n",
3071 nesqp->hwqp.qp_id);
3072 issue_modify_qp = 0;
3073 nesqp->in_disconnect = 0;
3074 }
3075 switch (nesqp->hw_iwarp_state) {
3076 case NES_AEQE_IWARP_STATE_CLOSING:
					next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
					break;	/* do not fall through; TERMINATE would overwrite this */
3078 case NES_AEQE_IWARP_STATE_TERMINATE:
3079 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
3080 break;
3081 case NES_AEQE_IWARP_STATE_ERROR:
3082 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3083 break;
3084 default:
3085 next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
3086 nesqp->in_disconnect = 1;
3087 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
3088 break;
3089 }
3090 }
3091 break;
3092 case IB_QPS_SQE:
3093 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = terminate\n",
3094 nesqp->hwqp.qp_id);
3095 if (nesqp->iwarp_state>=(u32)NES_CQP_QP_IWARP_STATE_TERMINATE) {
3096 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3097 nes_rem_ref(&nesqp->ibqp);
3098 return -EINVAL;
3099 }
3100 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
3101 next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
3102 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
3103 issue_modify_qp = 1;
3104 nesqp->in_disconnect = 1;
3105 break;
3106 case IB_QPS_ERR:
3107 case IB_QPS_RESET:
3108 if (nesqp->iwarp_state == (u32)NES_CQP_QP_IWARP_STATE_ERROR) {
3109 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3110 nes_rem_ref(&nesqp->ibqp);
3111 return -EINVAL;
3112 }
3113 nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
3114 nesqp->hwqp.qp_id);
3115 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
3116 /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
3117 if (nesqp->hte_added) {
3118 nes_debug(NES_DBG_MOD_QP, "set CQP_QP_DEL_HTE\n");
3119 next_iwarp_state |= NES_CQP_QP_DEL_HTE;
3120 nesqp->hte_added = 0;
3121 }
3122 if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) &&
3123 (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) {
3124 next_iwarp_state |= NES_CQP_QP_RESET;
3125 nesqp->in_disconnect = 1;
3126 } else {
3127 nes_debug(NES_DBG_MOD_QP, "QP%u NOT setting NES_CQP_QP_RESET since TCP state = %u\n",
3128 nesqp->hwqp.qp_id, nesqp->hw_tcp_state);
3129 dont_wait = 1;
3130 }
3131 issue_modify_qp = 1;
3132 nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
3133 break;
3134 default:
3135 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3136 nes_rem_ref(&nesqp->ibqp);
3137 return -EINVAL;
3138 break;
3139 }
3140
3141 nesqp->ibqp_state = attr->qp_state;
3142 if (((nesqp->iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) ==
3143 (u32)NES_CQP_QP_IWARP_STATE_RTS) &&
3144 ((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) >
3145 (u32)NES_CQP_QP_IWARP_STATE_RTS)) {
3146 nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
3147 nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
3148 nesqp->iwarp_state);
3149 issue_disconnect = 1;
3150 } else {
3151 nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK;
3152 nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n",
3153 nesqp->iwarp_state);
3154 }
3155 }
3156
3157 if (attr_mask & IB_QP_ACCESS_FLAGS) {
3158 if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) {
3159 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
3160 NES_QPCONTEXT_MISC_RDMA_READ_EN);
3161 issue_modify_qp = 1;
3162 }
3163 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) {
3164 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN);
3165 issue_modify_qp = 1;
3166 }
3167 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) {
3168 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_READ_EN);
3169 issue_modify_qp = 1;
3170 }
3171 if (attr->qp_access_flags & IB_ACCESS_MW_BIND) {
3172 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_WBIND_EN);
3173 issue_modify_qp = 1;
3174 }
3175
3176 if (nesqp->user_mode) {
3177 nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_RDMA_WRITE_EN |
3178 NES_QPCONTEXT_MISC_RDMA_READ_EN);
3179 issue_modify_qp = 1;
3180 }
3181 }
3182
3183 original_last_aeq = nesqp->last_aeq;
3184 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3185
3186 nes_debug(NES_DBG_MOD_QP, "issue_modify_qp=%u\n", issue_modify_qp);
3187
3188 ret = 0;
3189
3190
3191 if (issue_modify_qp) {
3192 nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
3193 ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
3194 if (ret)
3195 nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
3196 " failed for QP%u.\n",
3197 next_iwarp_state, nesqp->hwqp.qp_id);
3198
3199 }
3200
3201 if ((issue_modify_qp) && (nesqp->ibqp_state > IB_QPS_RTS)) {
3202 nes_debug(NES_DBG_MOD_QP, "QP%u Issued ModifyQP refcount (%d),"
3203 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3204 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3205 original_last_aeq, nesqp->last_aeq);
3206 if ((!ret) ||
3207 ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) &&
3208 (ret))) {
3209 if (dont_wait) {
3210 if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
3211 nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
3212 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3213 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3214 original_last_aeq, nesqp->last_aeq);
3215 /* this one is for the cm_disconnect thread */
3216 nes_add_ref(&nesqp->ibqp);
3217 spin_lock_irqsave(&nesqp->lock, qplockflags);
3218 nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
3219 nesqp->last_aeq = NES_AEQE_AEID_RESET_SENT;
3220 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3221 nes_cm_disconn(nesqp);
3222 } else {
3223 nes_debug(NES_DBG_MOD_QP, "QP%u No fake disconnect, QP refcount=%d\n",
3224 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
3225 nes_rem_ref(&nesqp->ibqp);
3226 }
3227 } else {
3228 spin_lock_irqsave(&nesqp->lock, qplockflags);
3229 if (nesqp->cm_id) {
3230 /* These two are for the timer thread */
3231 if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
3232 nes_add_ref(&nesqp->ibqp);
3233 nesqp->cm_id->add_ref(nesqp->cm_id);
3234 nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
3235 " need ae to finish up, original_last_aeq = 0x%04X."
3236 " last_aeq = 0x%04X, scheduling timer.\n",
3237 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3238 original_last_aeq, nesqp->last_aeq);
3239 schedule_nes_timer(nesqp->cm_node, (struct sk_buff *) nesqp, NES_TIMER_TYPE_CLOSE, 1, 0);
3240 }
3241 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3242 } else {
3243 spin_unlock_irqrestore(&nesqp->lock, qplockflags);
3244 nes_debug(NES_DBG_MOD_QP, "QP%u Not decrementing QP refcount (%d),"
3245 " need ae to finish up, original_last_aeq = 0x%04X."
3246 " last_aeq = 0x%04X.\n",
3247 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3248 original_last_aeq, nesqp->last_aeq);
3249 }
3250 }
3251 } else {
3252 nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
3253 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3254 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3255 original_last_aeq, nesqp->last_aeq);
3256 nes_rem_ref(&nesqp->ibqp);
3257 }
3258 } else {
3259 nes_debug(NES_DBG_MOD_QP, "QP%u Decrementing QP refcount (%d), No ae to finish up,"
3260 " original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
3261 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
3262 original_last_aeq, nesqp->last_aeq);
3263 nes_rem_ref(&nesqp->ibqp);
3264 }
3265
3266 err = 0;
3267
3268 nes_debug(NES_DBG_MOD_QP, "QP%u Leaving, refcount=%d\n",
3269 nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount));
3270
3271 return err;
3272}
3273
3274
3275/**
 * nes_multicast_attach
3277 */
3278static int nes_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3279{
3280 nes_debug(NES_DBG_INIT, "\n");
3281 return -ENOSYS;
3282}
3283
3284
3285/**
3286 * nes_multicast_detach
3287 */
3288static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3289{
3290 nes_debug(NES_DBG_INIT, "\n");
3291 return -ENOSYS;
3292}
3293
3294
3295/**
3296 * nes_process_mad
3297 */
3298static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
3299 u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
3300 struct ib_mad *in_mad, struct ib_mad *out_mad)
3301{
3302 nes_debug(NES_DBG_INIT, "\n");
3303 return -ENOSYS;
3304}
3305
3306static inline void
3307fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey)
3308{
3309 int sge_index;
3310 int total_payload_length = 0;
3311 for (sge_index = 0; sge_index < ib_wr->num_sge; sge_index++) {
3312 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
3313 ib_wr->sg_list[sge_index].addr);
3314 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_LENGTH0_IDX + (sge_index*4),
3315 ib_wr->sg_list[sge_index].length);
3316 if (uselkey)
3317 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4),
3318 (ib_wr->sg_list[sge_index].lkey));
3319 else
3320 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX + (sge_index*4), 0);
3321
3322 total_payload_length += ib_wr->sg_list[sge_index].length;
3323 }
3324 nes_debug(NES_DBG_IW_TX, "UC UC UC, sending total_payload_length=%u \n",
3325 total_payload_length);
3326 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3327 total_payload_length);
3328}
3329
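/*
 * nes_post_send() below repeats the "can this payload go inline?" test for
 * both sends and RDMA writes.  A sketch of the predicate (hypothetical
 * helper; it also makes explicit the single-SGE assumption that the inline
 * memcpy of sg_list[0] relies on):
 */
static inline int nes_wr_inline_ok(struct ib_send_wr *ib_wr)
{
	return (ib_wr->send_flags & IB_SEND_INLINE) &&
		!(nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) &&
		(ib_wr->num_sge == 1) &&
		(ib_wr->sg_list[0].length <= 64);
}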
3330/**
3331 * nes_post_send
3332 */
3333static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3334 struct ib_send_wr **bad_wr)
3335{
3336 u64 u64temp;
3337 unsigned long flags = 0;
3338 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
3339 struct nes_device *nesdev = nesvnic->nesdev;
3340 struct nes_qp *nesqp = to_nesqp(ibqp);
3341 struct nes_hw_qp_wqe *wqe;
3342 int err;
3343 u32 qsize = nesqp->hwqp.sq_size;
3344 u32 head;
3345 u32 wqe_misc;
3346 u32 wqe_count;
3347 u32 counter;
3348 u32 total_payload_length;
3349
3350 err = 0;
3351 wqe_misc = 0;
3352 wqe_count = 0;
3353 total_payload_length = 0;
3354
3355 if (nesqp->ibqp_state > IB_QPS_RTS)
3356 return -EINVAL;
3357
3358 spin_lock_irqsave(&nesqp->lock, flags);
3359
3360 head = nesqp->hwqp.sq_head;
3361
3362 while (ib_wr) {
3363 /* Check for SQ overflow */
3364 if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
3365 err = -EINVAL;
3366 break;
3367 }
3368
3369 wqe = &nesqp->hwqp.sq_vbase[head];
3370 /* nes_debug(NES_DBG_IW_TX, "processing sq wqe for QP%u at %p, head = %u.\n",
3371 nesqp->hwqp.qp_id, wqe, head); */
3372 nes_fill_init_qp_wqe(wqe, nesqp, head);
3373 u64temp = (u64)(ib_wr->wr_id);
3374 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
3375 u64temp);
3376 switch (ib_wr->opcode) {
3377 case IB_WR_SEND:
3378 if (ib_wr->send_flags & IB_SEND_SOLICITED) {
3379 wqe_misc = NES_IWARP_SQ_OP_SENDSE;
3380 } else {
3381 wqe_misc = NES_IWARP_SQ_OP_SEND;
3382 }
3383 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3384 err = -EINVAL;
3385 break;
3386 }
3387 if (ib_wr->send_flags & IB_SEND_FENCE) {
3388 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3389 }
3390 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3391 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3392 (ib_wr->sg_list[0].length <= 64)) {
3393 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3394 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3395 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3396 ib_wr->sg_list[0].length);
3397 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3398 } else {
3399 fill_wqe_sg_send(wqe, ib_wr, 1);
3400 }
3401
3402 break;
3403 case IB_WR_RDMA_WRITE:
3404 wqe_misc = NES_IWARP_SQ_OP_RDMAW;
3405 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3406 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=%u\n",
3407 ib_wr->num_sge,
3408 nesdev->nesadapter->max_sge);
3409 err = -EINVAL;
3410 break;
3411 }
3412 if (ib_wr->send_flags & IB_SEND_FENCE) {
3413 wqe_misc |= NES_IWARP_SQ_WQE_LOCAL_FENCE;
3414 }
3415
3416 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3417 ib_wr->wr.rdma.rkey);
3418 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3419 ib_wr->wr.rdma.remote_addr);
3420
3421 if ((ib_wr->send_flags & IB_SEND_INLINE) &&
3422 ((nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) == 0) &&
3423 (ib_wr->sg_list[0].length <= 64)) {
3424 memcpy(&wqe->wqe_words[NES_IWARP_SQ_WQE_IMM_DATA_START_IDX],
3425 (void *)(unsigned long)ib_wr->sg_list[0].addr, ib_wr->sg_list[0].length);
3426 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX,
3427 ib_wr->sg_list[0].length);
3428 wqe_misc |= NES_IWARP_SQ_WQE_IMM_DATA;
3429 } else {
3430 fill_wqe_sg_send(wqe, ib_wr, 1);
3431 }
3432 wqe->wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX] =
3433 wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX];
3434 break;
3435 case IB_WR_RDMA_READ:
3436 /* iWARP only supports 1 sge for RDMA reads */
3437 if (ib_wr->num_sge > 1) {
3438 nes_debug(NES_DBG_IW_TX, "Exceeded max sge, ib_wr=%u, max=1\n",
3439 ib_wr->num_sge);
3440 err = -EINVAL;
3441 break;
3442 }
3443 wqe_misc = NES_IWARP_SQ_OP_RDMAR;
3444 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_TO_LOW_IDX,
3445 ib_wr->wr.rdma.remote_addr);
3446 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_STAG_IDX,
3447 ib_wr->wr.rdma.rkey);
3448 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX,
3449 ib_wr->sg_list->length);
3450 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
3451 ib_wr->sg_list->addr);
3452 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_STAG0_IDX,
3453 ib_wr->sg_list->lkey);
3454 break;
3455 default:
3456 /* error */
3457 err = -EINVAL;
3458 break;
3459 }
 3460		if (err) break;	/* don't commit or ring the doorbell for a bad WR */
3461 if (ib_wr->send_flags & IB_SEND_SIGNALED) {
3462 wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL;
3463 }
3464 wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] = cpu_to_le32(wqe_misc);
3465
3466 ib_wr = ib_wr->next;
3467 head++;
3468 wqe_count++;
3469 if (head >= qsize)
3470 head = 0;
3471
3472 }
3473
3474 nesqp->hwqp.sq_head = head;
3475 barrier();
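	/*
	 * Ring the WQE-allocate doorbell.  The hardware takes the count of
	 * new WQEs in bits 31:24, which is why it is fed at most 255 per
	 * write; the QP id sits in the low bits.  The 0x00800000 flag
	 * presumably selects the SQ -- nes_post_recv() issues the same
	 * write without it.
	 */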
3476 while (wqe_count) {
3477 counter = min(wqe_count, ((u32)255));
3478 wqe_count -= counter;
3479 nes_write32(nesdev->regs + NES_WQE_ALLOC,
3480 (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
3481 }
3482
3483 spin_unlock_irqrestore(&nesqp->lock, flags);
3484
3485 if (err)
3486 *bad_wr = ib_wr;
3487 return err;
3488}
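/*
 * A minimal sketch, not part of this driver, of the verbs call that lands in
 * nes_post_send() above; dma_addr, len, lkey and qp are illustrative, and
 * handle_failed_wr() is a hypothetical error hook:
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_send_wr wr = { .wr_id = 1, .sg_list = &sge, .num_sge = 1,
 *				 .opcode = IB_WR_SEND,
 *				 .send_flags = IB_SEND_SIGNALED };
 *	struct ib_send_wr *bad_wr;
 *
 *	if (ib_post_send(qp, &wr, &bad_wr))
 *		handle_failed_wr(bad_wr);
 */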
3489
3490
3491/**
3492 * nes_post_recv
3493 */
3494static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
3495 struct ib_recv_wr **bad_wr)
3496{
3497 u64 u64temp;
3498 unsigned long flags = 0;
3499 struct nes_vnic *nesvnic = to_nesvnic(ibqp->device);
3500 struct nes_device *nesdev = nesvnic->nesdev;
3501 struct nes_qp *nesqp = to_nesqp(ibqp);
3502 struct nes_hw_qp_wqe *wqe;
3503 int err = 0;
3504 int sge_index;
3505 u32 qsize = nesqp->hwqp.rq_size;
3506 u32 head;
3507 u32 wqe_count = 0;
3508 u32 counter;
3509 u32 total_payload_length;
3510
3511 if (nesqp->ibqp_state > IB_QPS_RTS)
3512 return -EINVAL;
3513
3514 spin_lock_irqsave(&nesqp->lock, flags);
3515
3516 head = nesqp->hwqp.rq_head;
3517
3518 while (ib_wr) {
3519 if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
3520 err = -EINVAL;
3521 break;
3522 }
3523 /* Check for RQ overflow */
3524 if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
3525 err = -EINVAL;
3526 break;
3527 }
3528
3529 nes_debug(NES_DBG_IW_RX, "ibwr sge count = %u.\n", ib_wr->num_sge);
3530 wqe = &nesqp->hwqp.rq_vbase[head];
3531
3532 /* nes_debug(NES_DBG_IW_RX, "QP%u:processing rq wqe at %p, head = %u.\n",
3533 nesqp->hwqp.qp_id, wqe, head); */
3534 nes_fill_init_qp_wqe(wqe, nesqp, head);
3535 u64temp = (u64)(ib_wr->wr_id);
3536 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX,
3537 u64temp);
3538 total_payload_length = 0;
3539 for (sge_index=0; sge_index < ib_wr->num_sge; sge_index++) {
3540 set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_FRAG0_LOW_IDX+(sge_index*4),
3541 ib_wr->sg_list[sge_index].addr);
3542 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_LENGTH0_IDX+(sge_index*4),
3543 ib_wr->sg_list[sge_index].length);
 3544			set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_STAG0_IDX+(sge_index*4),
3545 ib_wr->sg_list[sge_index].lkey);
3546
3547 total_payload_length += ib_wr->sg_list[sge_index].length;
3548 }
3549 set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_RQ_WQE_TOTAL_PAYLOAD_IDX,
3550 total_payload_length);
3551
3552 ib_wr = ib_wr->next;
3553 head++;
3554 wqe_count++;
3555 if (head >= qsize)
3556 head = 0;
3557 }
3558
3559 nesqp->hwqp.rq_head = head;
3560 barrier();
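	/* Same doorbell as in nes_post_send(), minus the 0x00800000 SQ bit. */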
3561 while (wqe_count) {
3562 counter = min(wqe_count, ((u32)255));
3563 wqe_count -= counter;
3564 nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
3565 }
3566
3567 spin_unlock_irqrestore(&nesqp->lock, flags);
3568
3569 if (err)
3570 *bad_wr = ib_wr;
3571 return err;
3572}
3573
3574
3575/**
3576 * nes_poll_cq
3577 */
3578static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
3579{
3580 u64 u64temp;
3581 u64 wrid;
3583 unsigned long flags = 0;
3584 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
3585 struct nes_device *nesdev = nesvnic->nesdev;
3586 struct nes_cq *nescq = to_nescq(ibcq);
3587 struct nes_qp *nesqp;
3588 struct nes_hw_cqe cqe;
3589 u32 head;
3590 u32 wq_tail;
3591 u32 cq_size;
3592 u32 cqe_count = 0;
3593 u32 wqe_index;
3594 u32 u32temp;
3596
3597 nes_debug(NES_DBG_CQ, "\n");
3598
3599 spin_lock_irqsave(&nescq->lock, flags);
3600
3601 head = nescq->hw_cq.cq_head;
3602 cq_size = nescq->hw_cq.cq_size;
3603
3604 while (cqe_count < num_entries) {
3605 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
3606 NES_CQE_VALID) {
3607 cqe = nescq->hw_cq.cq_vbase[head];
3608 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
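			/*
			 * The completion context planted in the WQE encodes
			 * two things at once: the low bits carry the WQE
			 * index, and with everything below
			 * NES_SW_CONTEXT_ALIGN masked off, the 64-bit value
			 * is the driver's struct nes_qp pointer itself.
			 */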
3609 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
3610 wqe_index = u32temp &
3611 (nesdev->nesadapter->max_qp_wr - 1);
3612 u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
 3613			/* parse CQE, get completion context from WQE (either rq or sq) */
3614 u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
3615 ((u64)u32temp);
3616 nesqp = *((struct nes_qp **)&u64temp);
3617 memset(entry, 0, sizeof *entry);
3618 if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
3619 entry->status = IB_WC_SUCCESS;
3620 } else {
3621 entry->status = IB_WC_WR_FLUSH_ERR;
3622 }
3623
3624 entry->qp = &nesqp->ibqp;
3625 entry->src_qp = nesqp->hwqp.qp_id;
3626
3627 if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
3628 if (nesqp->skip_lsmm) {
3629 nesqp->skip_lsmm = 0;
 3630				nesqp->hwqp.sq_tail++;	/* skip over the LSMM WQE */
3631 }
3632
 3633				/* Working on an SQ completion */
3634 wq_tail = wqe_index;
3635 nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
 3636				wrid = (((u64)(le32_to_cpu((u32)nesqp->hwqp.sq_vbase[wq_tail].
 3637					wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
 3638					((u64)(le32_to_cpu((u32)nesqp->hwqp.sq_vbase[wq_tail].
 3639					wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
3640 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
3641 wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
3642
3643 switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
3644 wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
3645 case NES_IWARP_SQ_OP_RDMAW:
3646 nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
3647 entry->opcode = IB_WC_RDMA_WRITE;
3648 break;
3649 case NES_IWARP_SQ_OP_RDMAR:
3650 nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
3651 entry->opcode = IB_WC_RDMA_READ;
3652 entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
3653 wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
3654 break;
3655 case NES_IWARP_SQ_OP_SENDINV:
3656 case NES_IWARP_SQ_OP_SENDSEINV:
3657 case NES_IWARP_SQ_OP_SEND:
3658 case NES_IWARP_SQ_OP_SENDSE:
3659 nes_debug(NES_DBG_CQ, "Operation = Send.\n");
3660 entry->opcode = IB_WC_SEND;
3661 break;
3662 }
3663 } else {
 3664				/* Working on an RQ completion */
3665 wq_tail = wqe_index;
3666 nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
3667 entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
3668 wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
3669 ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
3670 entry->opcode = IB_WC_RECV;
3671 }
3672 entry->wr_id = wrid;
3673
3674 if (++head >= cq_size)
3675 head = 0;
3676 cqe_count++;
3677 nescq->polled_completions++;
3678 if ((nescq->polled_completions > (cq_size / 2)) ||
3679 (nescq->polled_completions == 255)) {
3680 nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
3681 " are pending %u of %u.\n",
3682 nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
3683 nes_write32(nesdev->regs+NES_CQE_ALLOC,
3684 nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
3685 nescq->polled_completions = 0;
3686 }
3687 entry++;
3688 } else
3689 break;
3690 }
3691
3692 if (nescq->polled_completions) {
3693 nes_write32(nesdev->regs+NES_CQE_ALLOC,
3694 nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
3695 nescq->polled_completions = 0;
3696 }
3697
3698 nescq->hw_cq.cq_head = head;
3699 nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
3700 cqe_count, nescq->hw_cq.cq_number);
3701
3702 spin_unlock_irqrestore(&nescq->lock, flags);
3703
3704 return cqe_count;
3705}
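/*
 * A minimal sketch, not part of this driver, of a consumer draining this CQ
 * through the verbs layer (handle_wc() is a hypothetical completion handler):
 *
 *	struct ib_wc wc[8];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 8, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 */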
3706
3707
3708/**
3709 * nes_req_notify_cq
3710 */
3711static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
 3712{
3713 struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
3714 struct nes_device *nesdev = nesvnic->nesdev;
3715 struct nes_cq *nescq = to_nescq(ibcq);
3716 u32 cq_arm;
3717
3718 nes_debug(NES_DBG_CQ, "Requesting notification for CQ%u.\n",
3719 nescq->hw_cq.cq_number);
3720
3721 cq_arm = nescq->hw_cq.cq_number;
3722 if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
3723 cq_arm |= NES_CQE_ALLOC_NOTIFY_NEXT;
3724 else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
3725 cq_arm |= NES_CQE_ALLOC_NOTIFY_SE;
3726 else
3727 return -EINVAL;
3728
3729 nes_write32(nesdev->regs+NES_CQE_ALLOC, cq_arm);
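	/* The read below flushes the posted doorbell write to the adapter. */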
3730 nes_read32(nesdev->regs+NES_CQE_ALLOC);
3731
3732 return 0;
3733}
3734
3735
3736/**
3737 * nes_init_ofa_device
3738 */
3739struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
3740{
3741 struct nes_ib_device *nesibdev;
3742 struct nes_vnic *nesvnic = netdev_priv(netdev);
3743 struct nes_device *nesdev = nesvnic->nesdev;
3744
3745 nesibdev = (struct nes_ib_device *)ib_alloc_device(sizeof(struct nes_ib_device));
3746 if (nesibdev == NULL) {
3747 return NULL;
3748 }
3749 strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
3750 nesibdev->ibdev.owner = THIS_MODULE;
3751
3752 nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
3753 memset(&nesibdev->ibdev.node_guid, 0, sizeof(nesibdev->ibdev.node_guid));
3754 memcpy(&nesibdev->ibdev.node_guid, netdev->dev_addr, 6);
3755
3756 nesibdev->ibdev.uverbs_cmd_mask =
3757 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
3758 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
3759 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
3760 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
3761 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
3762 (1ull << IB_USER_VERBS_CMD_REG_MR) |
3763 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
3764 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
3765 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
3766 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
3767 (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
3768 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
3769 (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
3770 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
3771 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
3772 (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
3773 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
3774 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
3775 (1ull << IB_USER_VERBS_CMD_BIND_MW) |
3776 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |
3777 (1ull << IB_USER_VERBS_CMD_POST_RECV) |
3778 (1ull << IB_USER_VERBS_CMD_POST_SEND);
3779
3780 nesibdev->ibdev.phys_port_cnt = 1;
3781 nesibdev->ibdev.num_comp_vectors = 1;
3782 nesibdev->ibdev.dma_device = &nesdev->pcidev->dev;
3783 nesibdev->ibdev.class_dev.dev = &nesdev->pcidev->dev;
3784 nesibdev->ibdev.query_device = nes_query_device;
3785 nesibdev->ibdev.query_port = nes_query_port;
3786 nesibdev->ibdev.modify_port = nes_modify_port;
3787 nesibdev->ibdev.query_pkey = nes_query_pkey;
3788 nesibdev->ibdev.query_gid = nes_query_gid;
3789 nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
3790 nesibdev->ibdev.dealloc_ucontext = nes_dealloc_ucontext;
3791 nesibdev->ibdev.mmap = nes_mmap;
3792 nesibdev->ibdev.alloc_pd = nes_alloc_pd;
3793 nesibdev->ibdev.dealloc_pd = nes_dealloc_pd;
3794 nesibdev->ibdev.create_ah = nes_create_ah;
3795 nesibdev->ibdev.destroy_ah = nes_destroy_ah;
3796 nesibdev->ibdev.create_qp = nes_create_qp;
3797 nesibdev->ibdev.modify_qp = nes_modify_qp;
3798 nesibdev->ibdev.query_qp = nes_query_qp;
3799 nesibdev->ibdev.destroy_qp = nes_destroy_qp;
3800 nesibdev->ibdev.create_cq = nes_create_cq;
3801 nesibdev->ibdev.destroy_cq = nes_destroy_cq;
3802 nesibdev->ibdev.poll_cq = nes_poll_cq;
3803 nesibdev->ibdev.get_dma_mr = nes_get_dma_mr;
3804 nesibdev->ibdev.reg_phys_mr = nes_reg_phys_mr;
3805 nesibdev->ibdev.reg_user_mr = nes_reg_user_mr;
3806 nesibdev->ibdev.dereg_mr = nes_dereg_mr;
3807 nesibdev->ibdev.alloc_mw = nes_alloc_mw;
3808 nesibdev->ibdev.dealloc_mw = nes_dealloc_mw;
3809 nesibdev->ibdev.bind_mw = nes_bind_mw;
3810
3811 nesibdev->ibdev.alloc_fmr = nes_alloc_fmr;
3812 nesibdev->ibdev.unmap_fmr = nes_unmap_fmr;
3813 nesibdev->ibdev.dealloc_fmr = nes_dealloc_fmr;
3814 nesibdev->ibdev.map_phys_fmr = nes_map_phys_fmr;
3815
3816 nesibdev->ibdev.attach_mcast = nes_multicast_attach;
3817 nesibdev->ibdev.detach_mcast = nes_multicast_detach;
3818 nesibdev->ibdev.process_mad = nes_process_mad;
3819
3820 nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
3821 nesibdev->ibdev.post_send = nes_post_send;
3822 nesibdev->ibdev.post_recv = nes_post_recv;
3823
3824 nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
3825 if (nesibdev->ibdev.iwcm == NULL) {
3826 ib_dealloc_device(&nesibdev->ibdev);
3827 return NULL;
3828 }
3829 nesibdev->ibdev.iwcm->add_ref = nes_add_ref;
3830 nesibdev->ibdev.iwcm->rem_ref = nes_rem_ref;
3831 nesibdev->ibdev.iwcm->get_qp = nes_get_qp;
3832 nesibdev->ibdev.iwcm->connect = nes_connect;
3833 nesibdev->ibdev.iwcm->accept = nes_accept;
3834 nesibdev->ibdev.iwcm->reject = nes_reject;
3835 nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
3836 nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
3837
3838 return nesibdev;
3839}
3840
3841
3842/**
3843 * nes_destroy_ofa_device
3844 */
3845void nes_destroy_ofa_device(struct nes_ib_device *nesibdev)
3846{
3847 if (nesibdev == NULL)
3848 return;
3849
3850 nes_unregister_ofa_device(nesibdev);
3851
3852 kfree(nesibdev->ibdev.iwcm);
3853 ib_dealloc_device(&nesibdev->ibdev);
3854}
3855
3856
3857/**
3858 * nes_register_ofa_device
3859 */
3860int nes_register_ofa_device(struct nes_ib_device *nesibdev)
3861{
3862 struct nes_vnic *nesvnic = nesibdev->nesvnic;
3863 struct nes_device *nesdev = nesvnic->nesdev;
3864 struct nes_adapter *nesadapter = nesdev->nesadapter;
3865 int i, ret;
3866
 3867	ret = ib_register_device(&nesibdev->ibdev);
3868 if (ret) {
3869 return ret;
3870 }
3871
3872 /* Get the resources allocated to this device */
3873 nesibdev->max_cq = (nesadapter->max_cq-NES_FIRST_QPN) / nesadapter->port_count;
3874 nesibdev->max_mr = nesadapter->max_mr / nesadapter->port_count;
3875 nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
3876 nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
3877
3878 for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
3879 ret = class_device_create_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
3880 if (ret) {
3881 while (i > 0) {
3882 i--;
3883 class_device_remove_file(&nesibdev->ibdev.class_dev,
3884 nes_class_attributes[i]);
3885 }
3886 ib_unregister_device(&nesibdev->ibdev);
3887 return ret;
3888 }
3889 }
3890
3891 nesvnic->of_device_registered = 1;
3892
3893 return 0;
3894}
3895
3896
3897/**
3898 * nes_unregister_ofa_device
3899 */
3900void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
3901{
 3902	struct nes_vnic *nesvnic;
 3903	int i;
 3904
 3905	if (nesibdev == NULL)
 3906		return;
 3907	nesvnic = nesibdev->nesvnic;
3908 for (i = 0; i < ARRAY_SIZE(nes_class_attributes); ++i) {
3909 class_device_remove_file(&nesibdev->ibdev.class_dev, nes_class_attributes[i]);
3910 }
3911
3912 if (nesvnic->of_device_registered) {
3913 ib_unregister_device(&nesibdev->ibdev);
3914 }
3915
3916 nesvnic->of_device_registered = 0;
3917}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
new file mode 100644
index 000000000000..6c6b4da5184f
--- /dev/null
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -0,0 +1,169 @@
1/*
2 * Copyright (c) 2006 - 2008 NetEffect, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 */
34
35#ifndef NES_VERBS_H
36#define NES_VERBS_H
37
38struct nes_device;
39
40#define NES_MAX_USER_DB_REGIONS 4096
41#define NES_MAX_USER_WQ_REGIONS 4096
42
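/*
 * Per-open-file verbs context.  The bitmaps below appear to track which of
 * the (up to 4096) doorbell and WQ mmap regions a context has claimed, with
 * first_free_db/first_free_wq as search hints into them.
 */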
43struct nes_ucontext {
44 struct ib_ucontext ibucontext;
45 struct nes_device *nesdev;
46 unsigned long mmap_wq_offset;
47 unsigned long mmap_cq_offset; /* to be removed */
48 int index; /* rnic index (minor) */
49 unsigned long allocated_doorbells[BITS_TO_LONGS(NES_MAX_USER_DB_REGIONS)];
50 u16 mmap_db_index[NES_MAX_USER_DB_REGIONS];
51 u16 first_free_db;
52 unsigned long allocated_wqs[BITS_TO_LONGS(NES_MAX_USER_WQ_REGIONS)];
53 struct nes_qp *mmap_nesqp[NES_MAX_USER_WQ_REGIONS];
54 u16 first_free_wq;
55 struct list_head cq_reg_mem_list;
56 struct list_head qp_reg_mem_list;
57 u32 mcrqf;
58 atomic_t usecnt;
59};
60
61struct nes_pd {
62 struct ib_pd ibpd;
63 u16 pd_id;
64 atomic_t sqp_count;
65 u16 mmap_db_index;
66};
67
68struct nes_mr {
69 union {
70 struct ib_mr ibmr;
71 struct ib_mw ibmw;
72 struct ib_fmr ibfmr;
73 };
74 struct ib_umem *region;
75 u16 pbls_used;
76 u8 mode;
77 u8 pbl_4k;
78};
79
80struct nes_hw_pb {
81 __le32 pa_low;
82 __le32 pa_high;
83};
84
85struct nes_vpbl {
86 dma_addr_t pbl_pbase;
87 struct nes_hw_pb *pbl_vbase;
88};
89
90struct nes_root_vpbl {
91 dma_addr_t pbl_pbase;
92 struct nes_hw_pb *pbl_vbase;
93 struct nes_vpbl *leaf_vpbl;
94};
95
96struct nes_fmr {
97 struct nes_mr nesmr;
98 u32 leaf_pbl_cnt;
99 struct nes_root_vpbl root_vpbl;
100 struct ib_qp *ib_qp;
101 int access_rights;
102 struct ib_fmr_attr attr;
103};
104
105struct nes_av;
106
107struct nes_cq {
108 struct ib_cq ibcq;
109 struct nes_hw_cq hw_cq;
110 u32 polled_completions;
111 u32 cq_mem_size;
112 spinlock_t lock;
113 u8 virtual_cq;
114 u8 pad[3];
115};
116
117struct nes_wq {
118 spinlock_t lock;
119};
120
121struct iw_cm_id;
122struct ietf_mpa_frame;
123
124struct nes_qp {
125 struct ib_qp ibqp;
126 void *allocated_buffer;
127 struct iw_cm_id *cm_id;
128 struct workqueue_struct *wq;
129 struct work_struct disconn_work;
130 struct nes_cq *nesscq;
131 struct nes_cq *nesrcq;
132 struct nes_pd *nespd;
133 void *cm_node; /* handle of the node this QP is associated with */
134 struct ietf_mpa_frame *ietf_frame;
135 dma_addr_t ietf_frame_pbase;
136 wait_queue_head_t state_waitq;
137 unsigned long socket;
138 struct nes_hw_qp hwqp;
139 struct work_struct work;
140 struct work_struct ae_work;
141 enum ib_qp_state ibqp_state;
142 u32 iwarp_state;
143 u32 hte_index;
144 u32 last_aeq;
145 u32 qp_mem_size;
146 atomic_t refcount;
147 atomic_t close_timer_started;
148 u32 mmap_sq_db_index;
149 u32 mmap_rq_db_index;
150 spinlock_t lock;
151 struct nes_qp_context *nesqp_context;
152 dma_addr_t nesqp_context_pbase;
153 void *pbl_vbase;
154 dma_addr_t pbl_pbase;
155 struct page *page;
156 wait_queue_head_t kick_waitq;
157 u16 in_disconnect;
158 u16 private_data_len;
159 u8 active_conn;
160 u8 skip_lsmm;
161 u8 user_mode;
162 u8 hte_added;
163 u8 hw_iwarp_state;
164 u8 flush_issued;
165 u8 hw_tcp_state;
166 u8 disconn_pending;
167 u8 destroyed;
168};
169#endif /* NES_VERBS_H */