Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                    |   2
-rw-r--r--  drivers/infiniband/core/mad.c                   |   5
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c     |   9
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h       |  14
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c            |   3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h        |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c            | 225
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c          | 211
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_rc.c          |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c         |   3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c       |   7
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                 |   3
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_catas.c       |  15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c          |  51
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c        |  59
-rw-r--r--  drivers/infiniband/hw/nes/nes.c                 |  95
-rw-r--r--  drivers/infiniband/hw/nes/nes.h                 |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c              |  41
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c              | 205
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h              |   6
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c             | 122
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib.h            |   8
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c         |  88
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c         |  30
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c       |  76
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_multicast.c  |  31
26 files changed, 961 insertions(+), 355 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 922d35f4fc08..3cab0cedfca2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3748,6 +3748,7 @@ error1:
 		cm_remove_port_fs(port);
 	}
 	device_unregister(cm_dev->device);
+	kfree(cm_dev);
 }
 
 static void cm_remove_one(struct ib_device *ib_device)
@@ -3776,6 +3777,7 @@ static void cm_remove_one(struct ib_device *ib_device)
 		cm_remove_port_fs(port);
 	}
 	device_unregister(cm_dev->device);
+	kfree(cm_dev);
 }
 
 static int __init ib_cm_init(void)
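Both hunks fix the same leak: device_unregister() only drops the embedded struct device, so the surrounding cm_dev allocation made at device-add time was never returned on the error path or on device removal. A minimal sketch of the pairing the fix restores (struct and names simplified, not the real ib_cm definitions):

	/* sketch: the container must be freed separately from its device */
	struct cm_dev_example {
		struct device *device;
		/* ... per-port state ... */
	};

	static void cm_dev_example_destroy(struct cm_dev_example *cm_dev)
	{
		device_unregister(cm_dev->device); /* releases the struct device */
		kfree(cm_dev);                     /* releases the container itself */
	}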
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1adf2efd3cb3..49c45feccd5b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1697,9 +1697,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv,
 	u8 port_num = mad_agent_priv->agent.port_num;
 	u8 lmc;
 
-	send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
-		    mad_hdr.method & IB_MGMT_METHOD_RESP;
-	rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
+	send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad);
+	rcv_resp = ib_response_mad(rwc->recv_buf.mad);
 
 	if (send_resp == rcv_resp)
 		/* both requests, or both responses. GIDs different */
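The point of calling ib_response_mad() on both the send and receive side is that the open-coded test only checked the IB_MGMT_METHOD_RESP bit, while the helper also classifies TrapRepress and BM response MADs. Roughly what the helper looked like in core/mad.c of that era (quoted from memory; verify against the tree):

	int ib_response_mad(struct ib_mad *mad)
	{
		return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
			(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
			((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
			 (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
	}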
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index eb778bfd6f66..ecff98043589 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1155,13 +1155,11 @@ static int iwch_query_port(struct ib_device *ibdev,
 			   u8 port, struct ib_port_attr *props)
 {
 	PDBG("%s ibdev %p\n", __func__, ibdev);
+
+	memset(props, 0, sizeof(struct ib_port_attr));
 	props->max_mtu = IB_MTU_4096;
-	props->lid = 0;
-	props->lmc = 0;
-	props->sm_lid = 0;
-	props->sm_sl = 0;
+	props->active_mtu = IB_MTU_2048;
 	props->state = IB_PORT_ACTIVE;
-	props->phys_state = 0;
 	props->port_cap_flags =
 	    IB_PORT_CM_SUP |
 	    IB_PORT_SNMP_TUNNEL_SUP |
@@ -1170,7 +1168,6 @@ static int iwch_query_port(struct ib_device *ibdev,
 	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
-	props->qkey_viol_cntr = 0;
 	props->active_width = 2;
 	props->active_speed = 2;
 	props->max_msg_sz = -1;
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 1ab919f836a8..5d7b7855afb9 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -164,6 +164,13 @@ struct ehca_qmap_entry {
 	u16 reported;
 };
 
+struct ehca_queue_map {
+	struct ehca_qmap_entry *map;
+	unsigned int entries;
+	unsigned int tail;
+	unsigned int left_to_poll;
+};
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
@@ -173,8 +180,9 @@ struct ehca_qp {
 	enum ehca_ext_qp_type ext_type;
 	enum ib_qp_state state;
 	struct ipz_queue ipz_squeue;
-	struct ehca_qmap_entry *sq_map;
+	struct ehca_queue_map sq_map;
 	struct ipz_queue ipz_rqueue;
+	struct ehca_queue_map rq_map;
 	struct h_galpas galpas;
 	u32 qkey;
 	u32 real_qp_num;
@@ -204,6 +212,8 @@ struct ehca_qp {
 	atomic_t nr_events; /* events seen */
 	wait_queue_head_t wait_completion;
 	int mig_armed;
+	struct list_head sq_err_node;
+	struct list_head rq_err_node;
 };
 
 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -233,6 +243,8 @@ struct ehca_cq {
 	/* mmap counter for resources mapped into user space */
 	u32 mm_count_queue;
 	u32 mm_count_galpa;
+	struct list_head sqp_err_list;
+	struct list_head rqp_err_list;
 };
 
 enum ehca_mr_flag {
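The new ehca_queue_map is a software shadow of a hardware work-queue ring: one ehca_qmap_entry per WQE slot, indexed exactly like the ring, with tail chasing the next slot still owed a completion. A minimal sketch of how such a map is sized and reset, mirroring the internal_create_qp()/reset_queue_map() logic further down (the example_ prefix marks illustrative code):

	static int example_alloc_queue_map(struct ehca_queue_map *qmap,
					   struct ipz_queue *queue)
	{
		unsigned int i;

		/* one map entry per WQE slot in the ring */
		qmap->entries = queue->queue_length / queue->qe_size;
		qmap->map = vmalloc(qmap->entries * sizeof(*qmap->map));
		if (!qmap->map)
			return -ENOMEM;

		qmap->tail = 0;
		for (i = 0; i < qmap->entries; i++)
			qmap->map[i].reported = 1; /* nothing pending yet */
		return 0;
	}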
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 5540b276a33c..33647a95eb9a 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -276,6 +276,9 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
 	for (i = 0; i < QP_HASHTAB_LEN; i++)
 		INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
 
+	INIT_LIST_HEAD(&my_cq->sqp_err_list);
+	INIT_LIST_HEAD(&my_cq->rqp_err_list);
+
 	if (context) {
 		struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
 		struct ehca_create_cq_resp resp;
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index a8a2ea585d2f..8f7f282ead65 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -197,6 +197,8 @@ void ehca_poll_eqs(unsigned long data);
 int ehca_calc_ipd(struct ehca_shca *shca, int port,
 		  enum ib_rate path_rate, u32 *ipd);
 
+void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
+
 #ifdef CONFIG_PPC_64K_PAGES
 void *ehca_alloc_fw_ctrlblock(gfp_t flags);
 void ehca_free_fw_ctrlblock(void *ptr);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b6bcee036734..4dbe2870e014 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -396,6 +396,50 @@ static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
 	queue->is_small = (queue->page_size != 0);
 }
 
+/* needs to be called with cq->spinlock held */
+void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
+{
+	struct list_head *list, *node;
+
+	/* TODO: support low latency QPs */
+	if (qp->ext_type == EQPT_LLQP)
+		return;
+
+	if (on_sq) {
+		list = &qp->send_cq->sqp_err_list;
+		node = &qp->sq_err_node;
+	} else {
+		list = &qp->recv_cq->rqp_err_list;
+		node = &qp->rq_err_node;
+	}
+
+	if (list_empty(node))
+		list_add_tail(node, list);
+
+	return;
+}
+
+static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cq->spinlock, flags);
+
+	if (!list_empty(node))
+		list_del_init(node);
+
+	spin_unlock_irqrestore(&cq->spinlock, flags);
+}
+
+static void reset_queue_map(struct ehca_queue_map *qmap)
+{
+	int i;
+
+	qmap->tail = 0;
+	for (i = 0; i < qmap->entries; i++)
+		qmap->map[i].reported = 1;
+}
+
 /*
  * Create an ib_qp struct that is either a QP or an SRQ, depending on
  * the value of the is_srq parameter. If init_attr and srq_init_attr share
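ehca_add_to_err_list() and del_from_err_list() lean on a standard list.h idiom: a node set up with INIT_LIST_HEAD(), or unlinked with list_del_init(), points at itself, so list_empty(node) doubles as a "not currently on any list" test and makes both operations idempotent. Sketch:

	static void example_link_once(struct list_head *node,
				      struct list_head *list)
	{
		/* a self-pointing node means "not linked anywhere" */
		if (list_empty(node))
			list_add_tail(node, list);
	}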
@@ -407,12 +451,11 @@ static struct ehca_qp *internal_create_qp(
 	struct ib_srq_init_attr *srq_init_attr,
 	struct ib_udata *udata, int is_srq)
 {
-	struct ehca_qp *my_qp;
+	struct ehca_qp *my_qp, *my_srq = NULL;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
 	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
 					      ib_device);
 	struct ib_ucontext *context = NULL;
-	u32 nr_qes;
 	u64 h_ret;
 	int is_llqp = 0, has_srq = 0;
 	int qp_type, max_send_sge, max_recv_sge, ret;
@@ -457,8 +500,7 @@ static struct ehca_qp *internal_create_qp(
 
 	/* handle SRQ base QPs */
 	if (init_attr->srq) {
-		struct ehca_qp *my_srq =
-			container_of(init_attr->srq, struct ehca_qp, ib_srq);
+		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
 
 		has_srq = 1;
 		parms.ext_type = EQPT_SRQBASE;
@@ -716,15 +758,19 @@ static struct ehca_qp *internal_create_qp(
716 "and pages ret=%i", ret); 758 "and pages ret=%i", ret);
717 goto create_qp_exit2; 759 goto create_qp_exit2;
718 } 760 }
719 nr_qes = my_qp->ipz_squeue.queue_length / 761
762 my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
720 my_qp->ipz_squeue.qe_size; 763 my_qp->ipz_squeue.qe_size;
721 my_qp->sq_map = vmalloc(nr_qes * 764 my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
722 sizeof(struct ehca_qmap_entry)); 765 sizeof(struct ehca_qmap_entry));
723 if (!my_qp->sq_map) { 766 if (!my_qp->sq_map.map) {
724 ehca_err(pd->device, "Couldn't allocate squeue " 767 ehca_err(pd->device, "Couldn't allocate squeue "
725 "map ret=%i", ret); 768 "map ret=%i", ret);
726 goto create_qp_exit3; 769 goto create_qp_exit3;
727 } 770 }
771 INIT_LIST_HEAD(&my_qp->sq_err_node);
772 /* to avoid the generation of bogus flush CQEs */
773 reset_queue_map(&my_qp->sq_map);
728 } 774 }
729 775
730 if (HAS_RQ(my_qp)) { 776 if (HAS_RQ(my_qp)) {
@@ -736,6 +782,25 @@ static struct ehca_qp *internal_create_qp(
736 "and pages ret=%i", ret); 782 "and pages ret=%i", ret);
737 goto create_qp_exit4; 783 goto create_qp_exit4;
738 } 784 }
785
786 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
787 my_qp->ipz_rqueue.qe_size;
788 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
789 sizeof(struct ehca_qmap_entry));
790 if (!my_qp->rq_map.map) {
791 ehca_err(pd->device, "Couldn't allocate squeue "
792 "map ret=%i", ret);
793 goto create_qp_exit5;
794 }
795 INIT_LIST_HEAD(&my_qp->rq_err_node);
796 /* to avoid the generation of bogus flush CQEs */
797 reset_queue_map(&my_qp->rq_map);
798 } else if (init_attr->srq) {
799 /* this is a base QP, use the queue map of the SRQ */
800 my_qp->rq_map = my_srq->rq_map;
801 INIT_LIST_HEAD(&my_qp->rq_err_node);
802
803 my_qp->ipz_rqueue = my_srq->ipz_rqueue;
739 } 804 }
740 805
741 if (is_srq) { 806 if (is_srq) {
@@ -799,7 +864,7 @@ static struct ehca_qp *internal_create_qp(
 		if (ret) {
 			ehca_err(pd->device,
 				 "Couldn't assign qp to send_cq ret=%i", ret);
-			goto create_qp_exit6;
+			goto create_qp_exit7;
 		}
 	}
 
@@ -825,25 +890,29 @@ static struct ehca_qp *internal_create_qp(
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
-			goto create_qp_exit7;
+			goto create_qp_exit8;
 		}
 	}
 
 	return my_qp;
 
-create_qp_exit7:
+create_qp_exit8:
 	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
 
-create_qp_exit6:
+create_qp_exit7:
 	kfree(my_qp->mod_qp_parm);
 
+create_qp_exit6:
+	if (HAS_RQ(my_qp))
+		vfree(my_qp->rq_map.map);
+
 create_qp_exit5:
 	if (HAS_RQ(my_qp))
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
 	if (HAS_SQ(my_qp))
-		vfree(my_qp->sq_map);
+		vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
 	if (HAS_SQ(my_qp))
@@ -1035,6 +1104,101 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
 	return 0;
 }
 
+static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
+			  struct ehca_queue_map *qmap)
+{
+	void *wqe_v;
+	u64 q_ofs;
+	u32 wqe_idx;
+
+	/* convert real to abs address */
+	wqe_p = wqe_p & (~(1UL << 63));
+
+	wqe_v = abs_to_virt(wqe_p);
+
+	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
+		ehca_gen_err("Invalid offset for calculating left cqes "
+				"wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
+		return -EFAULT;
+	}
+
+	wqe_idx = q_ofs / ipz_queue->qe_size;
+	if (wqe_idx < qmap->tail)
+		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
+	else
+		qmap->left_to_poll = wqe_idx - qmap->tail;
+
+	return 0;
+}
+
+static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
+{
+	u64 h_ret;
+	void *send_wqe_p, *recv_wqe_p;
+	int ret;
+	unsigned long flags;
+	int qp_num = my_qp->ib_qp.qp_num;
+
+	/* this hcall is not supported on base QPs */
+	if (my_qp->ext_type != EQPT_SRQBASE) {
+		/* get send and receive wqe pointer */
+		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
+				my_qp->ipz_qp_handle, &my_qp->pf,
+				&send_wqe_p, &recv_wqe_p, 4);
+		if (h_ret != H_SUCCESS) {
+			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
+				 "failed ehca_qp=%p qp_num=%x h_ret=%li",
+				 my_qp, qp_num, h_ret);
+			return ehca2ib_return_code(h_ret);
+		}
+
+		/*
+		 * acquire lock to ensure that nobody is polling the cq which
+		 * could mean that the qmap->tail pointer is in an
+		 * inconsistent state.
+		 */
+		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
+				&my_qp->sq_map);
+		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+		if (ret)
+			return ret;
+
+
+		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
+				&my_qp->rq_map);
+		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
+		if (ret)
+			return ret;
+	} else {
+		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+		my_qp->sq_map.left_to_poll = 0;
+		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+
+		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+		my_qp->rq_map.left_to_poll = 0;
+		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
+	}
+
+	/* this assures flush cqes being generated only for pending wqes */
+	if ((my_qp->sq_map.left_to_poll == 0) &&
+	    (my_qp->rq_map.left_to_poll == 0)) {
+		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
+		ehca_add_to_err_list(my_qp, 1);
+		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
+
+		if (HAS_RQ(my_qp)) {
+			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
+			ehca_add_to_err_list(my_qp, 0);
+			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
+					flags);
+		}
+	}
+
+	return 0;
+}
+
 /*
  * internal_modify_qp with circumvention to handle aqp0 properly
  * smi_reset2init indicates if this is an internal reset-to-init-call for
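The left_to_poll computation in calc_left_cqes() is plain circular-buffer distance: the number of slots from the consumer tail up to the firmware-reported WQE index, wrapping at the ring size. The same arithmetic as a stand-alone helper (illustrative; assumes both indices are already reduced modulo entries):

	static unsigned int example_ring_distance(unsigned int tail,
						  unsigned int wqe_idx,
						  unsigned int entries)
	{
		return (wqe_idx >= tail) ? wqe_idx - tail
					 : (entries - tail) + wqe_idx;
	}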
@@ -1539,10 +1703,27 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			goto modify_qp_exit2;
 		}
 	}
+	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+		ret = check_for_left_cqes(my_qp, shca);
+		if (ret)
+			goto modify_qp_exit2;
+	}
 
 	if (statetrans == IB_QPST_ANY2RESET) {
 		ipz_qeit_reset(&my_qp->ipz_rqueue);
 		ipz_qeit_reset(&my_qp->ipz_squeue);
+
+		if (qp_cur_state == IB_QPS_ERR) {
+			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
+
+			if (HAS_RQ(my_qp))
+				del_from_err_list(my_qp->recv_cq,
+						&my_qp->rq_err_node);
+		}
+		reset_queue_map(&my_qp->sq_map);
+
+		if (HAS_RQ(my_qp))
+			reset_queue_map(&my_qp->rq_map);
 	}
 
 	if (attr_mask & IB_QP_QKEY)
@@ -1958,6 +2139,16 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	idr_remove(&ehca_qp_idr, my_qp->token);
 	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+	/*
+	 * SRQs will never get into an error list and do not have a recv_cq,
+	 * so we need to skip them here.
+	 */
+	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
+
+	if (HAS_SQ(my_qp))
+		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
+
 	/* now wait until all pending events have completed */
 	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
 
@@ -1983,7 +2174,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 	if (qp_type == IB_QPT_GSI) {
 		struct ib_event event;
 		ehca_info(dev, "device %s: port %x is inactive.",
-			  shca->ib_device.name, port_num);
+				shca->ib_device.name, port_num);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
 		event.element.port_num = port_num;
@@ -1991,11 +2182,15 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 		ib_dispatch_event(&event);
 	}
 
-	if (HAS_RQ(my_qp))
+	if (HAS_RQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
+
+		vfree(my_qp->rq_map.map);
+	}
 	if (HAS_SQ(my_qp)) {
 		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-		vfree(my_qp->sq_map);
+
+		vfree(my_qp->sq_map.map);
 	}
 	kmem_cache_free(qp_cache, my_qp);
 	atomic_dec(&shca->num_qps);
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 4426d82fe798..64928079eafa 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -53,9 +53,25 @@
 /* in RC traffic, insert an empty RDMA READ every this many packets */
 #define ACK_CIRC_THRESHOLD 2000000
 
+static u64 replace_wr_id(u64 wr_id, u16 idx)
+{
+	u64 ret;
+
+	ret = wr_id & ~QMAP_IDX_MASK;
+	ret |= idx & QMAP_IDX_MASK;
+
+	return ret;
+}
+
+static u16 get_app_wr_id(u64 wr_id)
+{
+	return wr_id & QMAP_IDX_MASK;
+}
+
 static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
 				  struct ehca_wqe *wqe_p,
-				  struct ib_recv_wr *recv_wr)
+				  struct ib_recv_wr *recv_wr,
+				  u32 rq_map_idx)
 {
 	u8 cnt_ds;
 	if (unlikely((recv_wr->num_sge < 0) ||
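replace_wr_id()/get_app_wr_id() implement bit-stealing on the work-request ID: the low QMAP_IDX_MASK bits handed to the hardware carry the queue-map index, while the application's original low bits are parked in the map entry and spliced back in at poll time. The round trip as a sketch (example_ names are illustrative; ehca's QMAP_IDX_MASK covers the low 16 bits):

	static void example_round_trip(u64 app_wr_id, u16 idx)
	{
		u64 hw_id = replace_wr_id(app_wr_id, idx); /* low bits := idx */
		u16 back = get_app_wr_id(hw_id);           /* back == idx */

		/* the app's own low bits wait in qmap->map[back].app_wr_id;
		 * replace_wr_id(hw_id, app_wr_id) rebuilds the original ID */
		(void)back;
	}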
@@ -69,7 +85,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = recv_wr->wr_id;
+	wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
 	wqe_p->nr_of_data_seg = recv_wr->num_sge;
 
 	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
@@ -146,6 +162,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 	u64 dma_length;
 	struct ehca_av *my_av;
 	u32 remote_qkey = send_wr->wr.ud.remote_qkey;
+	struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
 
 	if (unlikely((send_wr->num_sge < 0) ||
 		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
@@ -158,11 +175,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 	/* clear wqe header until sglist */
 	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
 
-	wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK;
-	wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK;
+	wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
 
-	qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK;
-	qp->sq_map[sq_map_idx].reported = 0;
+	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
+	qmap_entry->reported = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -496,7 +512,9 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
+	u32 rq_map_idx;
 	unsigned long flags;
+	struct ehca_qmap_entry *qmap_entry;
 
 	if (unlikely(!HAS_RQ(my_qp))) {
 		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
@@ -524,8 +542,15 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 			}
 			goto post_recv_exit0;
 		}
+		/*
+		 * Get the index of the WQE in the recv queue. The same index
+		 * is used for writing into the rq_map.
+		 */
+		rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
+
 		/* write a RECV WQE into the QUEUE */
-		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
+		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr,
+				rq_map_idx);
 		/*
 		 * if something failed,
 		 * reset the free entry pointer to the start value
@@ -540,6 +565,11 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 			}
 			goto post_recv_exit0;
 		}
+
+		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
+		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
+		qmap_entry->reported = 0;
+
 		wqe_cnt++;
 	} /* eof for cur_recv_wr */
 
@@ -596,10 +626,12 @@ static const u8 ib_wc_opcode[255] = {
 /* internal function to poll one entry of cq */
 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
 {
-	int ret = 0;
+	int ret = 0, qmap_tail_idx;
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	struct ehca_cqe *cqe;
 	struct ehca_qp *my_qp;
+	struct ehca_qmap_entry *qmap_entry;
+	struct ehca_queue_map *qmap;
 	int cqe_count = 0, is_error;
 
 repoll:
@@ -674,27 +706,52 @@ repoll:
 		goto repoll;
 	wc->qp = &my_qp->ib_qp;
 
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) {
-		struct ehca_qmap_entry *qmap_entry;
+	if (is_error) {
 		/*
-		 * We got a send completion and need to restore the original
-		 * wr_id.
+		 * set left_to_poll to 0 because in error state, we will not
+		 * get any additional CQEs
 		 */
-		qmap_entry = &my_qp->sq_map[cqe->work_request_id &
-					    QMAP_IDX_MASK];
+		ehca_add_to_err_list(my_qp, 1);
+		my_qp->sq_map.left_to_poll = 0;
 
-		if (qmap_entry->reported) {
-			ehca_warn(cq->device, "Double cqe on qp_num=%#x",
-				  my_qp->real_qp_num);
-			/* found a double cqe, discard it and read next one */
-			goto repoll;
-		}
-		wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK;
-		wc->wr_id |= qmap_entry->app_wr_id;
-		qmap_entry->reported = 1;
-	} else
+		if (HAS_RQ(my_qp))
+			ehca_add_to_err_list(my_qp, 0);
+		my_qp->rq_map.left_to_poll = 0;
+	}
+
+	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+		/* We got a send completion. */
+		qmap = &my_qp->sq_map;
+	else
 		/* We got a receive completion. */
-		wc->wr_id = cqe->work_request_id;
+		qmap = &my_qp->rq_map;
+
+	qmap_entry = &qmap->map[qmap_tail_idx];
+	if (qmap_entry->reported) {
+		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
+				my_qp->real_qp_num);
+		/* found a double cqe, discard it and read next one */
+		goto repoll;
+	}
+
+	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
+	qmap_entry->reported = 1;
+
+	/* this is a proper completion, we need to advance the tail pointer */
+	if (++qmap->tail == qmap->entries)
+		qmap->tail = 0;
+
+	/* if left_to_poll is decremented to 0, add the QP to the error list */
+	if (qmap->left_to_poll > 0) {
+		qmap->left_to_poll--;
+		if ((my_qp->sq_map.left_to_poll == 0) &&
+		    (my_qp->rq_map.left_to_poll == 0)) {
+			ehca_add_to_err_list(my_qp, 1);
+			if (HAS_RQ(my_qp))
+				ehca_add_to_err_list(my_qp, 0);
+		}
+	}
 
 	/* eval ib_wc_opcode */
 	wc->opcode = ib_wc_opcode[cqe->optype]-1;
@@ -733,13 +790,88 @@ poll_cq_one_exit0:
 	return ret;
 }
 
+static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
+			       struct ib_wc *wc, int num_entries,
+			       struct ipz_queue *ipz_queue, int on_sq)
+{
+	int nr = 0;
+	struct ehca_wqe *wqe;
+	u64 offset;
+	struct ehca_queue_map *qmap;
+	struct ehca_qmap_entry *qmap_entry;
+
+	if (on_sq)
+		qmap = &my_qp->sq_map;
+	else
+		qmap = &my_qp->rq_map;
+
+	qmap_entry = &qmap->map[qmap->tail];
+
+	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
+		/* generate flush CQE */
+		memset(wc, 0, sizeof(*wc));
+
+		offset = qmap->tail * ipz_queue->qe_size;
+		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
+		if (!wqe) {
+			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
+				 "qp_num=%#x", offset, my_qp->real_qp_num);
+			return nr;
+		}
+
+		wc->wr_id = replace_wr_id(wqe->work_request_id,
+				qmap_entry->app_wr_id);
+
+		if (on_sq) {
+			switch (wqe->optype) {
+			case WQE_OPTYPE_SEND:
+				wc->opcode = IB_WC_SEND;
+				break;
+			case WQE_OPTYPE_RDMAWRITE:
+				wc->opcode = IB_WC_RDMA_WRITE;
+				break;
+			case WQE_OPTYPE_RDMAREAD:
+				wc->opcode = IB_WC_RDMA_READ;
+				break;
+			default:
+				ehca_err(cq->device, "Invalid optype=%x",
+						wqe->optype);
+				return nr;
+			}
+		} else
+			wc->opcode = IB_WC_RECV;
+
+		if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
+			wc->ex.imm_data = wqe->immediate_data;
+			wc->wc_flags |= IB_WC_WITH_IMM;
+		}
+
+		wc->status = IB_WC_WR_FLUSH_ERR;
+
+		wc->qp = &my_qp->ib_qp;
+
+		/* mark as reported and advance tail pointer */
+		qmap_entry->reported = 1;
+		if (++qmap->tail == qmap->entries)
+			qmap->tail = 0;
+		qmap_entry = &qmap->map[qmap->tail];
+
+		wc++; nr++;
+	}
+
+	return nr;
+
+}
+
 int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 {
 	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
 	int nr;
+	struct ehca_qp *err_qp;
 	struct ib_wc *current_wc = wc;
 	int ret = 0;
 	unsigned long flags;
+	int entries_left = num_entries;
 
 	if (num_entries < 1) {
 		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -749,15 +881,40 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
 	}
 
 	spin_lock_irqsave(&my_cq->spinlock, flags);
-	for (nr = 0; nr < num_entries; nr++) {
+
+	/* generate flush cqes for send queues */
+	list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
+		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
+				&err_qp->ipz_squeue, 1);
+		entries_left -= nr;
+		current_wc += nr;
+
+		if (entries_left == 0)
+			break;
+	}
+
+	/* generate flush cqes for receive queues */
+	list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
+		nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
+				&err_qp->ipz_rqueue, 0);
+		entries_left -= nr;
+		current_wc += nr;
+
+		if (entries_left == 0)
+			break;
+	}
+
+	for (nr = 0; nr < entries_left; nr++) {
 		ret = ehca_poll_cq_one(cq, current_wc);
 		if (ret)
 			break;
 		current_wc++;
 	} /* eof for nr */
+	entries_left -= nr;
+
 	spin_unlock_irqrestore(&my_cq->spinlock, flags);
 	if (ret == -EAGAIN || !ret)
-		ret = nr;
+		ret = num_entries - entries_left;
 
 poll_cq_exit0:
 	return ret;
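From the verbs consumer's side the only visible change is that, once a QP has entered the error state, ib_poll_cq() now delivers one IB_WC_WR_FLUSH_ERR completion per never-executed WQE ahead of any remaining genuine CQEs. A typical consumer loop (standard verbs API, nothing ehca-specific):

	struct ib_wc wc[16];
	int n, i;

	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status == IB_WC_WR_FLUSH_ERR) {
				/* WQE never executed; reclaim wc[i].wr_id */
				continue;
			}
			/* process a real completion */
		}
	}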
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 97710522624d..7b93cda1a4bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -675,7 +675,8 @@ static void send_rc_ack(struct ipath_qp *qp)
 	hdr.lrh[0] = cpu_to_be16(lrh0);
 	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid);
+	hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
+				 qp->remote_ah_attr.src_path_bits);
 	ohdr->bth[0] = cpu_to_be32(bth0);
 	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
 	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index af051f757663..fc0f6d9e6030 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -618,7 +618,8 @@ void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
 	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
 	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
 	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
+	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
+				       qp->remote_ah_attr.src_path_bits);
 	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
 	bth0 |= extra_bytes << 20;
 	ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
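Both ipath hunks fix the source LID (LRH[3]) the same way: with LMC > 0 a port responds to 2^LMC consecutive LIDs, and src_path_bits selects which of those this path transmits from, so the bits must be OR'd into the base LID of every locally built header. Sketch of the relationship (illustrative helper; assumes the base LID has its low LMC bits clear, as the SM assigns it):

	static u16 example_make_slid(u16 base_lid, u8 lmc, u8 src_path_bits)
	{
		u16 path_mask = (1 << lmc) - 1;

		return base_lid | (src_path_bits & path_mask);
	}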
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index b766e40e9ebf..eabc4247860b 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -340,9 +340,16 @@ static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 	int acc;
 	int ret;
 	unsigned long flags;
+	struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
+	if (qp->ibqp.qp_type != IB_QPT_SMI &&
+	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
+		ret = -ENETDOWN;
+		goto bail;
+	}
+
 	/* Check that state is OK to post send. */
 	if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
 		goto bail_inval;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9559248f265b..baa01deb2436 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1058,6 +1058,9 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	else
 		sqd_event = 0;
 
+	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+		context->rlkey |= (1 << 4);
+
 	/*
 	 * Before passing a kernel QP to the HW, make sure that the
 	 * ownership bits of the send queue are set and the SQ
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index cc440f90000b..65ad359fdf16 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -149,18 +149,10 @@ void mthca_start_catas_poll(struct mthca_dev *dev)
 			 ((pci_resource_len(dev->pdev, 0) - 1) &
 			  dev->catas_err.addr);
 
-	if (!request_mem_region(addr, dev->catas_err.size * 4,
-				DRV_NAME)) {
-		mthca_warn(dev, "couldn't request catastrophic error region "
-			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
-		return;
-	}
-
 	dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4);
 	if (!dev->catas_err.map) {
 		mthca_warn(dev, "couldn't map catastrophic error region "
 			   "at 0x%lx/0x%x\n", addr, dev->catas_err.size * 4);
-		release_mem_region(addr, dev->catas_err.size * 4);
 		return;
 	}
 
@@ -175,13 +167,8 @@ void mthca_stop_catas_poll(struct mthca_dev *dev)
 {
 	del_timer_sync(&dev->catas_err.timer);
 
-	if (dev->catas_err.map) {
+	if (dev->catas_err.map)
 		iounmap(dev->catas_err.map);
-		release_mem_region(pci_resource_start(dev->pdev, 0) +
-				   ((pci_resource_len(dev->pdev, 0) - 1) &
-				    dev->catas_err.addr),
-				   dev->catas_err.size * 4);
-	}
 
 	spin_lock_irq(&catas_lock);
 	list_del(&dev->catas_err.list);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cc6858f0b65b..28f0e0c40d7d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -652,27 +652,13 @@ static int mthca_map_reg(struct mthca_dev *dev,
 {
 	unsigned long base = pci_resource_start(dev->pdev, 0);
 
-	if (!request_mem_region(base + offset, size, DRV_NAME))
-		return -EBUSY;
-
 	*map = ioremap(base + offset, size);
-	if (!*map) {
-		release_mem_region(base + offset, size);
+	if (!*map)
 		return -ENOMEM;
-	}
 
 	return 0;
 }
 
-static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
-			    unsigned long size, void __iomem *map)
-{
-	unsigned long base = pci_resource_start(dev->pdev, 0);
-
-	release_mem_region(base + offset, size);
-	iounmap(map);
-}
-
 static int mthca_map_eq_regs(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev)) {
@@ -699,9 +685,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
 				     dev->fw.arbel.eq_arm_base) + 4, 4,
 				     &dev->eq_regs.arbel.eq_arm)) {
 			mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
-			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
-					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-					dev->clr_base);
+			iounmap(dev->clr_base);
 			return -ENOMEM;
 		}
 
@@ -710,12 +694,8 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
 				     MTHCA_EQ_SET_CI_SIZE,
 				     &dev->eq_regs.arbel.eq_set_ci_base)) {
 			mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
-			mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
-					dev->fw.arbel.eq_arm_base) + 4, 4,
-					dev->eq_regs.arbel.eq_arm);
-			mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
-					dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-					dev->clr_base);
+			iounmap(dev->eq_regs.arbel.eq_arm);
+			iounmap(dev->clr_base);
 			return -ENOMEM;
 		}
 	} else {
@@ -731,8 +711,7 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
 				 &dev->eq_regs.tavor.ecr_base)) {
 			mthca_err(dev, "Couldn't map ecr register, "
 				  "aborting.\n");
-			mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
-					dev->clr_base);
+			iounmap(dev->clr_base);
 			return -ENOMEM;
 		}
 	}
@@ -744,22 +723,12 @@ static int mthca_map_eq_regs(struct mthca_dev *dev)
 static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev)) {
-		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
-				dev->fw.arbel.eq_set_ci_base,
-				MTHCA_EQ_SET_CI_SIZE,
-				dev->eq_regs.arbel.eq_set_ci_base);
-		mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
-				dev->fw.arbel.eq_arm_base) + 4, 4,
-				dev->eq_regs.arbel.eq_arm);
-		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
-				dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
-				dev->clr_base);
+		iounmap(dev->eq_regs.arbel.eq_set_ci_base);
+		iounmap(dev->eq_regs.arbel.eq_arm);
+		iounmap(dev->clr_base);
 	} else {
-		mthca_unmap_reg(dev, MTHCA_ECR_BASE,
-				MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
-				dev->eq_regs.tavor.ecr_base);
-		mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
-				dev->clr_base);
+		iounmap(dev->eq_regs.tavor.ecr_base);
+		iounmap(dev->clr_base);
 	}
 }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index fb9f91b60f30..52f60f4eea00 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -921,58 +921,6 @@ err_uar_table_free:
 	return err;
 }
 
-static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
-{
-	int err;
-
-	/*
-	 * We can't just use pci_request_regions() because the MSI-X
-	 * table is right in the middle of the first BAR.  If we did
-	 * pci_request_region and grab all of the first BAR, then
-	 * setting up MSI-X would fail, since the PCI core wants to do
-	 * request_mem_region on the MSI-X vector table.
-	 *
-	 * So just request what we need right now, and request any
-	 * other regions we need when setting up EQs.
-	 */
-	if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-				MTHCA_HCR_SIZE, DRV_NAME))
-		return -EBUSY;
-
-	err = pci_request_region(pdev, 2, DRV_NAME);
-	if (err)
-		goto err_bar2_failed;
-
-	if (!ddr_hidden) {
-		err = pci_request_region(pdev, 4, DRV_NAME);
-		if (err)
-			goto err_bar4_failed;
-	}
-
-	return 0;
-
-err_bar4_failed:
-	pci_release_region(pdev, 2);
-
-err_bar2_failed:
-	release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-			   MTHCA_HCR_SIZE);
-
-	return err;
-}
-
-static void mthca_release_regions(struct pci_dev *pdev,
-				  int ddr_hidden)
-{
-	if (!ddr_hidden)
-		pci_release_region(pdev, 4);
-
-	pci_release_region(pdev, 2);
-
-	release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
-			   MTHCA_HCR_SIZE);
-}
-
 static int mthca_enable_msi_x(struct mthca_dev *mdev)
 {
 	struct msix_entry entries[3];
@@ -1059,7 +1007,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
 		ddr_hidden = 1;
 
-	err = mthca_request_regions(pdev, ddr_hidden);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
 		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
 			"aborting.\n");
@@ -1196,7 +1144,7 @@ err_free_dev:
 	ib_dealloc_device(&mdev->ib_dev);
 
 err_free_res:
-	mthca_release_regions(pdev, ddr_hidden);
+	pci_release_regions(pdev);
 
 err_disable_pdev:
 	pci_disable_device(pdev);
@@ -1240,8 +1188,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
 		pci_disable_msix(pdev);
 
 	ib_dealloc_device(&mdev->ib_dev);
-	mthca_release_regions(pdev, mdev->mthca_flags &
-			      MTHCA_FLAG_DDR_HIDDEN);
+	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 }
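All three mthca files drop the piecemeal request_mem_region() bookkeeping in favor of a single pci_request_regions() per device; the deleted comment explains the old constraint (the PCI core once reserving the MSI-X table inside BAR 0), which had long since been relaxed. The resulting probe-time pattern is the conventional one (sketch, hypothetical driver name):

	static int example_probe(struct pci_dev *pdev)
	{
		int err = pci_enable_device(pdev);
		if (err)
			return err;

		err = pci_request_regions(pdev, "example"); /* all BARs at once */
		if (err)
			goto err_disable;

		/* ioremap() individual windows as needed; on teardown,
		 * iounmap() them and call pci_release_regions() */
		return 0;

	err_disable:
		pci_disable_device(pdev);
		return err;
	}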
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b0cab64e5e3d..a2b04d62b1a4 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -70,27 +70,31 @@ int interrupt_mod_interval = 0;
 
 /* Interoperability */
 int mpa_version = 1;
-module_param(mpa_version, int, 0);
+module_param(mpa_version, int, 0644);
 MODULE_PARM_DESC(mpa_version, "MPA version to be used int MPA Req/Resp (0 or 1)");
 
 /* Interoperability */
 int disable_mpa_crc = 0;
-module_param(disable_mpa_crc, int, 0);
+module_param(disable_mpa_crc, int, 0644);
 MODULE_PARM_DESC(disable_mpa_crc, "Disable checking of MPA CRC");
 
 unsigned int send_first = 0;
-module_param(send_first, int, 0);
+module_param(send_first, int, 0644);
 MODULE_PARM_DESC(send_first, "Send RDMA Message First on Active Connection");
 
 
 unsigned int nes_drv_opt = 0;
-module_param(nes_drv_opt, int, 0);
+module_param(nes_drv_opt, int, 0644);
 MODULE_PARM_DESC(nes_drv_opt, "Driver option parameters");
 
 unsigned int nes_debug_level = 0;
 module_param_named(debug_level, nes_debug_level, uint, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug output level");
 
+unsigned int wqm_quanta = 0x10000;
+module_param(wqm_quanta, int, 0644);
+MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
@@ -557,12 +561,32 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 		goto bail5;
 	}
 	nesdev->nesadapter->et_rx_coalesce_usecs_irq = interrupt_mod_interval;
+	nesdev->nesadapter->wqm_quanta = wqm_quanta;
 
 	/* nesdev->base_doorbell_index =
 			nesdev->nesadapter->pd_config_base[PCI_FUNC(nesdev->pcidev->devfn)]; */
 	nesdev->base_doorbell_index = 1;
 	nesdev->doorbell_start = nesdev->nesadapter->doorbell_start;
-	nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) % nesdev->nesadapter->port_count;
+	if (nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
+		switch (PCI_FUNC(nesdev->pcidev->devfn) %
+			nesdev->nesadapter->port_count) {
+		case 1:
+			nesdev->mac_index = 2;
+			break;
+		case 2:
+			nesdev->mac_index = 1;
+			break;
+		case 3:
+			nesdev->mac_index = 3;
+			break;
+		case 0:
+		default:
+			nesdev->mac_index = 0;
+		}
+	} else {
+		nesdev->mac_index = PCI_FUNC(nesdev->pcidev->devfn) %
+						nesdev->nesadapter->port_count;
+	}
 
 	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 
@@ -581,7 +605,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 	nesdev->int_req = (0x101 << PCI_FUNC(nesdev->pcidev->devfn)) |
 			(1 << (PCI_FUNC(nesdev->pcidev->devfn)+16));
 	if (PCI_FUNC(nesdev->pcidev->devfn) < 4) {
-		nesdev->int_req |= (1 << (PCI_FUNC(nesdev->pcidev->devfn)+24));
+		nesdev->int_req |= (1 << (PCI_FUNC(nesdev->mac_index)+24));
 	}
 
 	/* TODO: This really should be the first driver to load, not function 0 */
@@ -772,14 +796,14 @@ static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf)
 
 	list_for_each_entry(nesdev, &nes_dev_list, list) {
 		if (i == ee_flsh_adapter) {
-			devfn = nesdev->nesadapter->devfn;
-			bus_number = nesdev->nesadapter->bus_number;
+			devfn = nesdev->pcidev->devfn;
+			bus_number = nesdev->pcidev->bus->number;
 			break;
 		}
 		i++;
 	}
 
-	return snprintf(buf, PAGE_SIZE, "%x:%x", bus_number, devfn);
+	return snprintf(buf, PAGE_SIZE, "%x:%x\n", bus_number, devfn);
 }
 
 static ssize_t nes_store_adapter(struct device_driver *ddp,
@@ -1050,6 +1074,55 @@ static ssize_t nes_store_idx_data(struct device_driver *ddp,
 	return strnlen(buf, count);
 }
 
+
+/**
+ * nes_show_wqm_quanta
+ */
+static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf)
+{
+	u32 wqm_quanta_value = 0xdead;
+	u32 i = 0;
+	struct nes_device *nesdev;
+
+	list_for_each_entry(nesdev, &nes_dev_list, list) {
+		if (i == ee_flsh_adapter) {
+			wqm_quanta_value = nesdev->nesadapter->wqm_quanta;
+			break;
+		}
+		i++;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta);
+}
+
+
+/**
+ * nes_store_wqm_quanta
+ */
+static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
+					const char *buf, size_t count)
+{
+	unsigned long wqm_quanta_value;
+	u32 wqm_config1;
+	u32 i = 0;
+	struct nes_device *nesdev;
+
+	strict_strtoul(buf, 0, &wqm_quanta_value);
+	list_for_each_entry(nesdev, &nes_dev_list, list) {
+		if (i == ee_flsh_adapter) {
+			nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
+			wqm_config1 = nes_read_indexed(nesdev,
+						NES_IDX_WQM_CONFIG1);
+			nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1,
+					((wqm_quanta_value << 1) |
+					(wqm_config1 & 0x00000001)));
+			break;
+		}
+		i++;
+	}
+	return strnlen(buf, count);
+}
+
 static DRIVER_ATTR(adapter, S_IRUSR | S_IWUSR,
 		   nes_show_adapter, nes_store_adapter);
 static DRIVER_ATTR(eeprom_cmd, S_IRUSR | S_IWUSR,
@@ -1068,6 +1141,8 @@ static DRIVER_ATTR(idx_addr, S_IRUSR | S_IWUSR,
 		   nes_show_idx_addr, nes_store_idx_addr);
 static DRIVER_ATTR(idx_data, S_IRUSR | S_IWUSR,
 		   nes_show_idx_data, nes_store_idx_data);
+static DRIVER_ATTR(wqm_quanta, S_IRUSR | S_IWUSR,
+		   nes_show_wqm_quanta, nes_store_wqm_quanta);
 
 static int nes_create_driver_sysfs(struct pci_driver *drv)
 {
@@ -1081,6 +1156,7 @@ static int nes_create_driver_sysfs(struct pci_driver *drv)
 	error |= driver_create_file(&drv->driver, &driver_attr_nonidx_data);
 	error |= driver_create_file(&drv->driver, &driver_attr_idx_addr);
 	error |= driver_create_file(&drv->driver, &driver_attr_idx_data);
+	error |= driver_create_file(&drv->driver, &driver_attr_wqm_quanta);
 	return error;
 }
 
@@ -1095,6 +1171,7 @@ static void nes_remove_driver_sysfs(struct pci_driver *drv)
 	driver_remove_file(&drv->driver, &driver_attr_nonidx_data);
 	driver_remove_file(&drv->driver, &driver_attr_idx_addr);
 	driver_remove_file(&drv->driver, &driver_attr_idx_data);
+	driver_remove_file(&drv->driver, &driver_attr_wqm_quanta);
 }
 
 /**
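Two caveats in the sysfs code above: nes_show_wqm_quanta() formats the global wqm_quanta module parameter rather than the per-adapter wqm_quanta_value it just looked up, and nes_store_wqm_quanta() discards the strict_strtoul() return value, so a malformed write leaves wqm_quanta_value unchecked before it is programmed into NES_IDX_WQM_CONFIG1. A hedged sketch of the checked parse (illustrative, not what the patch does):

	static ssize_t example_store(const char *buf, size_t count)
	{
		unsigned long val;

		if (strict_strtoul(buf, 0, &val)) /* non-zero means bad input */
			return -EINVAL;
		/* ... apply val under the appropriate lock ... */
		return count;
	}

In use, the attribute appears as a driver-level sysfs file (something like /sys/bus/pci/drivers/iw_nes/wqm_quanta, depending on the registered driver name) and accepts the usual 0x-prefixed values.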
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 8eb7ae96974d..1595dc7bba9d 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -169,7 +169,7 @@ extern int disable_mpa_crc;
 extern unsigned int send_first;
 extern unsigned int nes_drv_opt;
 extern unsigned int nes_debug_level;
-
+extern unsigned int wqm_quanta;
 extern struct list_head nes_adapter_list;
 
 extern atomic_t cm_connects;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 499d3cf83e1f..2caf9da81ad5 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -52,7 +52,7 @@
 #include <linux/random.h>
 #include <linux/list.h>
 #include <linux/threads.h>
-
+#include <net/arp.h>
 #include <net/neighbour.h>
 #include <net/route.h>
 #include <net/ip_fib.h>
@@ -1019,23 +1019,43 @@ static inline int mini_cm_accelerated(struct nes_cm_core *cm_core,
1019 1019
1020 1020
1021/** 1021/**
1022 * nes_addr_send_arp 1022 * nes_addr_resolve_neigh
1023 */ 1023 */
1024static void nes_addr_send_arp(u32 dst_ip) 1024static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip)
1025{ 1025{
1026 struct rtable *rt; 1026 struct rtable *rt;
1027 struct flowi fl; 1027 struct flowi fl;
1028 struct neighbour *neigh;
1029 int rc = -1;
1030 DECLARE_MAC_BUF(mac);
1028 1031
1029 memset(&fl, 0, sizeof fl); 1032 memset(&fl, 0, sizeof fl);
1030 fl.nl_u.ip4_u.daddr = htonl(dst_ip); 1033 fl.nl_u.ip4_u.daddr = htonl(dst_ip);
1031 if (ip_route_output_key(&init_net, &rt, &fl)) { 1034 if (ip_route_output_key(&init_net, &rt, &fl)) {
1032 printk("%s: ip_route_output_key failed for 0x%08X\n", 1035 printk("%s: ip_route_output_key failed for 0x%08X\n",
1033 __func__, dst_ip); 1036 __func__, dst_ip);
1034 return; 1037 return rc;
1038 }
1039
1040 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, nesvnic->netdev);
1041 if (neigh) {
1042 if (neigh->nud_state & NUD_VALID) {
1043 nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
1044 " is %s, Gateway is 0x%08X \n", dst_ip,
1045 print_mac(mac, neigh->ha), ntohl(rt->rt_gateway));
1046 nes_manage_arp_cache(nesvnic->netdev, neigh->ha,
1047 dst_ip, NES_ARP_ADD);
1048 rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
1049 NES_ARP_RESOLVE);
1050 }
1051 neigh_release(neigh);
1035 } 1052 }
1036 1053
1037 neigh_event_send(rt->u.dst.neighbour, NULL); 1054 if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
1055 neigh_event_send(rt->u.dst.neighbour, NULL);
1056
1038 ip_rt_put(rt); 1057 ip_rt_put(rt);
1058 return rc;
1039} 1059}
1040 1060
1041 1061
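Note: the rewritten helper above turns the old fire-and-forget ARP kick into a synchronous lookup: route the destination, probe the ARP table for a NUD_VALID neighbour, push a hit into the adapter's ARP cache, and only fall back to neigh_event_send() when nothing usable exists yet. Condensed sketch of that lookup sequence, not part of the commit (2.6.27-era routing API; the driver-cache call is omitted):

	#include <linux/netdevice.h>
	#include <linux/string.h>
	#include <net/arp.h>
	#include <net/route.h>

	/* Returns 0 when a resolved neighbour entry exists (MAC is in
	 * neigh->ha), -1 when resolution was merely kicked off. */
	static int resolve_neigh_sketch(struct net_device *netdev, __be32 daddr)
	{
		struct rtable *rt;
		struct neighbour *neigh;
		struct flowi fl;
		int rc = -1;

		memset(&fl, 0, sizeof fl);
		fl.nl_u.ip4_u.daddr = daddr;
		if (ip_route_output_key(&init_net, &rt, &fl))
			return rc;

		/* ARP entries hang off the gateway, not the final hop */
		neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
		if (neigh) {
			if (neigh->nud_state & NUD_VALID)
				rc = 0;
			neigh_release(neigh);
		}

		if (rc)		/* nothing valid: start ARP resolution */
			neigh_event_send(rt->u.dst.neighbour, NULL);

		ip_rt_put(rt);
		return rc;
	}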
@@ -1108,9 +1128,11 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1108 /* get the mac addr for the remote node */ 1128 /* get the mac addr for the remote node */
1109 arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE); 1129 arpindex = nes_arp_table(nesdev, cm_node->rem_addr, NULL, NES_ARP_RESOLVE);
1110 if (arpindex < 0) { 1130 if (arpindex < 0) {
1111 kfree(cm_node); 1131 arpindex = nes_addr_resolve_neigh(nesvnic, cm_info->rem_addr);
1112 nes_addr_send_arp(cm_info->rem_addr); 1132 if (arpindex < 0) {
1113 return NULL; 1133 kfree(cm_node);
1134 return NULL;
1135 }
1114 } 1136 }
1115 1137
1116 /* copy the mac addr to node context */ 1138 /* copy the mac addr to node context */
@@ -1826,7 +1848,7 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
1826/** 1848/**
1827 * mini_cm_connect - make a connection node with params 1849 * mini_cm_connect - make a connection node with params
1828 */ 1850 */
1829struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, 1851static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
1830 struct nes_vnic *nesvnic, u16 private_data_len, 1852 struct nes_vnic *nesvnic, u16 private_data_len,
1831 void *private_data, struct nes_cm_info *cm_info) 1853 void *private_data, struct nes_cm_info *cm_info)
1832{ 1854{
@@ -2007,7 +2029,6 @@ static int mini_cm_close(struct nes_cm_core *cm_core, struct nes_cm_node *cm_nod
2007 ret = rem_ref_cm_node(cm_core, cm_node); 2029 ret = rem_ref_cm_node(cm_core, cm_node);
2008 break; 2030 break;
2009 } 2031 }
2010 cm_node->cm_id = NULL;
2011 return ret; 2032 return ret;
2012} 2033}
2013 2034
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 1513d4066f1b..7c49cc882d75 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -55,18 +55,19 @@ u32 int_mod_cq_depth_24;
55u32 int_mod_cq_depth_16; 55u32 int_mod_cq_depth_16;
56u32 int_mod_cq_depth_4; 56u32 int_mod_cq_depth_4;
57u32 int_mod_cq_depth_1; 57u32 int_mod_cq_depth_1;
58 58static const u8 nes_max_critical_error_count = 100;
59#include "nes_cm.h" 59#include "nes_cm.h"
60 60
61static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq); 61static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq);
62static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count); 62static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_count);
63static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, 63static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
64 u8 OneG_Mode); 64 struct nes_adapter *nesadapter, u8 OneG_Mode);
65static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq); 65static void nes_nic_napi_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq);
66static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq); 66static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq);
67static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq); 67static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq);
68static void nes_process_iwarp_aeqe(struct nes_device *nesdev, 68static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
69 struct nes_hw_aeqe *aeqe); 69 struct nes_hw_aeqe *aeqe);
70static void process_critical_error(struct nes_device *nesdev);
70static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number); 71static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
71static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode); 72static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
72 73
@@ -222,11 +223,10 @@ static void nes_nic_tune_timer(struct nes_device *nesdev)
222 } 223 }
223 224
224 /* boundary checking */ 225 /* boundary checking */
225 if (shared_timer->timer_in_use > NES_NIC_FAST_TIMER_HIGH) 226 if (shared_timer->timer_in_use > shared_timer->threshold_high)
226 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_HIGH; 227 shared_timer->timer_in_use = shared_timer->threshold_high;
227 else if (shared_timer->timer_in_use < NES_NIC_FAST_TIMER_LOW) { 228 else if (shared_timer->timer_in_use < shared_timer->threshold_low)
228 shared_timer->timer_in_use = NES_NIC_FAST_TIMER_LOW; 229 shared_timer->timer_in_use = shared_timer->threshold_low;
229 }
230 230
231 nesdev->currcq_count = 0; 231 nesdev->currcq_count = 0;
232 232
@@ -292,9 +292,6 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
292 292
293 if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0) 293 if ((port_count = nes_reset_adapter_ne020(nesdev, &OneG_Mode)) == 0)
294 return NULL; 294 return NULL;
295 if (nes_init_serdes(nesdev, hw_rev, port_count, OneG_Mode))
296 return NULL;
297 nes_init_csr_ne020(nesdev, hw_rev, port_count);
298 295
299 max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE); 296 max_qp = nes_read_indexed(nesdev, NES_IDX_QP_CTX_SIZE);
300 nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp); 297 nes_debug(NES_DBG_INIT, "QP_CTX_SIZE=%u\n", max_qp);
@@ -353,6 +350,22 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
353 nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n", 350 nes_debug(NES_DBG_INIT, "Allocating new nesadapter @ %p, size = %u (actual size = %u).\n",
354 nesadapter, (u32)sizeof(struct nes_adapter), adapter_size); 351 nesadapter, (u32)sizeof(struct nes_adapter), adapter_size);
355 352
353 if (nes_read_eeprom_values(nesdev, nesadapter)) {
354 printk(KERN_ERR PFX "Unable to read EEPROM data.\n");
355 kfree(nesadapter);
356 return NULL;
357 }
358
359 if (nes_init_serdes(nesdev, hw_rev, port_count, nesadapter,
360 OneG_Mode)) {
361 kfree(nesadapter);
362 return NULL;
363 }
364 nes_init_csr_ne020(nesdev, hw_rev, port_count);
365
366 memset(nesadapter->pft_mcast_map, 255,
367 sizeof nesadapter->pft_mcast_map);
368
356 /* populate the new nesadapter */ 369 /* populate the new nesadapter */
357 nesadapter->devfn = nesdev->pcidev->devfn; 370 nesadapter->devfn = nesdev->pcidev->devfn;
358 nesadapter->bus_number = nesdev->pcidev->bus->number; 371 nesadapter->bus_number = nesdev->pcidev->bus->number;
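Note: hoisting nes_read_eeprom_values() and nes_init_serdes() to after the nesadapter allocation gives every fallible step a single unwind action: free the adapter and bail. The shape of that ordering, reduced to a generic sketch (the names here are stand-ins, not driver functions):

	#include <linux/slab.h>

	struct adapter_sketch { int dummy; };

	static int read_eeprom_sketch(struct adapter_sketch *a) { return 0; }
	static int init_serdes_sketch(struct adapter_sketch *a) { return 0; }

	static struct adapter_sketch *alloc_and_init_sketch(void)
	{
		struct adapter_sketch *a = kzalloc(sizeof(*a), GFP_KERNEL);

		if (!a)
			return NULL;
		/* every fallible step past the allocation unwinds the same way */
		if (read_eeprom_sketch(a) || init_serdes_sketch(a)) {
			kfree(a);
			return NULL;
		}
		return a;
	}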
@@ -468,20 +481,25 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
468 481
469 /* setup port configuration */ 482 /* setup port configuration */
470 if (nesadapter->port_count == 1) { 483 if (nesadapter->port_count == 1) {
471 u32temp = 0x00000000; 484 nesadapter->log_port = 0x00000000;
472 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) 485 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT)
473 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002); 486 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000002);
474 else 487 else
475 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); 488 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
476 } else { 489 } else {
477 if (nesadapter->port_count == 2) 490 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
478 u32temp = 0x00000044; 491 nesadapter->log_port = 0x000000D8;
479 else 492 } else {
480 u32temp = 0x000000e4; 493 if (nesadapter->port_count == 2)
494 nesadapter->log_port = 0x00000044;
495 else
496 nesadapter->log_port = 0x000000e4;
497 }
481 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003); 498 nes_write_indexed(nesdev, NES_IDX_TX_POOL_SIZE, 0x00000003);
482 } 499 }
483 500
484 nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT, u32temp); 501 nes_write_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT,
502 nesadapter->log_port);
485 nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n", 503 nes_debug(NES_DBG_INIT, "Probe time, LOG2PHY=%u\n",
486 nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT)); 504 nes_read_indexed(nesdev, NES_IDX_NIC_LOGPORT_TO_PHYPORT));
487 505
@@ -706,23 +724,43 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
706 * nes_init_serdes 724 * nes_init_serdes
707 */ 725 */
708static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, 726static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
709 u8 OneG_Mode) 727 struct nes_adapter *nesadapter, u8 OneG_Mode)
710{ 728{
711 int i; 729 int i;
712 u32 u32temp; 730 u32 u32temp;
731 u32 serdes_common_control;
713 732
714 if (hw_rev != NE020_REV) { 733 if (hw_rev != NE020_REV) {
715 /* init serdes 0 */ 734 /* init serdes 0 */
716 735
717 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); 736 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
718 if (!OneG_Mode) 737 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
738 serdes_common_control = nes_read_indexed(nesdev,
739 NES_IDX_ETH_SERDES_COMMON_CONTROL0);
740 serdes_common_control |= 0x000000100;
741 nes_write_indexed(nesdev,
742 NES_IDX_ETH_SERDES_COMMON_CONTROL0,
743 serdes_common_control);
744 } else if (!OneG_Mode) {
719 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); 745 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
720 if (port_count > 1) { 746 }
747 if (((port_count > 1) &&
748 (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) ||
749 ((port_count > 2) &&
750 (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) {
721 /* init serdes 1 */ 751 /* init serdes 1 */
722 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); 752 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF);
723 if (!OneG_Mode) 753 if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
754 serdes_common_control = nes_read_indexed(nesdev,
755 NES_IDX_ETH_SERDES_COMMON_CONTROL1);
756 serdes_common_control |= 0x000000100;
757 nes_write_indexed(nesdev,
758 NES_IDX_ETH_SERDES_COMMON_CONTROL1,
759 serdes_common_control);
760 } else if (!OneG_Mode) {
724 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); 761 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000);
725 } 762 }
763 }
726 } else { 764 } else {
727 /* init serdes 0 */ 765 /* init serdes 0 */
728 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); 766 nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008);
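Note: for the PUMA 1G PHY the bring-up switches from writing fixed lane-mode constants to a read-modify-write of the serdes common-control register, so only bit 8 changes and the rest of the field survives. The RMW idiom in isolation (a sketch; nes_read_indexed()/nes_write_indexed() are the driver's CSR accessors):

	#include "nes.h"	/* nes_read_indexed()/nes_write_indexed() */

	/* Set one bit in an indexed CSR without disturbing other fields. */
	static void set_indexed_bit_sketch(struct nes_device *nesdev,
					   u32 reg, u32 bit)
	{
		u32 val = nes_read_indexed(nesdev, reg);

		nes_write_indexed(nesdev, reg, val | bit);
	}

e.g. set_indexed_bit_sketch(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x100) reproduces the hunk above.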
@@ -826,7 +864,8 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
826 864
827 nes_write_indexed(nesdev, 0x00005000, 0x00018000); 865 nes_write_indexed(nesdev, 0x00005000, 0x00018000);
828 /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */ 866 /* nes_write_indexed(nesdev, 0x00005000, 0x00010000); */
829 nes_write_indexed(nesdev, 0x00005004, 0x00020001); 867 nes_write_indexed(nesdev, NES_IDX_WQM_CONFIG1, (wqm_quanta << 1) |
868 0x00000001);
830 nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F); 869 nes_write_indexed(nesdev, 0x00005008, 0x1F1F1F1F);
831 nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F); 870 nes_write_indexed(nesdev, 0x00005010, 0x1F1F1F1F);
832 nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F); 871 nes_write_indexed(nesdev, 0x00005018, 0x1F1F1F1F);
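Note: the constant 0x00020001 previously written to offset 0x5004 (now named NES_IDX_WQM_CONFIG1) packs an enable bit at bit 0 under a quanta field in the upper bits; parameterizing it as (wqm_quanta << 1) | 0x00000001 keeps the enable bit while letting the module parameter pick the quanta. A sketch of the encoding, with the field layout inferred from the constants (an assumption, not documented hardware behavior):

	/* quanta in bits 31:1, enable in bit 0; encode_wqm_config1(0x10000)
	 * reproduces the old hard-coded 0x00020001 value */
	static u32 encode_wqm_config1(u32 quanta)
	{
		return (quanta << 1) | 0x00000001;
	}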
@@ -1226,6 +1265,7 @@ int nes_init_phy(struct nes_device *nesdev)
1226 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { 1265 if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
1227 printk(PFX "%s: Programming mdc config for 1G\n", __func__); 1266 printk(PFX "%s: Programming mdc config for 1G\n", __func__);
1228 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1267 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1268 tx_config &= 0xFFFFFFE3;
1229 tx_config |= 0x04; 1269 tx_config |= 0x04;
1230 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1270 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1231 } 1271 }
@@ -1291,7 +1331,8 @@ int nes_init_phy(struct nes_device *nesdev)
1291 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { 1331 (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
1292 /* setup 10G MDIO operation */ 1332 /* setup 10G MDIO operation */
1293 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); 1333 tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
1294 tx_config |= 0x14; 1334 tx_config &= 0xFFFFFFE3;
1335 tx_config |= 0x15;
1295 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); 1336 nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
1296 } 1337 }
1297 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { 1338 if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
@@ -1315,7 +1356,7 @@ int nes_init_phy(struct nes_device *nesdev)
1315 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008); 1356 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
1316 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098); 1357 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
1317 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00); 1358 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
1318 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0000); 1359 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001);
1319 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528); 1360 nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
1320 1361
1321 /* 1362 /*
@@ -1759,9 +1800,14 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
1759 */ 1800 */
1760void nes_destroy_nic_qp(struct nes_vnic *nesvnic) 1801void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1761{ 1802{
1803 u64 u64temp;
1804 dma_addr_t bus_address;
1762 struct nes_device *nesdev = nesvnic->nesdev; 1805 struct nes_device *nesdev = nesvnic->nesdev;
1763 struct nes_hw_cqp_wqe *cqp_wqe; 1806 struct nes_hw_cqp_wqe *cqp_wqe;
1807 struct nes_hw_nic_sq_wqe *nic_sqe;
1764 struct nes_hw_nic_rq_wqe *nic_rqe; 1808 struct nes_hw_nic_rq_wqe *nic_rqe;
1809 __le16 *wqe_fragment_length;
1810 u16 wqe_fragment_index;
1765 u64 wqe_frag; 1811 u64 wqe_frag;
1766 u32 cqp_head; 1812 u32 cqp_head;
1767 unsigned long flags; 1813 unsigned long flags;
@@ -1770,14 +1816,69 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
1770 /* Free remaining NIC receive buffers */ 1816 /* Free remaining NIC receive buffers */
1771 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) { 1817 while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
1772 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail]; 1818 nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
1773 wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]); 1819 wqe_frag = (u64)le32_to_cpu(
1774 wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32; 1820 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
1821 wqe_frag |= ((u64)le32_to_cpu(
1822 nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX]))<<32;
1775 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag, 1823 pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
1776 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE); 1824 nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
1777 dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]); 1825 dev_kfree_skb(nesvnic->nic.rx_skb[nesvnic->nic.rq_tail++]);
1778 nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1); 1826 nesvnic->nic.rq_tail &= (nesvnic->nic.rq_size - 1);
1779 } 1827 }
1780 1828
1829 /* Free remaining NIC transmit buffers */
1830 while (nesvnic->nic.sq_head != nesvnic->nic.sq_tail) {
1831 nic_sqe = &nesvnic->nic.sq_vbase[nesvnic->nic.sq_tail];
1832 wqe_fragment_index = 1;
1833 wqe_fragment_length = (__le16 *)
1834 &nic_sqe->wqe_words[NES_NIC_SQ_WQE_LENGTH_0_TAG_IDX];
1835 /* bump past the vlan tag */
1836 wqe_fragment_length++;
1837 if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
1838 u64temp = (u64)le32_to_cpu(
1839 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
1840 wqe_fragment_index*2]);
1841 u64temp += ((u64)le32_to_cpu(
1842 nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
1843 + wqe_fragment_index*2]))<<32;
1844 bus_address = (dma_addr_t)u64temp;
1845 if (test_and_clear_bit(nesvnic->nic.sq_tail,
1846 nesvnic->nic.first_frag_overflow)) {
1847 pci_unmap_single(nesdev->pcidev,
1848 bus_address,
1849 le16_to_cpu(wqe_fragment_length[
1850 wqe_fragment_index++]),
1851 PCI_DMA_TODEVICE);
1852 }
1853 for (; wqe_fragment_index < 5; wqe_fragment_index++) {
1854 if (wqe_fragment_length[wqe_fragment_index]) {
1855 u64temp = le32_to_cpu(
1856 nic_sqe->wqe_words[
1857 NES_NIC_SQ_WQE_FRAG0_LOW_IDX+
1858 wqe_fragment_index*2]);
1859 u64temp += ((u64)le32_to_cpu(
1860 nic_sqe->wqe_words[
1861 NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+
1862 wqe_fragment_index*2]))<<32;
1863 bus_address = (dma_addr_t)u64temp;
1864 pci_unmap_page(nesdev->pcidev,
1865 bus_address,
1866 le16_to_cpu(
1867 wqe_fragment_length[
1868 wqe_fragment_index]),
1869 PCI_DMA_TODEVICE);
1870 } else
1871 break;
1872 }
1873 }
1874 if (nesvnic->nic.tx_skb[nesvnic->nic.sq_tail])
1875 dev_kfree_skb(
1876 nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]);
1877
 1878 nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1)
 1879 & (nesvnic->nic.sq_size - 1);
1880 }
1881
1781 spin_lock_irqsave(&nesdev->cqp.lock, flags); 1882 spin_lock_irqsave(&nesdev->cqp.lock, flags);
1782 1883
1783 /* Destroy NIC QP */ 1884 /* Destroy NIC QP */
@@ -1894,7 +1995,30 @@ int nes_napi_isr(struct nes_device *nesdev)
1894 } 1995 }
1895} 1996}
1896 1997
1897 1998static void process_critical_error(struct nes_device *nesdev)
1999{
2000 u32 debug_error;
2001 u32 nes_idx_debug_error_masks0 = 0;
2002 u16 error_module = 0;
2003
2004 debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS);
2005 printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
2006 (u16)debug_error);
2007 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
2008 0x01010000 | (debug_error & 0x0000ffff));
2009 if (crit_err_count++ > 10)
2010 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
2011 error_module = (u16) (debug_error & 0x1F00) >> 8;
2012 if (++nesdev->nesadapter->crit_error_count[error_module-1] >=
2013 nes_max_critical_error_count) {
2014 printk(KERN_ERR PFX "Masking off critical error for module "
2015 "0x%02X\n", (u16)error_module);
2016 nes_idx_debug_error_masks0 = nes_read_indexed(nesdev,
2017 NES_IDX_DEBUG_ERROR_MASKS0);
2018 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS0,
2019 nes_idx_debug_error_masks0 | (1 << error_module));
2020 }
2021}
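Note: process_critical_error() layers per-module accounting on top of the old global crit_err_count: bits 12:8 of the status word name the reporting module, and once a module's count reaches nes_max_critical_error_count its bit in DEBUG_ERROR_MASKS0 is set so it can no longer storm the interrupt. The decode-and-threshold core as a sketch (the driver indexes counts[module - 1], implying module ids start at 1):

	/* Returns nonzero when this module has hit its report limit and
	 * should be masked off in NES_IDX_DEBUG_ERROR_MASKS0. */
	static int module_hit_limit_sketch(u32 debug_error, u8 *counts, u8 limit)
	{
		u16 module = (u16)(debug_error & 0x1F00) >> 8;

		return ++counts[module - 1] >= limit;
	}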
1898/** 2022/**
1899 * nes_dpc 2023 * nes_dpc
1900 */ 2024 */
@@ -1909,7 +2033,6 @@ void nes_dpc(unsigned long param)
1909 u32 timer_stat; 2033 u32 timer_stat;
1910 u32 temp_int_stat; 2034 u32 temp_int_stat;
1911 u32 intf_int_stat; 2035 u32 intf_int_stat;
1912 u32 debug_error;
1913 u32 processed_intf_int = 0; 2036 u32 processed_intf_int = 0;
1914 u16 processed_timer_int = 0; 2037 u16 processed_timer_int = 0;
1915 u16 completion_ints = 0; 2038 u16 completion_ints = 0;
@@ -1987,14 +2110,7 @@ void nes_dpc(unsigned long param)
1987 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT); 2110 intf_int_stat = nes_read32(nesdev->regs+NES_INTF_INT_STAT);
1988 intf_int_stat &= nesdev->intf_int_req; 2111 intf_int_stat &= nesdev->intf_int_req;
1989 if (NES_INTF_INT_CRITERR & intf_int_stat) { 2112 if (NES_INTF_INT_CRITERR & intf_int_stat) {
1990 debug_error = nes_read_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS); 2113 process_critical_error(nesdev);
1991 printk(KERN_ERR PFX "Critical Error reported by device!!! 0x%02X\n",
1992 (u16)debug_error);
1993 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_CONTROL_STATUS,
1994 0x01010000 | (debug_error & 0x0000ffff));
1995 /* BUG(); */
1996 if (crit_err_count++ > 10)
1997 nes_write_indexed(nesdev, NES_IDX_DEBUG_ERROR_MASKS1, 1 << 0x17);
1998 } 2114 }
1999 if (NES_INTF_INT_PCIERR & intf_int_stat) { 2115 if (NES_INTF_INT_PCIERR & intf_int_stat) {
2000 printk(KERN_ERR PFX "PCI Error reported by device!!!\n"); 2116 printk(KERN_ERR PFX "PCI Error reported by device!!!\n");
@@ -2258,7 +2374,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
2258 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 2374 spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
2259 } 2375 }
2260 /* read the PHY interrupt status register */ 2376 /* read the PHY interrupt status register */
2261 if (nesadapter->OneG_Mode) { 2377 if ((nesadapter->OneG_Mode) &&
2378 (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
2262 do { 2379 do {
2263 nes_read_1G_phy_reg(nesdev, 0x1a, 2380 nes_read_1G_phy_reg(nesdev, 0x1a,
2264 nesadapter->phy_index[mac_index], &phy_data); 2381 nesadapter->phy_index[mac_index], &phy_data);
@@ -3077,6 +3194,22 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
3077 nes_cm_disconn(nesqp); 3194 nes_cm_disconn(nesqp);
3078 break; 3195 break;
3079 /* TODO: additional AEs need to be here */ 3196 /* TODO: additional AEs need to be here */
3197 case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
3198 nesqp = *((struct nes_qp **)&context);
3199 spin_lock_irqsave(&nesqp->lock, flags);
3200 nesqp->hw_iwarp_state = iwarp_state;
3201 nesqp->hw_tcp_state = tcp_state;
3202 nesqp->last_aeq = async_event_id;
3203 spin_unlock_irqrestore(&nesqp->lock, flags);
3204 if (nesqp->ibqp.event_handler) {
3205 ibevent.device = nesqp->ibqp.device;
3206 ibevent.element.qp = &nesqp->ibqp;
3207 ibevent.event = IB_EVENT_QP_ACCESS_ERR;
3208 nesqp->ibqp.event_handler(&ibevent,
3209 nesqp->ibqp.qp_context);
3210 }
3211 nes_cm_disconn(nesqp);
3212 break;
3080 default: 3213 default:
3081 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n", 3214 nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
3082 async_event_id); 3215 async_event_id);
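Note: the new NES_AEQE_AEID_AMP_BOUNDS_VIOLATION case follows the same recipe as the neighbouring fatal AEs: latch the hardware state under the QP lock, then deliver IB_EVENT_QP_ACCESS_ERR through the consumer's event handler and start the disconnect. The dispatch step in isolation (a sketch against the standard verbs structures):

	#include <rdma/ib_verbs.h>

	/* Report an asynchronous QP access error to the ULP, if it
	 * registered an event handler at QP creation time. */
	static void report_qp_access_err_sketch(struct ib_qp *ibqp)
	{
		struct ib_event ibevent;

		if (!ibqp->event_handler)
			return;

		ibevent.device     = ibqp->device;
		ibevent.element.qp = ibqp;
		ibevent.event      = IB_EVENT_QP_ACCESS_ERR;
		ibqp->event_handler(&ibevent, ibqp->qp_context);
	}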
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 7b81e0ae0076..610b9d859597 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -156,6 +156,7 @@ enum indexed_regs {
156 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004, 156 NES_IDX_ENDNODE0_NSTAT_TX_OCTETS_HI = 0x7004,
157 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008, 157 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_LO = 0x7008,
158 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c, 158 NES_IDX_ENDNODE0_NSTAT_TX_FRAMES_HI = 0x700c,
159 NES_IDX_WQM_CONFIG1 = 0x5004,
159 NES_IDX_CM_CONFIG = 0x5100, 160 NES_IDX_CM_CONFIG = 0x5100,
160 NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000, 161 NES_IDX_NIC_LOGPORT_TO_PHYPORT = 0x6000,
161 NES_IDX_NIC_PHYPORT_TO_USW = 0x6008, 162 NES_IDX_NIC_PHYPORT_TO_USW = 0x6008,
@@ -967,6 +968,7 @@ struct nes_arp_entry {
967#define DEFAULT_JUMBO_NES_QL_TARGET 40 968#define DEFAULT_JUMBO_NES_QL_TARGET 40
968#define DEFAULT_JUMBO_NES_QL_HIGH 128 969#define DEFAULT_JUMBO_NES_QL_HIGH 128
969#define NES_NIC_CQ_DOWNWARD_TREND 16 970#define NES_NIC_CQ_DOWNWARD_TREND 16
971#define NES_PFT_SIZE 48
970 972
971struct nes_hw_tune_timer { 973struct nes_hw_tune_timer {
972 /* u16 cq_count; */ 974 /* u16 cq_count; */
@@ -1079,6 +1081,7 @@ struct nes_adapter {
1079 u32 et_rx_max_coalesced_frames_high; 1081 u32 et_rx_max_coalesced_frames_high;
1080 u32 et_rate_sample_interval; 1082 u32 et_rate_sample_interval;
1081 u32 timer_int_limit; 1083 u32 timer_int_limit;
1084 u32 wqm_quanta;
1082 1085
1083 /* Adapter base MAC address */ 1086 /* Adapter base MAC address */
1084 u32 mac_addr_low; 1087 u32 mac_addr_low;
@@ -1094,12 +1097,14 @@ struct nes_adapter {
1094 u16 pd_config_base[4]; 1097 u16 pd_config_base[4];
1095 1098
1096 u16 link_interrupt_count[4]; 1099 u16 link_interrupt_count[4];
1100 u8 crit_error_count[32];
1097 1101
1098 /* the phy index for each port */ 1102 /* the phy index for each port */
1099 u8 phy_index[4]; 1103 u8 phy_index[4];
1100 u8 mac_sw_state[4]; 1104 u8 mac_sw_state[4];
1101 u8 mac_link_down[4]; 1105 u8 mac_link_down[4];
1102 u8 phy_type[4]; 1106 u8 phy_type[4];
1107 u8 log_port;
1103 1108
1104 /* PCI information */ 1109 /* PCI information */
1105 unsigned int devfn; 1110 unsigned int devfn;
@@ -1113,6 +1118,7 @@ struct nes_adapter {
1113 u8 virtwq; 1118 u8 virtwq;
1114 u8 et_use_adaptive_rx_coalesce; 1119 u8 et_use_adaptive_rx_coalesce;
1115 u8 adapter_fcn_count; 1120 u8 adapter_fcn_count;
1121 u8 pft_mcast_map[NES_PFT_SIZE];
1116}; 1122};
1117 1123
1118struct nes_pbl { 1124struct nes_pbl {
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 1b0938c87774..730358637bb6 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -91,6 +91,7 @@ static struct nic_qp_map *nic_qp_mapping_per_function[] = {
91static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK 91static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
92 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN; 92 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
93static int debug = -1; 93static int debug = -1;
94static int nics_per_function = 1;
94 95
95/** 96/**
96 * nes_netdev_poll 97 * nes_netdev_poll
@@ -201,7 +202,8 @@ static int nes_netdev_open(struct net_device *netdev)
201 nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW" 202 nes_debug(NES_DBG_NETDEV, "i=%d, perfect filter table index= %d, PERF FILTER LOW"
202 " (Addr:%08X) = %08X, HIGH = %08X.\n", 203 " (Addr:%08X) = %08X, HIGH = %08X.\n",
203 i, nesvnic->qp_nic_index[i], 204 i, nesvnic->qp_nic_index[i],
204 NES_IDX_PERFECT_FILTER_LOW+((nesvnic->perfect_filter_index + i) * 8), 205 NES_IDX_PERFECT_FILTER_LOW+
206 (nesvnic->qp_nic_index[i] * 8),
205 macaddr_low, 207 macaddr_low,
206 (u32)macaddr_high | NES_MAC_ADDR_VALID | 208 (u32)macaddr_high | NES_MAC_ADDR_VALID |
207 ((((u32)nesvnic->nic_index) << 16))); 209 ((((u32)nesvnic->nic_index) << 16)));
@@ -272,14 +274,18 @@ static int nes_netdev_stop(struct net_device *netdev)
272 break; 274 break;
273 } 275 }
274 276
275 if (first_nesvnic->netdev_open == 0) 277 if ((first_nesvnic->netdev_open == 1) && (first_nesvnic != nesvnic) &&
278 (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) !=
279 PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
280 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+
281 (0x200*nesdev->mac_index), 0xffffffff);
282 nes_write_indexed(first_nesvnic->nesdev,
283 NES_IDX_MAC_INT_MASK+
284 (0x200*first_nesvnic->nesdev->mac_index),
285 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
286 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
287 } else {
276 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff); 288 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK+(0x200*nesdev->mac_index), 0xffffffff);
277 else if ((first_nesvnic != nesvnic) &&
278 (PCI_FUNC(first_nesvnic->nesdev->pcidev->devfn) != PCI_FUNC(nesvnic->nesdev->pcidev->devfn))) {
279 nes_write_indexed(nesdev, NES_IDX_MAC_INT_MASK + (0x200 * nesdev->mac_index), 0xffffffff);
280 nes_write_indexed(first_nesvnic->nesdev, NES_IDX_MAC_INT_MASK + (0x200 * first_nesvnic->nesdev->mac_index),
281 ~(NES_MAC_INT_LINK_STAT_CHG | NES_MAC_INT_XGMII_EXT |
282 NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR));
283 } 289 }
284 290
285 nic_active_mask = ~((u32)(1 << nesvnic->nic_index)); 291 nic_active_mask = ~((u32)(1 << nesvnic->nic_index));
@@ -437,7 +443,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
437 struct nes_hw_nic_sq_wqe *nic_sqe; 443 struct nes_hw_nic_sq_wqe *nic_sqe;
438 struct tcphdr *tcph; 444 struct tcphdr *tcph;
439 /* struct udphdr *udph; */ 445 /* struct udphdr *udph; */
440#define NES_MAX_TSO_FRAGS 18 446#define NES_MAX_TSO_FRAGS MAX_SKB_FRAGS
441 /* 64K segment plus overflow on each side */ 447 /* 64K segment plus overflow on each side */
442 dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS]; 448 dma_addr_t tso_bus_address[NES_MAX_TSO_FRAGS];
443 dma_addr_t bus_address; 449 dma_addr_t bus_address;
@@ -605,6 +611,8 @@ tso_sq_no_longer_full:
605 wqe_fragment_length[wqe_fragment_index] = 0; 611 wqe_fragment_length[wqe_fragment_index] = 0;
606 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX, 612 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
607 bus_address); 613 bus_address);
614 tso_wqe_length += skb_headlen(skb) -
615 original_first_length;
608 } 616 }
609 while (wqe_fragment_index < 5) { 617 while (wqe_fragment_index < 5) {
610 wqe_fragment_length[wqe_fragment_index] = 618 wqe_fragment_length[wqe_fragment_index] =
@@ -827,6 +835,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
827{ 835{
828 struct nes_vnic *nesvnic = netdev_priv(netdev); 836 struct nes_vnic *nesvnic = netdev_priv(netdev);
829 struct nes_device *nesdev = nesvnic->nesdev; 837 struct nes_device *nesdev = nesvnic->nesdev;
838 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
830 struct dev_mc_list *multicast_addr; 839 struct dev_mc_list *multicast_addr;
831 u32 nic_active_bit; 840 u32 nic_active_bit;
832 u32 nic_active; 841 u32 nic_active;
@@ -836,7 +845,12 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
836 u8 mc_all_on = 0; 845 u8 mc_all_on = 0;
837 u8 mc_index; 846 u8 mc_index;
838 int mc_nic_index = -1; 847 int mc_nic_index = -1;
848 u8 pft_entries_preallocated = max(nesadapter->adapter_fcn_count *
849 nics_per_function, 4);
850 u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
851 unsigned long flags;
839 852
853 spin_lock_irqsave(&nesadapter->resource_lock, flags);
840 nic_active_bit = 1 << nesvnic->nic_index; 854 nic_active_bit = 1 << nesvnic->nic_index;
841 855
842 if (netdev->flags & IFF_PROMISC) { 856 if (netdev->flags & IFF_PROMISC) {
@@ -847,7 +861,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
847 nic_active |= nic_active_bit; 861 nic_active |= nic_active_bit;
848 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active); 862 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
849 mc_all_on = 1; 863 mc_all_on = 1;
850 } else if ((netdev->flags & IFF_ALLMULTI) || (netdev->mc_count > NES_MULTICAST_PF_MAX) || 864 } else if ((netdev->flags & IFF_ALLMULTI) ||
851 (nesvnic->nic_index > 3)) { 865 (nesvnic->nic_index > 3)) {
852 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL); 866 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
853 nic_active |= nic_active_bit; 867 nic_active |= nic_active_bit;
@@ -866,17 +880,34 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
866 } 880 }
867 881
 868 nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n", 882 nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
869 netdev->mc_count, (netdev->flags & IFF_PROMISC)?1:0, 883 netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
870 (netdev->flags & IFF_ALLMULTI)?1:0); 884 !!(netdev->flags & IFF_ALLMULTI));
871 if (!mc_all_on) { 885 if (!mc_all_on) {
872 multicast_addr = netdev->mc_list; 886 multicast_addr = netdev->mc_list;
873 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 0x80; 887 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
874 perfect_filter_register_address += nesvnic->nic_index*0x40; 888 pft_entries_preallocated * 0x8;
875 for (mc_index=0; mc_index < NES_MULTICAST_PF_MAX; mc_index++) { 889 for (mc_index = 0; mc_index < max_pft_entries_avaiable;
876 while (multicast_addr && nesvnic->mcrq_mcast_filter && ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic, multicast_addr->dmi_addr)) == 0)) 890 mc_index++) {
891 while (multicast_addr && nesvnic->mcrq_mcast_filter &&
892 ((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
893 multicast_addr->dmi_addr)) == 0)) {
877 multicast_addr = multicast_addr->next; 894 multicast_addr = multicast_addr->next;
895 }
878 if (mc_nic_index < 0) 896 if (mc_nic_index < 0)
879 mc_nic_index = nesvnic->nic_index; 897 mc_nic_index = nesvnic->nic_index;
898 while (nesadapter->pft_mcast_map[mc_index] < 16 &&
899 nesadapter->pft_mcast_map[mc_index] !=
900 nesvnic->nic_index &&
901 mc_index < max_pft_entries_avaiable) {
902 nes_debug(NES_DBG_NIC_RX,
903 "mc_index=%d skipping nic_index=%d,\
904 used for=%d \n", mc_index,
905 nesvnic->nic_index,
906 nesadapter->pft_mcast_map[mc_index]);
907 mc_index++;
908 }
909 if (mc_index >= max_pft_entries_avaiable)
910 break;
880 if (multicast_addr) { 911 if (multicast_addr) {
881 DECLARE_MAC_BUF(mac); 912 DECLARE_MAC_BUF(mac);
882 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n", 913 nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
@@ -897,15 +928,33 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
897 (u32)macaddr_high | NES_MAC_ADDR_VALID | 928 (u32)macaddr_high | NES_MAC_ADDR_VALID |
898 ((((u32)(1<<mc_nic_index)) << 16))); 929 ((((u32)(1<<mc_nic_index)) << 16)));
899 multicast_addr = multicast_addr->next; 930 multicast_addr = multicast_addr->next;
931 nesadapter->pft_mcast_map[mc_index] =
932 nesvnic->nic_index;
900 } else { 933 } else {
901 nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n", 934 nes_debug(NES_DBG_NIC_RX, "Clearing MC Address at register 0x%04X\n",
902 perfect_filter_register_address+(mc_index * 8)); 935 perfect_filter_register_address+(mc_index * 8));
903 nes_write_indexed(nesdev, 936 nes_write_indexed(nesdev,
904 perfect_filter_register_address+4+(mc_index * 8), 937 perfect_filter_register_address+4+(mc_index * 8),
905 0); 938 0);
939 nesadapter->pft_mcast_map[mc_index] = 255;
906 } 940 }
907 } 941 }
942 /* PFT is not large enough */
943 if (multicast_addr && multicast_addr->next) {
944 nic_active = nes_read_indexed(nesdev,
945 NES_IDX_NIC_MULTICAST_ALL);
946 nic_active |= nic_active_bit;
947 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
948 nic_active);
949 nic_active = nes_read_indexed(nesdev,
950 NES_IDX_NIC_UNICAST_ALL);
951 nic_active &= ~nic_active_bit;
952 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL,
953 nic_active);
954 }
908 } 955 }
956
957 spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
909} 958}
910 959
911 960
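Note: the multicast rewrite replaces the fixed 16-entry per-NIC window with a shared pool: pft_mcast_map[] records which nic_index owns each of the NES_PFT_SIZE perfect-filter entries (255 = free), slots owned by another NIC are skipped, and when the walk runs out the code falls back to the multicast-all bit. The allocation walk, reduced to a sketch:

	#include <linux/types.h>

	#define PFT_FREE_SKETCH 255

	/* Next perfect-filter slot this NIC may use: one it already owns,
	 * or a free one.  -1 means the shared table is exhausted and the
	 * caller should fall back to NES_IDX_NIC_MULTICAST_ALL. */
	static int next_pft_slot_sketch(const u8 *map, int size,
					u8 nic_index, int start)
	{
		int i;

		for (i = start; i < size; i++)
			if (map[i] == PFT_FREE_SKETCH || map[i] == nic_index)
				return i;
		return -1;
	}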
@@ -918,6 +967,10 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
918 struct nes_device *nesdev = nesvnic->nesdev; 967 struct nes_device *nesdev = nesvnic->nesdev;
919 int ret = 0; 968 int ret = 0;
920 u8 jumbomode = 0; 969 u8 jumbomode = 0;
970 u32 nic_active;
971 u32 nic_active_bit;
972 u32 uc_all_active;
973 u32 mc_all_active;
921 974
922 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu)) 975 if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
923 return -EINVAL; 976 return -EINVAL;
@@ -931,8 +984,24 @@ static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
931 nes_nic_init_timer_defaults(nesdev, jumbomode); 984 nes_nic_init_timer_defaults(nesdev, jumbomode);
932 985
933 if (netif_running(netdev)) { 986 if (netif_running(netdev)) {
987 nic_active_bit = 1 << nesvnic->nic_index;
988 mc_all_active = nes_read_indexed(nesdev,
989 NES_IDX_NIC_MULTICAST_ALL) & nic_active_bit;
990 uc_all_active = nes_read_indexed(nesdev,
991 NES_IDX_NIC_UNICAST_ALL) & nic_active_bit;
992
934 nes_netdev_stop(netdev); 993 nes_netdev_stop(netdev);
935 nes_netdev_open(netdev); 994 nes_netdev_open(netdev);
995
996 nic_active = nes_read_indexed(nesdev,
997 NES_IDX_NIC_MULTICAST_ALL);
998 nic_active |= mc_all_active;
999 nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
1000 nic_active);
1001
1002 nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
1003 nic_active |= uc_all_active;
1004 nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
936 } 1005 }
937 1006
938 return ret; 1007 return ret;
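Note: nes_netdev_stop() clears this NIC's bit in the shared multicast-all/unicast-all registers, so the MTU path now snapshots both bits before the stop/open cycle and ORs them back in afterwards. The idiom, generalized (a sketch using the driver's CSR accessors):

	#include "nes.h"	/* nes_read_indexed()/nes_write_indexed() */

	/* Capture one NIC's bit in a shared "all" register ... */
	static u32 save_nic_bit_sketch(struct nes_device *nesdev,
				       u32 reg, u32 bit)
	{
		return nes_read_indexed(nesdev, reg) & bit;
	}

	/* ... and OR it back after a stop/open cycle cleared it. */
	static void restore_nic_bit_sketch(struct nes_device *nesdev,
					   u32 reg, u32 saved)
	{
		nes_write_indexed(nesdev, reg,
				  nes_read_indexed(nesdev, reg) | saved);
	}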
@@ -1208,10 +1277,12 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
1208 struct ethtool_drvinfo *drvinfo) 1277 struct ethtool_drvinfo *drvinfo)
1209{ 1278{
1210 struct nes_vnic *nesvnic = netdev_priv(netdev); 1279 struct nes_vnic *nesvnic = netdev_priv(netdev);
1280 struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
1211 1281
1212 strcpy(drvinfo->driver, DRV_NAME); 1282 strcpy(drvinfo->driver, DRV_NAME);
1213 strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev)); 1283 strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev));
1214 strcpy(drvinfo->fw_version, "TBD"); 1284 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
1285 nesadapter->firmware_version & 0x000000ff);
1215 strcpy(drvinfo->version, DRV_VERSION); 1286 strcpy(drvinfo->version, DRV_VERSION);
1216 drvinfo->n_stats = nes_netdev_get_stats_count(netdev); 1287 drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
1217 drvinfo->testinfo_len = 0; 1288 drvinfo->testinfo_len = 0;
@@ -1587,7 +1658,9 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1587 nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id, 1658 nesvnic, (unsigned long)netdev->features, nesvnic->nic.qp_id,
1588 nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index); 1659 nesvnic->nic_index, nesvnic->logical_port, nesdev->mac_index);
1589 1660
1590 if (nesvnic->nesdev->nesadapter->port_count == 1) { 1661 if (nesvnic->nesdev->nesadapter->port_count == 1 &&
1662 nesvnic->nesdev->nesadapter->adapter_fcn_count == 1) {
1663
1591 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1664 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1592 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1; 1665 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 1;
1593 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) { 1666 if (nes_drv_opt & NES_DRV_OPT_DUAL_LOGICAL_PORT) {
@@ -1598,11 +1671,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
1598 nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3; 1671 nesvnic->qp_nic_index[3] = nesvnic->nic_index + 3;
1599 } 1672 }
1600 } else { 1673 } else {
1601 if (nesvnic->nesdev->nesadapter->port_count == 2) { 1674 if (nesvnic->nesdev->nesadapter->port_count == 2 ||
1602 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1675 (nesvnic->nesdev->nesadapter->port_count == 1 &&
1603 nesvnic->qp_nic_index[1] = nesvnic->nic_index + 2; 1676 nesvnic->nesdev->nesadapter->adapter_fcn_count == 2)) {
1604 nesvnic->qp_nic_index[2] = 0xf; 1677 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1605 nesvnic->qp_nic_index[3] = 0xf; 1678 nesvnic->qp_nic_index[1] = nesvnic->nic_index
1679 + 2;
1680 nesvnic->qp_nic_index[2] = 0xf;
1681 nesvnic->qp_nic_index[3] = 0xf;
1606 } else { 1682 } else {
1607 nesvnic->qp_nic_index[0] = nesvnic->nic_index; 1683 nesvnic->qp_nic_index[0] = nesvnic->nic_index;
1608 nesvnic->qp_nic_index[1] = 0xf; 1684 nesvnic->qp_nic_index[1] = 0xf;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 05eb41b8ab63..68ba5c3482e4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -268,10 +268,9 @@ struct ipoib_lro {
268}; 268};
269 269
270/* 270/*
271 * Device private locking: tx_lock protects members used in TX fast 271 * Device private locking: network stack tx_lock protects members used
272 * path (and we use LLTX so upper layers don't do extra locking). 272 * in TX fast path, lock protects everything else. lock nests inside
273 * lock protects everything else. lock nests inside of tx_lock (ie 273 * of tx_lock (ie tx_lock must be acquired first if needed).
274 * tx_lock must be acquired first if needed).
275 */ 274 */
276struct ipoib_dev_priv { 275struct ipoib_dev_priv {
277 spinlock_t lock; 276 spinlock_t lock;
@@ -320,7 +319,6 @@ struct ipoib_dev_priv {
320 319
321 struct ipoib_rx_buf *rx_ring; 320 struct ipoib_rx_buf *rx_ring;
322 321
323 spinlock_t tx_lock;
324 struct ipoib_tx_buf *tx_ring; 322 struct ipoib_tx_buf *tx_ring;
325 unsigned tx_head; 323 unsigned tx_head;
326 unsigned tx_tail; 324 unsigned tx_tail;
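Note: with the private tx_lock gone, the TX fast path relies on the per-queue lock the core stack already takes, and the nesting rule in the comment above becomes: netdev TX lock first, priv->lock second. The order every converted call site in this patch follows (schematic, not an IPoIB function):

	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	static void locking_order_sketch(struct net_device *dev,
					 spinlock_t *lock)
	{
		unsigned long flags;

		netif_tx_lock_bh(dev);		/* outer: stack TX lock */
		spin_lock_irqsave(lock, flags);	/* inner: device private */

		/* ... touch state shared with the TX path ... */

		spin_unlock_irqrestore(lock, flags);
		netif_tx_unlock_bh(dev);
	}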
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 341ffedafed6..7b14c2c39500 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
786 786
787 dev_kfree_skb_any(tx_req->skb); 787 dev_kfree_skb_any(tx_req->skb);
788 788
789 spin_lock_irqsave(&priv->tx_lock, flags); 789 netif_tx_lock(dev);
790
790 ++tx->tx_tail; 791 ++tx->tx_tail;
791 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && 792 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
792 netif_queue_stopped(dev) && 793 netif_queue_stopped(dev) &&
@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
801 "(status=%d, wrid=%d vend_err %x)\n", 802 "(status=%d, wrid=%d vend_err %x)\n",
802 wc->status, wr_id, wc->vendor_err); 803 wc->status, wr_id, wc->vendor_err);
803 804
804 spin_lock(&priv->lock); 805 spin_lock_irqsave(&priv->lock, flags);
805 neigh = tx->neigh; 806 neigh = tx->neigh;
806 807
807 if (neigh) { 808 if (neigh) {
@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
821 822
822 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); 823 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
823 824
824 spin_unlock(&priv->lock); 825 spin_unlock_irqrestore(&priv->lock, flags);
825 } 826 }
826 827
827 spin_unlock_irqrestore(&priv->tx_lock, flags); 828 netif_tx_unlock(dev);
828} 829}
829 830
830int ipoib_cm_dev_open(struct net_device *dev) 831int ipoib_cm_dev_open(struct net_device *dev)
@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1149{ 1150{
1150 struct ipoib_dev_priv *priv = netdev_priv(p->dev); 1151 struct ipoib_dev_priv *priv = netdev_priv(p->dev);
1151 struct ipoib_cm_tx_buf *tx_req; 1152 struct ipoib_cm_tx_buf *tx_req;
1152 unsigned long flags;
1153 unsigned long begin; 1153 unsigned long begin;
1154 1154
1155 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", 1155 ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
@@ -1180,12 +1180,12 @@ timeout:
1180 DMA_TO_DEVICE); 1180 DMA_TO_DEVICE);
1181 dev_kfree_skb_any(tx_req->skb); 1181 dev_kfree_skb_any(tx_req->skb);
1182 ++p->tx_tail; 1182 ++p->tx_tail;
1183 spin_lock_irqsave(&priv->tx_lock, flags); 1183 netif_tx_lock_bh(p->dev);
1184 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && 1184 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1185 netif_queue_stopped(p->dev) && 1185 netif_queue_stopped(p->dev) &&
1186 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 1186 test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1187 netif_wake_queue(p->dev); 1187 netif_wake_queue(p->dev);
1188 spin_unlock_irqrestore(&priv->tx_lock, flags); 1188 netif_tx_unlock_bh(p->dev);
1189 } 1189 }
1190 1190
1191 if (p->qp) 1191 if (p->qp)
@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1202 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); 1202 struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
1203 struct net_device *dev = priv->dev; 1203 struct net_device *dev = priv->dev;
1204 struct ipoib_neigh *neigh; 1204 struct ipoib_neigh *neigh;
1205 unsigned long flags;
1205 int ret; 1206 int ret;
1206 1207
1207 switch (event->event) { 1208 switch (event->event) {
@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1220 case IB_CM_REJ_RECEIVED: 1221 case IB_CM_REJ_RECEIVED:
1221 case IB_CM_TIMEWAIT_EXIT: 1222 case IB_CM_TIMEWAIT_EXIT:
1222 ipoib_dbg(priv, "CM error %d.\n", event->event); 1223 ipoib_dbg(priv, "CM error %d.\n", event->event);
1223 spin_lock_irq(&priv->tx_lock); 1224 netif_tx_lock_bh(dev);
1224 spin_lock(&priv->lock); 1225 spin_lock_irqsave(&priv->lock, flags);
1225 neigh = tx->neigh; 1226 neigh = tx->neigh;
1226 1227
1227 if (neigh) { 1228 if (neigh) {
@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1239 queue_work(ipoib_workqueue, &priv->cm.reap_task); 1240 queue_work(ipoib_workqueue, &priv->cm.reap_task);
1240 } 1241 }
1241 1242
1242 spin_unlock(&priv->lock); 1243 spin_unlock_irqrestore(&priv->lock, flags);
1243 spin_unlock_irq(&priv->tx_lock); 1244 netif_tx_unlock_bh(dev);
1244 break; 1245 break;
1245 default: 1246 default:
1246 break; 1247 break;
@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1294 struct ib_sa_path_rec pathrec; 1295 struct ib_sa_path_rec pathrec;
1295 u32 qpn; 1296 u32 qpn;
1296 1297
1297 spin_lock_irqsave(&priv->tx_lock, flags); 1298 netif_tx_lock_bh(dev);
1298 spin_lock(&priv->lock); 1299 spin_lock_irqsave(&priv->lock, flags);
1300
1299 while (!list_empty(&priv->cm.start_list)) { 1301 while (!list_empty(&priv->cm.start_list)) {
1300 p = list_entry(priv->cm.start_list.next, typeof(*p), list); 1302 p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1301 list_del_init(&p->list); 1303 list_del_init(&p->list);
1302 neigh = p->neigh; 1304 neigh = p->neigh;
1303 qpn = IPOIB_QPN(neigh->neighbour->ha); 1305 qpn = IPOIB_QPN(neigh->neighbour->ha);
1304 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); 1306 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1305 spin_unlock(&priv->lock); 1307
1306 spin_unlock_irqrestore(&priv->tx_lock, flags); 1308 spin_unlock_irqrestore(&priv->lock, flags);
1309 netif_tx_unlock_bh(dev);
1310
1307 ret = ipoib_cm_tx_init(p, qpn, &pathrec); 1311 ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1308 spin_lock_irqsave(&priv->tx_lock, flags); 1312
1309 spin_lock(&priv->lock); 1313 netif_tx_lock_bh(dev);
1314 spin_lock_irqsave(&priv->lock, flags);
1315
1310 if (ret) { 1316 if (ret) {
1311 neigh = p->neigh; 1317 neigh = p->neigh;
1312 if (neigh) { 1318 if (neigh) {
@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1320 kfree(p); 1326 kfree(p);
1321 } 1327 }
1322 } 1328 }
1323 spin_unlock(&priv->lock); 1329
1324 spin_unlock_irqrestore(&priv->tx_lock, flags); 1330 spin_unlock_irqrestore(&priv->lock, flags);
1331 netif_tx_unlock_bh(dev);
1325} 1332}
1326 1333
1327static void ipoib_cm_tx_reap(struct work_struct *work) 1334static void ipoib_cm_tx_reap(struct work_struct *work)
1328{ 1335{
1329 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1336 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1330 cm.reap_task); 1337 cm.reap_task);
1338 struct net_device *dev = priv->dev;
1331 struct ipoib_cm_tx *p; 1339 struct ipoib_cm_tx *p;
1340 unsigned long flags;
1341
1342 netif_tx_lock_bh(dev);
1343 spin_lock_irqsave(&priv->lock, flags);
1332 1344
1333 spin_lock_irq(&priv->tx_lock);
1334 spin_lock(&priv->lock);
1335 while (!list_empty(&priv->cm.reap_list)) { 1345 while (!list_empty(&priv->cm.reap_list)) {
1336 p = list_entry(priv->cm.reap_list.next, typeof(*p), list); 1346 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1337 list_del(&p->list); 1347 list_del(&p->list);
1338 spin_unlock(&priv->lock); 1348 spin_unlock_irqrestore(&priv->lock, flags);
1339 spin_unlock_irq(&priv->tx_lock); 1349 netif_tx_unlock_bh(dev);
1340 ipoib_cm_tx_destroy(p); 1350 ipoib_cm_tx_destroy(p);
1341 spin_lock_irq(&priv->tx_lock); 1351 netif_tx_lock_bh(dev);
1342 spin_lock(&priv->lock); 1352 spin_lock_irqsave(&priv->lock, flags);
1343 } 1353 }
1344 spin_unlock(&priv->lock); 1354
1345 spin_unlock_irq(&priv->tx_lock); 1355 spin_unlock_irqrestore(&priv->lock, flags);
1356 netif_tx_unlock_bh(dev);
1346} 1357}
1347 1358
1348static void ipoib_cm_skb_reap(struct work_struct *work) 1359static void ipoib_cm_skb_reap(struct work_struct *work)
1349{ 1360{
1350 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 1361 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1351 cm.skb_task); 1362 cm.skb_task);
1363 struct net_device *dev = priv->dev;
1352 struct sk_buff *skb; 1364 struct sk_buff *skb;
1353 1365 unsigned long flags;
1354 unsigned mtu = priv->mcast_mtu; 1366 unsigned mtu = priv->mcast_mtu;
1355 1367
1356 spin_lock_irq(&priv->tx_lock); 1368 netif_tx_lock_bh(dev);
1357 spin_lock(&priv->lock); 1369 spin_lock_irqsave(&priv->lock, flags);
1370
1358 while ((skb = skb_dequeue(&priv->cm.skb_queue))) { 1371 while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
1359 spin_unlock(&priv->lock); 1372 spin_unlock_irqrestore(&priv->lock, flags);
1360 spin_unlock_irq(&priv->tx_lock); 1373 netif_tx_unlock_bh(dev);
1374
1361 if (skb->protocol == htons(ETH_P_IP)) 1375 if (skb->protocol == htons(ETH_P_IP))
1362 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 1376 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1363#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1377#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
1365 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev); 1379 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
1366#endif 1380#endif
1367 dev_kfree_skb_any(skb); 1381 dev_kfree_skb_any(skb);
1368 spin_lock_irq(&priv->tx_lock); 1382
1369 spin_lock(&priv->lock); 1383 netif_tx_lock_bh(dev);
1384 spin_lock_irqsave(&priv->lock, flags);
1370 } 1385 }
1371 spin_unlock(&priv->lock); 1386
1372 spin_unlock_irq(&priv->tx_lock); 1387 spin_unlock_irqrestore(&priv->lock, flags);
1388 netif_tx_unlock_bh(dev);
1373} 1389}
1374 1390
1375void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, 1391void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
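Note: a recurring shape in the conversions above: the start/reap/skb work handlers hold both locks while walking a list, but must drop both around anything that can sleep (ipoib_cm_tx_init(), ipoib_cm_tx_destroy()), then reacquire and continue. The drain pattern in the abstract (schematic):

	#include <linux/list.h>
	#include <linux/netdevice.h>
	#include <linux/spinlock.h>

	static void drain_list_sketch(struct net_device *dev,
				      spinlock_t *lock,
				      struct list_head *head)
	{
		unsigned long flags;
		struct list_head *item;

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(lock, flags);

		while (!list_empty(head)) {
			item = head->next;
			list_del(item);

			/* drop both locks: per-item work may sleep */
			spin_unlock_irqrestore(lock, flags);
			netif_tx_unlock_bh(dev);

			/* ... process the detached item ... */

			netif_tx_lock_bh(dev);
			spin_lock_irqsave(lock, flags);
		}

		spin_unlock_irqrestore(lock, flags);
		netif_tx_unlock_bh(dev);
	}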
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 66cafa20c246..0e748aeeae99 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
468static void drain_tx_cq(struct net_device *dev) 468static void drain_tx_cq(struct net_device *dev)
469{ 469{
470 struct ipoib_dev_priv *priv = netdev_priv(dev); 470 struct ipoib_dev_priv *priv = netdev_priv(dev);
471 unsigned long flags;
472 471
473 spin_lock_irqsave(&priv->tx_lock, flags); 472 netif_tx_lock(dev);
474 while (poll_tx(priv)) 473 while (poll_tx(priv))
475 ; /* nothing */ 474 ; /* nothing */
476 475
477 if (netif_queue_stopped(dev)) 476 if (netif_queue_stopped(dev))
478 mod_timer(&priv->poll_timer, jiffies + 1); 477 mod_timer(&priv->poll_timer, jiffies + 1);
479 478
480 spin_unlock_irqrestore(&priv->tx_lock, flags); 479 netif_tx_unlock(dev);
481} 480}
482 481
483void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) 482void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
484{ 483{
485 drain_tx_cq((struct net_device *)dev_ptr); 484 struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
485
486 mod_timer(&priv->poll_timer, jiffies);
486} 487}
487 488
488static inline int post_send(struct ipoib_dev_priv *priv, 489static inline int post_send(struct ipoib_dev_priv *priv,
@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
614 struct ipoib_dev_priv *priv = netdev_priv(dev); 615 struct ipoib_dev_priv *priv = netdev_priv(dev);
615 struct ipoib_ah *ah, *tah; 616 struct ipoib_ah *ah, *tah;
616 LIST_HEAD(remove_list); 617 LIST_HEAD(remove_list);
618 unsigned long flags;
619
620 netif_tx_lock_bh(dev);
621 spin_lock_irqsave(&priv->lock, flags);
617 622
618 spin_lock_irq(&priv->tx_lock);
619 spin_lock(&priv->lock);
620 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) 623 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
621 if ((int) priv->tx_tail - (int) ah->last_send >= 0) { 624 if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
622 list_del(&ah->list); 625 list_del(&ah->list);
623 ib_destroy_ah(ah->ah); 626 ib_destroy_ah(ah->ah);
624 kfree(ah); 627 kfree(ah);
625 } 628 }
626 spin_unlock(&priv->lock); 629
627 spin_unlock_irq(&priv->tx_lock); 630 spin_unlock_irqrestore(&priv->lock, flags);
631 netif_tx_unlock_bh(dev);
628} 632}
629 633
630void ipoib_reap_ah(struct work_struct *work) 634void ipoib_reap_ah(struct work_struct *work)
@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
761{ 765{
762 struct ipoib_dev_priv *priv = netdev_priv(dev); 766 struct ipoib_dev_priv *priv = netdev_priv(dev);
763 int i, n; 767 int i, n;
768
769 /*
770 * We call completion handling routines that expect to be
771 * called from the BH-disabled NAPI poll context, so disable
772 * BHs here too.
773 */
774 local_bh_disable();
775
764 do { 776 do {
765 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); 777 n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
766 for (i = 0; i < n; ++i) { 778 for (i = 0; i < n; ++i) {
@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
784 796
785 while (poll_tx(priv)) 797 while (poll_tx(priv))
786 ; /* nothing */ 798 ; /* nothing */
799
800 local_bh_enable();
787} 801}
788 802
789int ipoib_ib_dev_stop(struct net_device *dev, int flush) 803int ipoib_ib_dev_stop(struct net_device *dev, int flush)
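Note: the local_bh_disable()/local_bh_enable() pair added to ipoib_drain_cq() recreates the context the completion handlers were written for: they normally run from BH-disabled NAPI poll, and after this patch they take netif_tx_lock(), which must not be taken from process context while the TX softirq can contend for it on the same CPU. The guard in miniature:

	#include <linux/interrupt.h>

	/* Run a routine written for softirq (NAPI) context from
	 * process context by disabling bottom halves around it. */
	static void run_in_bh_context_sketch(void (*fn)(void *), void *arg)
	{
		local_bh_disable();
		fn(arg);
		local_bh_enable();
	}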
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 1b1df5cc4113..c0ee514396df 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
373 struct ipoib_dev_priv *priv = netdev_priv(dev); 373 struct ipoib_dev_priv *priv = netdev_priv(dev);
374 struct ipoib_path *path, *tp; 374 struct ipoib_path *path, *tp;
375 LIST_HEAD(remove_list); 375 LIST_HEAD(remove_list);
376 unsigned long flags;
376 377
377 spin_lock_irq(&priv->tx_lock); 378 netif_tx_lock_bh(dev);
378 spin_lock(&priv->lock); 379 spin_lock_irqsave(&priv->lock, flags);
379 380
380 list_splice_init(&priv->path_list, &remove_list); 381 list_splice_init(&priv->path_list, &remove_list);
381 382
@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
385 list_for_each_entry_safe(path, tp, &remove_list, list) { 386 list_for_each_entry_safe(path, tp, &remove_list, list) {
386 if (path->query) 387 if (path->query)
387 ib_sa_cancel_query(path->query_id, path->query); 388 ib_sa_cancel_query(path->query_id, path->query);
388 spin_unlock(&priv->lock); 389 spin_unlock_irqrestore(&priv->lock, flags);
389 spin_unlock_irq(&priv->tx_lock); 390 netif_tx_unlock_bh(dev);
390 wait_for_completion(&path->done); 391 wait_for_completion(&path->done);
391 path_free(dev, path); 392 path_free(dev, path);
392 spin_lock_irq(&priv->tx_lock); 393 netif_tx_lock_bh(dev);
393 spin_lock(&priv->lock); 394 spin_lock_irqsave(&priv->lock, flags);
394 } 395 }
395 spin_unlock(&priv->lock); 396
396 spin_unlock_irq(&priv->tx_lock); 397 spin_unlock_irqrestore(&priv->lock, flags);
398 netif_tx_unlock_bh(dev);
397} 399}
398 400
399static void path_rec_completion(int status, 401static void path_rec_completion(int status,
@@ -404,7 +406,7 @@ static void path_rec_completion(int status,
404 struct net_device *dev = path->dev; 406 struct net_device *dev = path->dev;
405 struct ipoib_dev_priv *priv = netdev_priv(dev); 407 struct ipoib_dev_priv *priv = netdev_priv(dev);
406 struct ipoib_ah *ah = NULL; 408 struct ipoib_ah *ah = NULL;
407 struct ipoib_ah *old_ah; 409 struct ipoib_ah *old_ah = NULL;
408 struct ipoib_neigh *neigh, *tn; 410 struct ipoib_neigh *neigh, *tn;
409 struct sk_buff_head skqueue; 411 struct sk_buff_head skqueue;
410 struct sk_buff *skb; 412 struct sk_buff *skb;
@@ -428,12 +430,12 @@ static void path_rec_completion(int status,
428 430
429 spin_lock_irqsave(&priv->lock, flags); 431 spin_lock_irqsave(&priv->lock, flags);
430 432
431 old_ah = path->ah;
432 path->ah = ah;
433
434 if (ah) { 433 if (ah) {
435 path->pathrec = *pathrec; 434 path->pathrec = *pathrec;
436 435
436 old_ah = path->ah;
437 path->ah = ah;
438
437 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n", 439 ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
438 ah, be16_to_cpu(pathrec->dlid), pathrec->sl); 440 ah, be16_to_cpu(pathrec->dlid), pathrec->sl);
439 441
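In path_rec_completion(), old_ah now starts as NULL and the path->ah swap moves inside the if (ah) branch, so a failed path query (ah == NULL) no longer discards a still-valid address handle. A condensed restatement; the final conditional put is an assumption suggested by the NULL initialisation, since the hunk containing it falls outside this excerpt:

	struct ipoib_ah *old_ah = NULL;

	spin_lock_irqsave(&priv->lock, flags);
	if (ah) {			/* query succeeded */
		path->pathrec = *pathrec;
		old_ah = path->ah;	/* remember the AH being replaced */
		path->ah = ah;
	}
	...
	spin_unlock_irqrestore(&priv->lock, flags);

	if (old_ah)
		ipoib_put_ah(old_ah);	/* only if something was replaced */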
@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
555 struct ipoib_dev_priv *priv = netdev_priv(dev); 557 struct ipoib_dev_priv *priv = netdev_priv(dev);
556 struct ipoib_path *path; 558 struct ipoib_path *path;
557 struct ipoib_neigh *neigh; 559 struct ipoib_neigh *neigh;
560 unsigned long flags;
558 561
559 neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev); 562 neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
560 if (!neigh) { 563 if (!neigh) {
@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
563 return; 566 return;
564 } 567 }
565 568
566 /* 569 spin_lock_irqsave(&priv->lock, flags);
567 * We can only be called from ipoib_start_xmit, so we're
568 * inside tx_lock -- no need to save/restore flags.
569 */
570 spin_lock(&priv->lock);
571 570
572 path = __path_find(dev, skb->dst->neighbour->ha + 4); 571 path = __path_find(dev, skb->dst->neighbour->ha + 4);
573 if (!path) { 572 if (!path) {
@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
614 __skb_queue_tail(&neigh->queue, skb); 613 __skb_queue_tail(&neigh->queue, skb);
615 } 614 }
616 615
617 spin_unlock(&priv->lock); 616 spin_unlock_irqrestore(&priv->lock, flags);
618 return; 617 return;
619 618
620err_list: 619err_list:
@@ -626,7 +625,7 @@ err_drop:
626 ++dev->stats.tx_dropped; 625 ++dev->stats.tx_dropped;
627 dev_kfree_skb_any(skb); 626 dev_kfree_skb_any(skb);
628 627
629 spin_unlock(&priv->lock); 628 spin_unlock_irqrestore(&priv->lock, flags);
630} 629}
631 630
632static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) 631static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
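neigh_add_path() loses its "we're inside tx_lock" comment along with the assumption behind it: the old priv->tx_lock was taken with _irqsave, so callees could rely on IRQs already being off. With NETIF_F_LLTX dropped (see the ipoib_setup() hunk further down), the core calls ipoib_start_xmit() under the device tx lock with BHs disabled but hard IRQs enabled, so that guarantee is gone and priv->lock must mask IRQs itself. The before/after in miniature:

	/* Before (LLTX): caller held priv->tx_lock, taken with _irqsave,
	 * so IRQs were already off and a bare spin_lock() sufficed. */
	spin_lock(&priv->lock);
	...
	spin_unlock(&priv->lock);

	/* After: only netif_tx_lock (BHs off, IRQs on) is held. */
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	...
	spin_unlock_irqrestore(&priv->lock, flags);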
@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
650{ 649{
651 struct ipoib_dev_priv *priv = netdev_priv(dev); 650 struct ipoib_dev_priv *priv = netdev_priv(dev);
652 struct ipoib_path *path; 651 struct ipoib_path *path;
652 unsigned long flags;
653 653
654 /* 654 spin_lock_irqsave(&priv->lock, flags);
655 * We can only be called from ipoib_start_xmit, so we're
656 * inside tx_lock -- no need to save/restore flags.
657 */
658 spin_lock(&priv->lock);
659 655
660 path = __path_find(dev, phdr->hwaddr + 4); 656 path = __path_find(dev, phdr->hwaddr + 4);
661 if (!path || !path->valid) { 657 if (!path || !path->valid) {
@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
667 __skb_queue_tail(&path->queue, skb); 663 __skb_queue_tail(&path->queue, skb);
668 664
669 if (path_rec_start(dev, path)) { 665 if (path_rec_start(dev, path)) {
670 spin_unlock(&priv->lock); 666 spin_unlock_irqrestore(&priv->lock, flags);
671 path_free(dev, path); 667 path_free(dev, path);
672 return; 668 return;
673 } else 669 } else
@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
677 dev_kfree_skb_any(skb); 673 dev_kfree_skb_any(skb);
678 } 674 }
679 675
680 spin_unlock(&priv->lock); 676 spin_unlock_irqrestore(&priv->lock, flags);
681 return; 677 return;
682 } 678 }
683 679
@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
696 dev_kfree_skb_any(skb); 692 dev_kfree_skb_any(skb);
697 } 693 }
698 694
699 spin_unlock(&priv->lock); 695 spin_unlock_irqrestore(&priv->lock, flags);
700} 696}
701 697
702static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) 698static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
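unicast_arp_send() gets the identical conversion: the removed comment relied on the caller's tx_lock having masked IRQs, which no longer holds, so each priv->lock acquisition on the transmit path becomes a spin_lock_irqsave()/spin_unlock_irqrestore() pair, including the early-exit branch around path_free().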
@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
705 struct ipoib_neigh *neigh; 701 struct ipoib_neigh *neigh;
706 unsigned long flags; 702 unsigned long flags;
707 703
708 if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
709 return NETDEV_TX_LOCKED;
710
711 if (likely(skb->dst && skb->dst->neighbour)) { 704 if (likely(skb->dst && skb->dst->neighbour)) {
712 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { 705 if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
713 ipoib_path_lookup(skb, dev); 706 ipoib_path_lookup(skb, dev);
714 goto out; 707 return NETDEV_TX_OK;
715 } 708 }
716 709
717 neigh = *to_ipoib_neigh(skb->dst->neighbour); 710 neigh = *to_ipoib_neigh(skb->dst->neighbour);
@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
721 skb->dst->neighbour->ha + 4, 714 skb->dst->neighbour->ha + 4,
722 sizeof(union ib_gid))) || 715 sizeof(union ib_gid))) ||
723 (neigh->dev != dev))) { 716 (neigh->dev != dev))) {
724 spin_lock(&priv->lock); 717 spin_lock_irqsave(&priv->lock, flags);
725 /* 718 /*
726 * It's safe to call ipoib_put_ah() inside 719 * It's safe to call ipoib_put_ah() inside
727 * priv->lock here, because we know that 720 * priv->lock here, because we know that
@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
732 ipoib_put_ah(neigh->ah); 725 ipoib_put_ah(neigh->ah);
733 list_del(&neigh->list); 726 list_del(&neigh->list);
734 ipoib_neigh_free(dev, neigh); 727 ipoib_neigh_free(dev, neigh);
735 spin_unlock(&priv->lock); 728 spin_unlock_irqrestore(&priv->lock, flags);
736 ipoib_path_lookup(skb, dev); 729 ipoib_path_lookup(skb, dev);
737 goto out; 730 return NETDEV_TX_OK;
738 } 731 }
739 732
740 if (ipoib_cm_get(neigh)) { 733 if (ipoib_cm_get(neigh)) {
741 if (ipoib_cm_up(neigh)) { 734 if (ipoib_cm_up(neigh)) {
742 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); 735 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
743 goto out; 736 return NETDEV_TX_OK;
744 } 737 }
745 } else if (neigh->ah) { 738 } else if (neigh->ah) {
746 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); 739 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
747 goto out; 740 return NETDEV_TX_OK;
748 } 741 }
749 742
750 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { 743 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
751 spin_lock(&priv->lock); 744 spin_lock_irqsave(&priv->lock, flags);
752 __skb_queue_tail(&neigh->queue, skb); 745 __skb_queue_tail(&neigh->queue, skb);
753 spin_unlock(&priv->lock); 746 spin_unlock_irqrestore(&priv->lock, flags);
754 } else { 747 } else {
755 ++dev->stats.tx_dropped; 748 ++dev->stats.tx_dropped;
756 dev_kfree_skb_any(skb); 749 dev_kfree_skb_any(skb);
@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
779 IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); 772 IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
780 dev_kfree_skb_any(skb); 773 dev_kfree_skb_any(skb);
781 ++dev->stats.tx_dropped; 774 ++dev->stats.tx_dropped;
782 goto out; 775 return NETDEV_TX_OK;
783 } 776 }
784 777
785 unicast_arp_send(skb, dev, phdr); 778 unicast_arp_send(skb, dev, phdr);
786 } 779 }
787 } 780 }
788 781
789out:
790 spin_unlock_irqrestore(&priv->tx_lock, flags);
791
792 return NETDEV_TX_OK; 782 return NETDEV_TX_OK;
793} 783}
794 784
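With the priv->tx_lock trylock gone, ipoib_start_xmit() can no longer return NETDEV_TX_LOCKED; every exit becomes an explicit return NETDEV_TX_OK and the shared out: label disappears. Heavily condensed control flow -- the two helpers are placeholders for the driver's branches, not real functions:

	static int start_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
	{
		/* The core already holds the device tx lock here: no driver
		 * trylock, hence no NETDEV_TX_LOCKED and no unlock label. */
		if (path_is_resolved(skb, dev)) {	/* hypothetical */
			transmit_skb(dev, skb);		/* hypothetical */
			return NETDEV_TX_OK;
		}

		/* Slow paths queue or drop the skb and likewise just: */
		return NETDEV_TX_OK;
	}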
@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
1052 dev->type = ARPHRD_INFINIBAND; 1042 dev->type = ARPHRD_INFINIBAND;
1053 dev->tx_queue_len = ipoib_sendq_size * 2; 1043 dev->tx_queue_len = ipoib_sendq_size * 2;
1054 dev->features = (NETIF_F_VLAN_CHALLENGED | 1044 dev->features = (NETIF_F_VLAN_CHALLENGED |
1055 NETIF_F_LLTX |
1056 NETIF_F_HIGHDMA); 1045 NETIF_F_HIGHDMA);
1057 1046
1058 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 1047 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
1064 ipoib_lro_setup(priv); 1053 ipoib_lro_setup(priv);
1065 1054
1066 spin_lock_init(&priv->lock); 1055 spin_lock_init(&priv->lock);
1067 spin_lock_init(&priv->tx_lock);
1068 1056
1069 mutex_init(&priv->vlan_mutex); 1057 mutex_init(&priv->vlan_mutex);
1070 1058
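Removing NETIF_F_LLTX from dev->features (together with the priv->tx_lock initialisation) is what shifts serialization into the core: without LLTX, the stack itself wraps every hard_start_xmit call in the device tx lock. That is the same lock the driver's process-context paths now take explicitly, e.g.:

	netif_tx_lock_bh(dev);		/* excludes ipoib_start_xmit() */
	dev->stats.tx_dropped += tx_dropped;
	netif_tx_unlock_bh(dev);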
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index aae28620a6e5..d9d1223c3fd5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
69 struct net_device *dev = mcast->dev; 69 struct net_device *dev = mcast->dev;
70 struct ipoib_dev_priv *priv = netdev_priv(dev); 70 struct ipoib_dev_priv *priv = netdev_priv(dev);
71 struct ipoib_neigh *neigh, *tmp; 71 struct ipoib_neigh *neigh, *tmp;
72 unsigned long flags;
73 int tx_dropped = 0; 72 int tx_dropped = 0;
74 73
75 ipoib_dbg_mcast(netdev_priv(dev), 74 ipoib_dbg_mcast(netdev_priv(dev),
76 "deleting multicast group " IPOIB_GID_FMT "\n", 75 "deleting multicast group " IPOIB_GID_FMT "\n",
77 IPOIB_GID_ARG(mcast->mcmember.mgid)); 76 IPOIB_GID_ARG(mcast->mcmember.mgid));
78 77
79 spin_lock_irqsave(&priv->lock, flags); 78 spin_lock_irq(&priv->lock);
80 79
81 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) { 80 list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
82 /* 81 /*
@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
90 ipoib_neigh_free(dev, neigh); 89 ipoib_neigh_free(dev, neigh);
91 } 90 }
92 91
93 spin_unlock_irqrestore(&priv->lock, flags); 92 spin_unlock_irq(&priv->lock);
94 93
95 if (mcast->ah) 94 if (mcast->ah)
96 ipoib_put_ah(mcast->ah); 95 ipoib_put_ah(mcast->ah);
@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
100 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 99 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
101 } 100 }
102 101
103 spin_lock_irqsave(&priv->tx_lock, flags); 102 netif_tx_lock_bh(dev);
104 dev->stats.tx_dropped += tx_dropped; 103 dev->stats.tx_dropped += tx_dropped;
105 spin_unlock_irqrestore(&priv->tx_lock, flags); 104 netif_tx_unlock_bh(dev);
106 105
107 kfree(mcast); 106 kfree(mcast);
108} 107}
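ipoib_mcast_free() is only ever called from process context with IRQs enabled, so the irqsave/irqrestore pair relaxes to spin_lock_irq()/spin_unlock_irq() and the flags local goes away; the tx_dropped accounting moves under netif_tx_lock_bh() now that priv->tx_lock no longer exists. The rule of thumb being applied:

	/* Context known (process, IRQs on): no need to save flags. */
	spin_lock_irq(&priv->lock);
	...
	spin_unlock_irq(&priv->lock);

	/* Context unknown (IRQs may already be off): save and restore. */
	spin_lock_irqsave(&priv->lock, flags);
	...
	spin_unlock_irqrestore(&priv->lock, flags);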
@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
259 } 258 }
260 259
261 /* actually send any queued packets */ 260 /* actually send any queued packets */
262 spin_lock_irq(&priv->tx_lock); 261 netif_tx_lock_bh(dev);
263 while (!skb_queue_empty(&mcast->pkt_queue)) { 262 while (!skb_queue_empty(&mcast->pkt_queue)) {
264 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); 263 struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
265 spin_unlock_irq(&priv->tx_lock); 264 netif_tx_unlock_bh(dev);
266 265
267 skb->dev = dev; 266 skb->dev = dev;
268 267
@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
273 272
274 if (dev_queue_xmit(skb)) 273 if (dev_queue_xmit(skb))
275 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); 274 ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
276 spin_lock_irq(&priv->tx_lock); 275 netif_tx_lock_bh(dev);
277 } 276 }
278 spin_unlock_irq(&priv->tx_lock); 277 netif_tx_unlock_bh(dev);
279 278
280 return 0; 279 return 0;
281} 280}
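ipoib_mcast_join_finish() keeps its drop-the-lock-to-transmit shape, just on netif_tx_lock_bh(): dev_queue_xmit() takes the device tx lock internally, so calling it with that lock held would self-deadlock. The pattern in isolation:

	netif_tx_lock_bh(dev);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		netif_tx_unlock_bh(dev);	/* dev_queue_xmit() retakes it */
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		netif_tx_lock_bh(dev);
	}
	netif_tx_unlock_bh(dev);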
@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
286{ 285{
287 struct ipoib_mcast *mcast = multicast->context; 286 struct ipoib_mcast *mcast = multicast->context;
288 struct net_device *dev = mcast->dev; 287 struct net_device *dev = mcast->dev;
289 struct ipoib_dev_priv *priv = netdev_priv(dev);
290 288
291 /* We trap for port events ourselves. */ 289 /* We trap for port events ourselves. */
292 if (status == -ENETRESET) 290 if (status == -ENETRESET)
@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
302 IPOIB_GID_ARG(mcast->mcmember.mgid), status); 300 IPOIB_GID_ARG(mcast->mcmember.mgid), status);
303 301
304 /* Flush out any queued packets */ 302 /* Flush out any queued packets */
305 spin_lock_irq(&priv->tx_lock); 303 netif_tx_lock_bh(dev);
306 while (!skb_queue_empty(&mcast->pkt_queue)) { 304 while (!skb_queue_empty(&mcast->pkt_queue)) {
307 ++dev->stats.tx_dropped; 305 ++dev->stats.tx_dropped;
308 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); 306 dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
309 } 307 }
310 spin_unlock_irq(&priv->tx_lock); 308 netif_tx_unlock_bh(dev);
311 309
312 /* Clear the busy flag so we try again */ 310 /* Clear the busy flag so we try again */
313 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, 311 status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
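In ipoib_mcast_sendonly_join_complete(), the priv local can be deleted because its only use visible in this diff was the old tx_lock pair; flushing the queued packets now serializes against the xmit path through the device tx lock taken via dev instead.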
@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
662{ 660{
663 struct ipoib_dev_priv *priv = netdev_priv(dev); 661 struct ipoib_dev_priv *priv = netdev_priv(dev);
664 struct ipoib_mcast *mcast; 662 struct ipoib_mcast *mcast;
663 unsigned long flags;
665 664
666 /* 665 spin_lock_irqsave(&priv->lock, flags);
667 * We can only be called from ipoib_start_xmit, so we're
668 * inside tx_lock -- no need to save/restore flags.
669 */
670 spin_lock(&priv->lock);
671 666
672 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || 667 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
673 !priv->broadcast || 668 !priv->broadcast ||
@@ -738,7 +733,7 @@ out:
738 } 733 }
739 734
740unlock: 735unlock:
741 spin_unlock(&priv->lock); 736 spin_unlock_irqrestore(&priv->lock, flags);
742} 737}
743 738
744void ipoib_mcast_dev_flush(struct net_device *dev) 739void ipoib_mcast_dev_flush(struct net_device *dev)
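Taken together, the IPoIB hunks replace the driver-private tx_lock with the stack's per-device tx lock and settle on a single ordering, which every function above follows:

	/*
	 * Locking after this patch (outermost first):
	 *
	 *   netif_tx_lock / netif_tx_lock_bh(dev)  - xmit serialization;
	 *       taken by the core around ipoib_start_xmit(), and taken
	 *       explicitly (BH variant) from process context
	 *
	 *   priv->lock  - driver state; spin_lock_irqsave() on the xmit
	 *       and completion paths, spin_lock_irq() where process
	 *       context is guaranteed
	 *
	 * priv->tx_lock is gone and NETIF_F_LLTX is no longer set.
	 */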