about summary refs log tree commit diff stats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-02-26 16:18:43 -0500
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-02-26 16:18:43 -0500
commit5992fd1995a5e69710011fc7d7945c62bf5b551b (patch)
tree16a45f188bd66793f89e81b9d0c510ba09b36237 /drivers
parenteafb4f184cd89e8af5676ec49ae35184172553fe (diff)
parent843613b04744d5b65c2f37975c5310f366a0d070 (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Correct debugging output when path record lookup fails
  RDMA/cxgb3: Stop the EP Timer on BAD CLOSE
  RDMA/cxgb3: cleanups
  RDMA/cma: Remove unused node_guid from cma_device structure
  IB/cm: Remove ca_guid from cm_device structure
  RDMA/cma: Request reversible paths only
  IB/core: Set hop limit in ib_init_ah_from_wc correctly
  IB/uverbs: Return correct error for invalid PD in register MR
  IPoIB: Remove unused local_rate tracking
  IPoIB/cm: Improve small message bandwidth
  IB/mthca: Make 2 functions static
Diffstat (limited to 'drivers')
-rw-r--r--drivers/infiniband/core/cm.c10
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c4
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c31
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h5
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_resource.c14
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c6
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.h1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c29
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c46
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c8
17 files changed, 76 insertions, 102 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index d446998b12a4..842cd0b53e91 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -88,7 +88,6 @@ struct cm_port {
88struct cm_device { 88struct cm_device {
89 struct list_head list; 89 struct list_head list;
90 struct ib_device *device; 90 struct ib_device *device;
91 __be64 ca_guid;
92 struct cm_port port[0]; 91 struct cm_port port[0];
93}; 92};
94 93
@@ -739,8 +738,8 @@ retest:
739 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 738 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
740 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 739 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
741 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, 740 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
742 &cm_id_priv->av.port->cm_dev->ca_guid, 741 &cm_id_priv->id.device->node_guid,
743 sizeof cm_id_priv->av.port->cm_dev->ca_guid, 742 sizeof cm_id_priv->id.device->node_guid,
744 NULL, 0); 743 NULL, 0);
745 break; 744 break;
746 case IB_CM_REQ_RCVD: 745 case IB_CM_REQ_RCVD:
@@ -883,7 +882,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
883 882
884 req_msg->local_comm_id = cm_id_priv->id.local_id; 883 req_msg->local_comm_id = cm_id_priv->id.local_id;
885 req_msg->service_id = param->service_id; 884 req_msg->service_id = param->service_id;
886 req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 885 req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
887 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num)); 886 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
888 cm_req_set_resp_res(req_msg, param->responder_resources); 887 cm_req_set_resp_res(req_msg, param->responder_resources);
889 cm_req_set_init_depth(req_msg, param->initiator_depth); 888 cm_req_set_init_depth(req_msg, param->initiator_depth);
@@ -1442,7 +1441,7 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
1442 cm_rep_set_flow_ctrl(rep_msg, param->flow_control); 1441 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1443 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count); 1442 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1444 cm_rep_set_srq(rep_msg, param->srq); 1443 cm_rep_set_srq(rep_msg, param->srq);
1445 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid; 1444 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1446 1445
1447 if (param->private_data && param->private_data_len) 1446 if (param->private_data && param->private_data_len)
1448 memcpy(rep_msg->private_data, param->private_data, 1447 memcpy(rep_msg->private_data, param->private_data,
@@ -3385,7 +3384,6 @@ static void cm_add_one(struct ib_device *device)
3385 return; 3384 return;
3386 3385
3387 cm_dev->device = device; 3386 cm_dev->device = device;
3388 cm_dev->ca_guid = device->node_guid;
3389 3387
3390 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); 3388 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3391 for (i = 1; i <= device->phys_port_cnt; i++) { 3389 for (i = 1; i <= device->phys_port_cnt; i++) {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f8d69b3fa307..d441815a3e0c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -77,7 +77,6 @@ static int next_port;
77struct cma_device { 77struct cma_device {
78 struct list_head list; 78 struct list_head list;
79 struct ib_device *device; 79 struct ib_device *device;
80 __be64 node_guid;
81 struct completion comp; 80 struct completion comp;
82 atomic_t refcount; 81 atomic_t refcount;
83 struct list_head id_list; 82 struct list_head id_list;
@@ -1492,11 +1491,13 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
1492 ib_addr_get_dgid(addr, &path_rec.dgid); 1491 ib_addr_get_dgid(addr, &path_rec.dgid);
1493 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr)); 1492 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
1494 path_rec.numb_path = 1; 1493 path_rec.numb_path = 1;
1494 path_rec.reversible = 1;
1495 1495
1496 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, 1496 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
1497 id_priv->id.port_num, &path_rec, 1497 id_priv->id.port_num, &path_rec,
1498 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | 1498 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
1499 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH, 1499 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
1500 IB_SA_PATH_REC_REVERSIBLE,
1500 timeout_ms, GFP_KERNEL, 1501 timeout_ms, GFP_KERNEL,
1501 cma_query_handler, work, &id_priv->query); 1502 cma_query_handler, work, &id_priv->query);
1502 1503
@@ -2672,7 +2673,6 @@ static void cma_add_one(struct ib_device *device)
2672 return; 2673 return;
2673 2674
2674 cma_dev->device = device; 2675 cma_dev->device = device;
2675 cma_dev->node_guid = device->node_guid;
2676 2676
2677 init_completion(&cma_dev->comp); 2677 init_completion(&cma_dev->comp);
2678 atomic_set(&cma_dev->refcount, 1); 2678 atomic_set(&cma_dev->refcount, 1);
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index df1efbc10882..4fd75afa6a3a 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -622,8 +622,10 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
622 obj->umem.virt_base = cmd.hca_va; 622 obj->umem.virt_base = cmd.hca_va;
623 623
624 pd = idr_read_pd(cmd.pd_handle, file->ucontext); 624 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
625 if (!pd) 625 if (!pd) {
626 ret = -EINVAL;
626 goto err_release; 627 goto err_release;
628 }
627 629
628 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata); 630 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
629 if (IS_ERR(mr)) { 631 if (IS_ERR(mr)) {
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8b5dd3649bbf..ccdf93d30b01 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -167,7 +167,7 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
167 ah_attr->grh.sgid_index = (u8) gid_index; 167 ah_attr->grh.sgid_index = (u8) gid_index;
168 flow_class = be32_to_cpu(grh->version_tclass_flow); 168 flow_class = be32_to_cpu(grh->version_tclass_flow);
169 ah_attr->grh.flow_label = flow_class & 0xFFFFF; 169 ah_attr->grh.flow_label = flow_class & 0xFFFFF;
170 ah_attr->grh.hop_limit = grh->hop_limit; 170 ah_attr->grh.hop_limit = 0xFF;
171 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF; 171 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
172 } 172 }
173 return 0; 173 return 0;
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile
index 0e110f32f128..36b98989b15e 100644
--- a/drivers/infiniband/hw/cxgb3/Makefile
+++ b/drivers/infiniband/hw/cxgb3/Makefile
@@ -8,5 +8,4 @@ iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
8 8
9ifdef CONFIG_INFINIBAND_CXGB3_DEBUG 9ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
10EXTRA_CFLAGS += -DDEBUG 10EXTRA_CFLAGS += -DDEBUG
11iw_cxgb3-y += cxio_dbg.o
12endif 11endif
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 114ac3b775dc..d737c738d876 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -45,7 +45,7 @@
45static LIST_HEAD(rdev_list); 45static LIST_HEAD(rdev_list);
46static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL; 46static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
47 47
48static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name) 48static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
49{ 49{
50 struct cxio_rdev *rdev; 50 struct cxio_rdev *rdev;
51 51
@@ -55,8 +55,7 @@ static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
55 return NULL; 55 return NULL;
56} 56}
57 57
58static inline struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev 58static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
59 *tdev)
60{ 59{
61 struct cxio_rdev *rdev; 60 struct cxio_rdev *rdev;
62 61
@@ -118,7 +117,7 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
118 return 0; 117 return 0;
119} 118}
120 119
121static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid) 120static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
122{ 121{
123 struct rdma_cq_setup setup; 122 struct rdma_cq_setup setup;
124 setup.id = cqid; 123 setup.id = cqid;
@@ -130,7 +129,7 @@ static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
130 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup)); 129 return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
131} 130}
132 131
133int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) 132static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
134{ 133{
135 u64 sge_cmd; 134 u64 sge_cmd;
136 struct t3_modify_qp_wr *wqe; 135 struct t3_modify_qp_wr *wqe;
@@ -425,7 +424,7 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
425 } 424 }
426} 425}
427 426
428static inline int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) 427static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
429{ 428{
430 if (CQE_OPCODE(*cqe) == T3_TERMINATE) 429 if (CQE_OPCODE(*cqe) == T3_TERMINATE)
431 return 0; 430 return 0;
@@ -760,17 +759,6 @@ ret:
760 return err; 759 return err;
761} 760}
762 761
763/* IN : stag key, pdid, pbl_size
764 * Out: stag index, actaul pbl_size, and pbl_addr allocated.
765 */
766int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid,
767 enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr)
768{
769 *stag = T3_STAG_UNSET;
770 return (__cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
771 perm, 0, 0ULL, 0, 0, NULL, pbl_size, pbl_addr));
772}
773
774int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, 762int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
775 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 763 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
776 u8 page_size, __be64 *pbl, u32 *pbl_size, 764 u8 page_size, __be64 *pbl, u32 *pbl_size,
@@ -1029,7 +1017,7 @@ void __exit cxio_hal_exit(void)
1029 cxio_hal_destroy_rhdl_resource(); 1017 cxio_hal_destroy_rhdl_resource();
1030} 1018}
1031 1019
1032static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) 1020static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
1033{ 1021{
1034 struct t3_swsq *sqp; 1022 struct t3_swsq *sqp;
1035 __u32 ptr = wq->sq_rptr; 1023 __u32 ptr = wq->sq_rptr;
@@ -1058,9 +1046,8 @@ static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
1058 break; 1046 break;
1059} 1047}
1060 1048
1061static inline void create_read_req_cqe(struct t3_wq *wq, 1049static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
1062 struct t3_cqe *hw_cqe, 1050 struct t3_cqe *read_cqe)
1063 struct t3_cqe *read_cqe)
1064{ 1051{
1065 read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr; 1052 read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
1066 read_cqe->len = wq->oldest_read->read_len; 1053 read_cqe->len = wq->oldest_read->read_len;
@@ -1073,7 +1060,7 @@ static inline void create_read_req_cqe(struct t3_wq *wq,
1073/* 1060/*
1074 * Return a ptr to the next read wr in the SWSQ or NULL. 1061 * Return a ptr to the next read wr in the SWSQ or NULL.
1075 */ 1062 */
1076static inline void advance_oldest_read(struct t3_wq *wq) 1063static void advance_oldest_read(struct t3_wq *wq)
1077{ 1064{
1078 1065
1079 u32 rptr = wq->oldest_read - wq->sq + 1; 1066 u32 rptr = wq->oldest_read - wq->sq + 1;
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index 8ab04a7c6f6e..99543d634704 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -143,7 +143,6 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
143void cxio_rdev_close(struct cxio_rdev *rdev); 143void cxio_rdev_close(struct cxio_rdev *rdev);
144int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, 144int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
145 enum t3_cq_opcode op, u32 credit); 145 enum t3_cq_opcode op, u32 credit);
146int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev, u32 qpid);
147int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 146int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
148int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 147int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
149int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); 148int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
@@ -154,8 +153,6 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
154int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, 153int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
155 struct cxio_ucontext *uctx); 154 struct cxio_ucontext *uctx);
156int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode); 155int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
157int cxio_allocate_stag(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
158 enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr);
159int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid, 156int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
160 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len, 157 enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
161 u8 page_size, __be64 *pbl, u32 *pbl_size, 158 u8 page_size, __be64 *pbl, u32 *pbl_size,
@@ -171,8 +168,6 @@ int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
171int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr); 168int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
172void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb); 169void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
173void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb); 170void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
174u32 cxio_hal_get_rhdl(void);
175void cxio_hal_put_rhdl(u32 rhdl);
176u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp); 171u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
177void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid); 172void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
178int __init cxio_hal_init(void); 173int __init cxio_hal_init(void);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c
index 65bf577311aa..d3095ae5bc2e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_resource.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c
@@ -179,7 +179,7 @@ tpt_err:
179/* 179/*
180 * returns 0 if no resource available 180 * returns 0 if no resource available
181 */ 181 */
182static inline u32 cxio_hal_get_resource(struct kfifo *fifo) 182static u32 cxio_hal_get_resource(struct kfifo *fifo)
183{ 183{
184 u32 entry; 184 u32 entry;
185 if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32))) 185 if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
@@ -188,21 +188,11 @@ static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
188 return 0; /* fifo emptry */ 188 return 0; /* fifo emptry */
189} 189}
190 190
191static inline void cxio_hal_put_resource(struct kfifo *fifo, u32 entry) 191static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
192{ 192{
193 BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0); 193 BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
194} 194}
195 195
196u32 cxio_hal_get_rhdl(void)
197{
198 return cxio_hal_get_resource(rhdl_fifo);
199}
200
201void cxio_hal_put_rhdl(u32 rhdl)
202{
203 cxio_hal_put_resource(rhdl_fifo, rhdl);
204}
205
206u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp) 196u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
207{ 197{
208 return cxio_hal_get_resource(rscp->tpt_fifo); 198 return cxio_hal_get_resource(rscp->tpt_fifo);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index e5442e34b788..b21fde8b659d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -209,8 +209,7 @@ static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
209 return state; 209 return state;
210} 210}
211 211
212static inline void __state_set(struct iwch_ep_common *epc, 212static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
213 enum iwch_ep_state new)
214{ 213{
215 epc->state = new; 214 epc->state = new;
216} 215}
@@ -1459,7 +1458,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1459/* 1458/*
1460 * Returns whether an ABORT_REQ_RSS message is a negative advice. 1459 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1461 */ 1460 */
1462static inline int is_neg_adv_abort(unsigned int status) 1461static int is_neg_adv_abort(unsigned int status)
1463{ 1462{
1464 return status == CPL_ERR_RTX_NEG_ADVICE || 1463 return status == CPL_ERR_RTX_NEG_ADVICE ||
1465 status == CPL_ERR_PERSIST_NEG_ADVICE; 1464 status == CPL_ERR_PERSIST_NEG_ADVICE;
@@ -1635,6 +1634,7 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1635 1634
1636 printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n", 1635 printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
1637 __FUNCTION__, ep->hwtid); 1636 __FUNCTION__, ep->hwtid);
1637 stop_ep_timer(ep);
1638 attrs.next_state = IWCH_QP_STATE_ERROR; 1638 attrs.next_state = IWCH_QP_STATE_ERROR;
1639 iwch_modify_qp(ep->com.qp->rhp, 1639 iwch_modify_qp(ep->com.qp->rhp,
1640 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE, 1640 ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2aef122f9955..9947a144a929 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -948,7 +948,7 @@ void iwch_qp_rem_ref(struct ib_qp *qp)
948 wake_up(&(to_iwch_qp(qp)->wait)); 948 wake_up(&(to_iwch_qp(qp)->wait));
949} 949}
950 950
951struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn) 951static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
952{ 952{
953 PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn); 953 PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
954 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn); 954 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index 2af3e93b607f..de0fe1b93a0c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -178,7 +178,6 @@ static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
178 178
179void iwch_qp_add_ref(struct ib_qp *qp); 179void iwch_qp_add_ref(struct ib_qp *qp);
180void iwch_qp_rem_ref(struct ib_qp *qp); 180void iwch_qp_rem_ref(struct ib_qp *qp);
181struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
182 181
183struct iwch_ucontext { 182struct iwch_ucontext {
184 struct ib_ucontext ibucontext; 183 struct ib_ucontext ibucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 4dda2f6da2de..9ea00cc4a5f8 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -36,8 +36,8 @@
36 36
37#define NO_SUPPORT -1 37#define NO_SUPPORT -1
38 38
39static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, 39static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
40 u8 * flit_cnt) 40 u8 * flit_cnt)
41{ 41{
42 int i; 42 int i;
43 u32 plen; 43 u32 plen;
@@ -96,8 +96,8 @@ static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
96 return 0; 96 return 0;
97} 97}
98 98
99static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, 99static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
100 u8 *flit_cnt) 100 u8 *flit_cnt)
101{ 101{
102 int i; 102 int i;
103 u32 plen; 103 u32 plen;
@@ -137,8 +137,8 @@ static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
137 return 0; 137 return 0;
138} 138}
139 139
140static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, 140static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
141 u8 *flit_cnt) 141 u8 *flit_cnt)
142{ 142{
143 if (wr->num_sge > 1) 143 if (wr->num_sge > 1)
144 return -EINVAL; 144 return -EINVAL;
@@ -158,9 +158,8 @@ static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
158/* 158/*
159 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now. 159 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
160 */ 160 */
161static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp, 161static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
162 struct ib_sge *sg_list, u32 num_sgle, 162 u32 num_sgle, u32 * pbl_addr, u8 * page_size)
163 u32 * pbl_addr, u8 * page_size)
164{ 163{
165 int i; 164 int i;
166 struct iwch_mr *mhp; 165 struct iwch_mr *mhp;
@@ -206,9 +205,8 @@ static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
206 return 0; 205 return 0;
207} 206}
208 207
209static inline int iwch_build_rdma_recv(struct iwch_dev *rhp, 208static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
210 union t3_wr *wqe, 209 struct ib_recv_wr *wr)
211 struct ib_recv_wr *wr)
212{ 210{
213 int i, err = 0; 211 int i, err = 0;
214 u32 pbl_addr[4]; 212 u32 pbl_addr[4];
@@ -473,8 +471,7 @@ int iwch_bind_mw(struct ib_qp *qp,
473 return err; 471 return err;
474} 472}
475 473
476static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, 474static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
477 int tagged)
478{ 475{
479 switch (t3err) { 476 switch (t3err) {
480 case TPT_ERR_STAG: 477 case TPT_ERR_STAG:
@@ -672,7 +669,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
672 spin_lock_irqsave(&qhp->lock, *flag); 669 spin_lock_irqsave(&qhp->lock, *flag);
673} 670}
674 671
675static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag) 672static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
676{ 673{
677 if (t3b_device(qhp->rhp)) 674 if (t3b_device(qhp->rhp))
678 cxio_set_wq_in_error(&qhp->wq); 675 cxio_set_wq_in_error(&qhp->wq);
@@ -684,7 +681,7 @@ static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
684/* 681/*
685 * Return non zero if at least one RECV was pre-posted. 682 * Return non zero if at least one RECV was pre-posted.
686 */ 683 */
687static inline int rqes_posted(struct iwch_qp *qhp) 684static int rqes_posted(struct iwch_qp *qhp)
688{ 685{
689 return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV; 686 return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
690} 687}
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 6037dd3f87df..8e4846b5c641 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -310,8 +310,9 @@ int mthca_write_mtt_size(struct mthca_dev *dev)
310 return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff; 310 return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
311} 311}
312 312
313void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, 313static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
314 int start_index, u64 *buffer_list, int list_len) 314 struct mthca_mtt *mtt, int start_index,
315 u64 *buffer_list, int list_len)
315{ 316{
316 u64 __iomem *mtts; 317 u64 __iomem *mtts;
317 int i; 318 int i;
@@ -323,8 +324,9 @@ void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
323 mtts + i); 324 mtts + i);
324} 325}
325 326
326void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt, 327static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
327 int start_index, u64 *buffer_list, int list_len) 328 struct mthca_mtt *mtt, int start_index,
329 u64 *buffer_list, int list_len)
328{ 330{
329 __be64 *mtts; 331 __be64 *mtts;
330 dma_addr_t dma_handle; 332 dma_addr_t dma_handle;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 2594db2030b3..fd558267d1cb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -219,7 +219,6 @@ struct ipoib_dev_priv {
219 219
220 union ib_gid local_gid; 220 union ib_gid local_gid;
221 u16 local_lid; 221 u16 local_lid;
222 u8 local_rate;
223 222
224 unsigned int admin_mtu; 223 unsigned int admin_mtu;
225 unsigned int mcast_mtu; 224 unsigned int mcast_mtu;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 4d59682f7d4a..3484e8ba24a4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -65,14 +65,14 @@ struct ipoib_cm_id {
65static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, 65static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
66 struct ib_cm_event *event); 66 struct ib_cm_event *event);
67 67
68static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, 68static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
69 u64 mapping[IPOIB_CM_RX_SG]) 69 u64 mapping[IPOIB_CM_RX_SG])
70{ 70{
71 int i; 71 int i;
72 72
73 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); 73 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
74 74
75 for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i) 75 for (i = 0; i < frags; ++i)
76 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); 76 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
77} 77}
78 78
@@ -90,7 +90,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
90 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr); 90 ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
91 if (unlikely(ret)) { 91 if (unlikely(ret)) {
92 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret); 92 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
93 ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping); 93 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
94 priv->cm.srq_ring[id].mapping);
94 dev_kfree_skb_any(priv->cm.srq_ring[id].skb); 95 dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
95 priv->cm.srq_ring[id].skb = NULL; 96 priv->cm.srq_ring[id].skb = NULL;
96 } 97 }
@@ -98,8 +99,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
98 return ret; 99 return ret;
99} 100}
100 101
101static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, 102static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
102 u64 mapping[IPOIB_CM_RX_SG]) 103 u64 mapping[IPOIB_CM_RX_SG])
103{ 104{
104 struct ipoib_dev_priv *priv = netdev_priv(dev); 105 struct ipoib_dev_priv *priv = netdev_priv(dev);
105 struct sk_buff *skb; 106 struct sk_buff *skb;
@@ -107,7 +108,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
107 108
108 skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12); 109 skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
109 if (unlikely(!skb)) 110 if (unlikely(!skb))
110 return -ENOMEM; 111 return NULL;
111 112
112 /* 113 /*
113 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the 114 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
@@ -119,10 +120,10 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
119 DMA_FROM_DEVICE); 120 DMA_FROM_DEVICE);
120 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { 121 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
121 dev_kfree_skb_any(skb); 122 dev_kfree_skb_any(skb);
122 return -EIO; 123 return NULL;
123 } 124 }
124 125
125 for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) { 126 for (i = 0; i < frags; i++) {
126 struct page *page = alloc_page(GFP_ATOMIC); 127 struct page *page = alloc_page(GFP_ATOMIC);
127 128
128 if (!page) 129 if (!page)
@@ -136,7 +137,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
136 } 137 }
137 138
138 priv->cm.srq_ring[id].skb = skb; 139 priv->cm.srq_ring[id].skb = skb;
139 return 0; 140 return skb;
140 141
141partial_error: 142partial_error:
142 143
@@ -146,7 +147,7 @@ partial_error:
146 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); 147 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
147 148
148 dev_kfree_skb_any(skb); 149 dev_kfree_skb_any(skb);
149 return -ENOMEM; 150 return NULL;
150} 151}
151 152
152static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev, 153static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
@@ -309,7 +310,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
309} 310}
310/* Adjust length of skb with fragments to match received data */ 311/* Adjust length of skb with fragments to match received data */
311static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space, 312static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
312 unsigned int length) 313 unsigned int length, struct sk_buff *toskb)
313{ 314{
314 int i, num_frags; 315 int i, num_frags;
315 unsigned int size; 316 unsigned int size;
@@ -326,7 +327,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
326 327
327 if (length == 0) { 328 if (length == 0) {
328 /* don't need this page */ 329 /* don't need this page */
329 __free_page(frag->page); 330 skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
330 --skb_shinfo(skb)->nr_frags; 331 --skb_shinfo(skb)->nr_frags;
331 } else { 332 } else {
332 size = min(length, (unsigned) PAGE_SIZE); 333 size = min(length, (unsigned) PAGE_SIZE);
@@ -344,10 +345,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
344{ 345{
345 struct ipoib_dev_priv *priv = netdev_priv(dev); 346 struct ipoib_dev_priv *priv = netdev_priv(dev);
346 unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ; 347 unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
347 struct sk_buff *skb; 348 struct sk_buff *skb, *newskb;
348 struct ipoib_cm_rx *p; 349 struct ipoib_cm_rx *p;
349 unsigned long flags; 350 unsigned long flags;
350 u64 mapping[IPOIB_CM_RX_SG]; 351 u64 mapping[IPOIB_CM_RX_SG];
352 int frags;
351 353
352 ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n", 354 ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
353 wr_id, wc->opcode, wc->status); 355 wr_id, wc->opcode, wc->status);
@@ -383,7 +385,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
383 } 385 }
384 } 386 }
385 387
386 if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) { 388 frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
389 (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
390
391 newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
392 if (unlikely(!newskb)) {
387 /* 393 /*
388 * If we can't allocate a new RX buffer, dump 394 * If we can't allocate a new RX buffer, dump
389 * this packet and reuse the old buffer. 395 * this packet and reuse the old buffer.
@@ -393,13 +399,13 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
393 goto repost; 399 goto repost;
394 } 400 }
395 401
396 ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping); 402 ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
397 memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping); 403 memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
398 404
399 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 405 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
400 wc->byte_len, wc->slid); 406 wc->byte_len, wc->slid);
401 407
402 skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len); 408 skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
403 409
404 skb->protocol = ((struct ipoib_header *) skb->data)->proto; 410 skb->protocol = ((struct ipoib_header *) skb->data)->proto;
405 skb->mac.raw = skb->data; 411 skb->mac.raw = skb->data;
@@ -1193,7 +1199,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
1193 priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG; 1199 priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
1194 1200
1195 for (i = 0; i < ipoib_recvq_size; ++i) { 1201 for (i = 0; i < ipoib_recvq_size; ++i) {
1196 if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) { 1202 if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
1203 priv->cm.srq_ring[i].mapping)) {
1197 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 1204 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
1198 ipoib_cm_dev_cleanup(dev); 1205 ipoib_cm_dev_cleanup(dev);
1199 return -ENOMEM; 1206 return -ENOMEM;
@@ -1228,7 +1235,8 @@ void ipoib_cm_dev_cleanup(struct net_device *dev)
1228 return; 1235 return;
1229 for (i = 0; i < ipoib_recvq_size; ++i) 1236 for (i = 0; i < ipoib_recvq_size; ++i)
1230 if (priv->cm.srq_ring[i].skb) { 1237 if (priv->cm.srq_ring[i].skb) {
1231 ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping); 1238 ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
1239 priv->cm.srq_ring[i].mapping);
1232 dev_kfree_skb_any(priv->cm.srq_ring[i].skb); 1240 dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
1233 priv->cm.srq_ring[i].skb = NULL; 1241 priv->cm.srq_ring[i].skb = NULL;
1234 } 1242 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 18d27fd352ad..f9dbc6f68145 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -385,7 +385,7 @@ static void path_rec_completion(int status,
385 struct sk_buff *skb; 385 struct sk_buff *skb;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 if (pathrec) 388 if (!status)
389 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n", 389 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
390 be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid)); 390 be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
391 else 391 else
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b303ce6bc21e..bb2e3d5eee20 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -527,11 +527,9 @@ void ipoib_mcast_join_task(struct work_struct *work)
527 { 527 {
528 struct ib_port_attr attr; 528 struct ib_port_attr attr;
529 529
530 if (!ib_query_port(priv->ca, priv->port, &attr)) { 530 if (!ib_query_port(priv->ca, priv->port, &attr))
531 priv->local_lid = attr.lid; 531 priv->local_lid = attr.lid;
532 priv->local_rate = attr.active_speed * 532 else
533 ib_width_enum_to_int(attr.active_width);
534 } else
535 ipoib_warn(priv, "ib_query_port failed\n"); 533 ipoib_warn(priv, "ib_query_port failed\n");
536 } 534 }
537 535