Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c |  6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c       |  1
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h   |  4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c       | 44
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c      |  3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c        | 31
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c      | 51
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c     | 10
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c             |  5
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c             |  1
-rw-r--r--  drivers/infiniband/hw/nes/nes.c             | 16
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.h          |  1
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c       | 64
13 files changed, 158 insertions(+), 79 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ecff98043589..160ef482712d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1102,9 +1102,7 @@ static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
 	char *cp, *next;
 	unsigned fw_maj, fw_min, fw_mic;
 
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 
 	next = info.fw_version + 1;
 	cp = strsep(&next, ".");
@@ -1192,9 +1190,7 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, ch
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.fw_version);
 }
 
@@ -1207,9 +1203,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
 
 	PDBG("%s dev 0x%p\n", __func__, dev);
-	rtnl_lock();
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
 	return sprintf(buf, "%s\n", info.driver);
 }
 
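The three hunks above drop the RTNL locking around cxgb3's get_drvinfo() callback. The firmware string that call fills in is then split on dots with strsep(), as in fw_vers_string_to_u64(). A minimal userspace sketch of the same strsep() pattern; the sample string and the 16-bit packing are illustrative assumptions, not the driver's exact layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* hypothetical fw_version payload; the driver skips the first
	 * character with "info.fw_version + 1" before splitting */
	char buf[] = "T7.1.0";
	char *next = buf + 1, *cp;
	unsigned fw_maj, fw_min, fw_mic;

	cp = strsep(&next, ".");        /* "7" */
	fw_maj = atoi(cp);
	cp = strsep(&next, ".");        /* "1" */
	fw_min = atoi(cp);
	fw_mic = atoi(next);            /* "0" */

	/* assumed packing: 16 bits per version component */
	uint64_t v = ((uint64_t)fw_maj << 32) |
		     ((uint64_t)fw_min << 16) | fw_mic;
	printf("%u.%u.%u -> %#llx\n", fw_maj, fw_min, fw_mic,
	       (unsigned long long)v);
	return 0;
}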
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3e4585c2318a..19661b2f0406 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -745,7 +745,6 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
 	wqe->read.rdmaop = T3_READ_REQ;
 	wqe->read.reserved[0] = 0;
 	wqe->read.reserved[1] = 0;
-	wqe->read.reserved[2] = 0;
 	wqe->read.rem_stag = cpu_to_be32(1);
 	wqe->read.rem_to = cpu_to_be64(1);
 	wqe->read.local_stag = cpu_to_be32(1);
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 4df887af66a5..7fc35cf0cddf 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -163,7 +163,8 @@ struct ehca_mod_qp_parm {
 /* struct for tracking if cqes have been reported to the application */
 struct ehca_qmap_entry {
 	u16 app_wr_id;
-	u16 reported;
+	u8 reported;
+	u8 cqe_req;
 };
 
 struct ehca_queue_map {
@@ -171,6 +172,7 @@ struct ehca_queue_map {
 	unsigned int entries;
 	unsigned int tail;
 	unsigned int left_to_poll;
+	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
 struct ehca_qp {
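Shrinking reported from u16 to u8 makes room for the new cqe_req flag without growing the per-WQE tracking entry. A quick standalone check of that layout, with shortened stand-in struct names:

#include <stdint.h>
#include <stdio.h>

struct entry_old { uint16_t app_wr_id; uint16_t reported; };
struct entry_new { uint16_t app_wr_id; uint8_t reported; uint8_t cqe_req; };

int main(void)
{
	/* both come out at 4 bytes on common ABIs, so the map array
	 * keeps its size despite the extra flag */
	printf("old=%zu new=%zu\n", sizeof(struct entry_old),
	       sizeof(struct entry_new));
	return 0;
}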
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index cb55be04442c..757035ea246f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -359,36 +359,48 @@ static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
 	*old_attr = new_attr;
 }
 
+/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
+static int replay_modify_qp(struct ehca_sport *sport)
+{
+	int aqp1_destroyed;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+
+	aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
+
+	if (sport->ibqp_sqp[IB_QPT_SMI])
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+	if (!aqp1_destroyed)
+		ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+
+	spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+	return aqp1_destroyed;
+}
+
 static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
 	u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
 	u8 spec_event;
 	struct ehca_sport *sport = &shca->sport[port - 1];
-	unsigned long flags;
 
 	switch (ec) {
 	case 0x30: /* port availability change */
 		if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-			int suppress_event;
-			/* replay modify_qp for sqps */
-			spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-			suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
-			if (sport->ibqp_sqp[IB_QPT_SMI])
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-			if (!suppress_event)
-				ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-			/* AQP1 was destroyed, ignore this event */
-			if (suppress_event)
-				break;
+			/* only replay modify_qp calls in autodetect mode;
+			 * if AQP1 was destroyed, the port is already down
+			 * again and we can drop the event.
+			 */
+			if (ehca_nr_ports < 0)
+				if (replay_modify_qp(sport))
+					break;
 
 			sport->port_state = IB_PORT_ACTIVE;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
 					    "is active");
-			ehca_query_sma_attr(shca, port,
-					    &sport->saved_attr);
+			ehca_query_sma_attr(shca, port, &sport->saved_attr);
 		} else {
 			sport->port_state = IB_PORT_DOWN;
 			dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index bb02a86aa526..bec7e0249358 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -994,8 +994,7 @@ static int ehca_mem_notifier(struct notifier_block *nb,
 		if (printk_timed_ratelimit(&ehca_dmem_warn_time,
 					   30 * 1000))
 			ehca_gen_err("DMEM operations are not allowed"
-				     "as long as an ehca adapter is"
-				     "attached to the LPAR");
+				     "in conjunction with eHCA");
 		return NOTIFY_BAD;
 	}
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 4d54b9f64567..cadbf0cdd910 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
 {
 	int i;
 
-	qmap->tail = 0;
-	for (i = 0; i < qmap->entries; i++)
+	qmap->tail = qmap->entries - 1;
+	qmap->left_to_poll = 0;
+	qmap->next_wqe_idx = 0;
+	for (i = 0; i < qmap->entries; i++) {
 		qmap->map[i].reported = 1;
+		qmap->map[i].cqe_req = 0;
+	}
 }
 
 /*
@@ -860,6 +864,11 @@ static struct ehca_qp *internal_create_qp(
 	if (qp_type == IB_QPT_GSI) {
 		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
 		if (h_ret != H_SUCCESS) {
+			kfree(my_qp->mod_qp_parm);
+			my_qp->mod_qp_parm = NULL;
+			/* the QP pointer is no longer valid */
+			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+				NULL;
 			ret = ehca2ib_return_code(h_ret);
 			goto create_qp_exit6;
 		}
@@ -1116,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 	void *wqe_v;
 	u64 q_ofs;
 	u32 wqe_idx;
+	unsigned int tail_idx;
 
 	/* convert real to abs address */
 	wqe_p = wqe_p & (~(1UL << 63));
@@ -1128,12 +1138,17 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}
 
+	tail_idx = (qmap->tail + 1) % qmap->entries;
 	wqe_idx = q_ofs / ipz_queue->qe_size;
-	if (wqe_idx < qmap->tail)
-		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
-	else
-		qmap->left_to_poll = wqe_idx - qmap->tail;
 
+	/* check all processed wqes, whether a cqe is requested or not */
+	while (tail_idx != wqe_idx) {
+		if (qmap->map[tail_idx].cqe_req)
+			qmap->left_to_poll++;
+		tail_idx = (tail_idx + 1) % qmap->entries;
+	}
+	/* save index in queue, where we have to start flushing */
+	qmap->next_wqe_idx = wqe_idx;
 	return 0;
 }
 
@@ -1180,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+					     my_qp->sq_map.entries;
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+					     my_qp->rq_map.entries;
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
 
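The reworked calc_left_cqes() no longer computes left_to_poll as a plain index distance; it walks the ring from the slot after tail up to the failing WQE and counts only entries that actually requested a completion. A self-contained sketch of that wrap-around walk, with an invented queue size and sample flags:

#include <stdio.h>

#define ENTRIES 8

struct qmap_entry { unsigned char cqe_req; };

int main(void)
{
	struct qmap_entry map[ENTRIES] = {0};
	unsigned int tail = 6, wqe_idx = 2, left_to_poll = 0;

	map[7].cqe_req = 1;     /* signaled WQEs between tail+1 and wqe_idx */
	map[1].cqe_req = 1;

	/* walk from the entry after tail to the failing WQE, wrapping */
	unsigned int tail_idx = (tail + 1) % ENTRIES;
	while (tail_idx != wqe_idx) {
		if (map[tail_idx].cqe_req)
			left_to_poll++;
		tail_idx = (tail_idx + 1) % ENTRIES;
	}
	printf("left_to_poll = %u\n", left_to_poll);    /* prints 2 */
	return 0;
}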
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 64928079eafa..00a648f4316c 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
 	qmap_entry->reported = 0;
+	qmap_entry->cqe_req = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -203,8 +204,10 @@
 
 	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
 	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-	    && !hidden)
+	    && !hidden) {
 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+		qmap_entry->cqe_req = 1;
+	}
 
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
 		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
 		qmap_entry->reported = 0;
+		qmap_entry->cqe_req = 1;
 
 		wqe_cnt++;
 	} /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ repoll:
 		goto repoll;
 	wc->qp = &my_qp->ib_qp;
 
+	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+		/* We got a send completion. */
+		qmap = &my_qp->sq_map;
+	else
+		/* We got a receive completion. */
+		qmap = &my_qp->rq_map;
+
+	/* advance the tail pointer */
+	qmap->tail = qmap_tail_idx;
+
 	if (is_error) {
 		/*
 		 * set left_to_poll to 0 because in error state, we will not
 		 * get any additional CQEs
 		 */
-		ehca_add_to_err_list(my_qp, 1);
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+					     my_qp->sq_map.entries;
 		my_qp->sq_map.left_to_poll = 0;
+		ehca_add_to_err_list(my_qp, 1);
 
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+					     my_qp->rq_map.entries;
+		my_qp->rq_map.left_to_poll = 0;
 		if (HAS_RQ(my_qp))
 			ehca_add_to_err_list(my_qp, 0);
-		my_qp->rq_map.left_to_poll = 0;
 	}
 
-	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-		/* We got a send completion. */
-		qmap = &my_qp->sq_map;
-	else
-		/* We got a receive completion. */
-		qmap = &my_qp->rq_map;
-
 	qmap_entry = &qmap->map[qmap_tail_idx];
 	if (qmap_entry->reported) {
 		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
@@ -738,10 +749,6 @@ repoll:
 	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
 	qmap_entry->reported = 1;
 
-	/* this is a proper completion, we need to advance the tail pointer */
-	if (++qmap->tail == qmap->entries)
-		qmap->tail = 0;
-
 	/* if left_to_poll is decremented to 0, add the QP to the error list */
 	if (qmap->left_to_poll > 0) {
 		qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 	else
 		qmap = &my_qp->rq_map;
 
-	qmap_entry = &qmap->map[qmap->tail];
+	qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 	while ((nr < num_entries) && (qmap_entry->reported == 0)) {
 		/* generate flush CQE */
+
 		memset(wc, 0, sizeof(*wc));
 
-		offset = qmap->tail * ipz_queue->qe_size;
+		offset = qmap->next_wqe_idx * ipz_queue->qe_size;
 		wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
 		if (!wqe) {
 			ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 
 		wc->qp = &my_qp->ib_qp;
 
-		/* mark as reported and advance tail pointer */
+		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;
-		if (++qmap->tail == qmap->entries)
-			qmap->tail = 0;
-		qmap_entry = &qmap->map[qmap->tail];
+		qmap->next_wqe_idx++;
+		if (qmap->next_wqe_idx == qmap->entries)
+			qmap->next_wqe_idx = 0;
+		qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 		wc++; nr++;
 	}
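generate_flush_cqes() now starts flushing at next_wqe_idx instead of the consumer tail and advances it with the same wrap-around. A toy version of that loop; the ring size and contents are invented:

#include <stdio.h>

#define ENTRIES 4

struct qmap_entry { unsigned char reported; };

int main(void)
{
	struct qmap_entry map[ENTRIES] = { {1}, {0}, {0}, {1} };
	unsigned int next_wqe_idx = 1, nr = 0, num_entries = 8;

	/* emit flush completions until an already-reported entry stops us */
	while (nr < num_entries && map[next_wqe_idx].reported == 0) {
		printf("flush CQE for wqe %u\n", next_wqe_idx);
		map[next_wqe_idx].reported = 1;
		if (++next_wqe_idx == ENTRIES)
			next_wqe_idx = 0;       /* wrap around the ring */
		nr++;
	}
	printf("flushed %u wqes\n", nr);        /* flushes entries 1 and 2 */
	return 0;
}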
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index fc0f6d9e6030..2296832f94da 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -156,7 +156,7 @@ bail:
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
- * @wr_id_only: update wr_id only, not SGEs
+ * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
@@ -173,8 +173,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 	u32 tail;
 	int ret;
 
-	qp->r_sge.sg_list = qp->r_sg_list;
-
 	if (qp->ibqp.srq) {
 		srq = to_isrq(qp->ibqp.srq);
 		handler = srq->ibsrq.event_handler;
@@ -206,8 +204,10 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		wqe = get_rwqe_ptr(rq, tail);
 		if (++tail >= rq->size)
 			tail = 0;
-	} while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
-						&qp->r_sge));
+		if (wr_id_only)
+			break;
+		qp->r_sge.sg_list = qp->r_sg_list;
+	} while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
 	qp->r_wr_id = wqe->wr_id;
 	wq->tail = tail;
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index d0866a3636e2..18308494a195 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -343,6 +343,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
 	struct mlx4_ib_cq *cq = to_mcq(ibcq);
+	struct mlx4_mtt mtt;
 	int outst_cqe;
 	int err;
 
@@ -376,10 +377,13 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		goto out;
 	}
 
+	mtt = cq->buf.mtt;
+
 	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
 	if (err)
 		goto err_buf;
 
+	mlx4_mtt_cleanup(dev->dev, &mtt);
 	if (ibcq->uobject) {
 		cq->buf = cq->resize_buf->buf;
 		cq->ibcq.cqe = cq->resize_buf->cqe;
@@ -406,6 +410,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	goto out;
 
 err_buf:
+	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
 	if (!ibcq->uobject)
 		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
 				    cq->resize_buf->cqe);
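The resize fix follows a common ownership pattern: save the old MTT handle before handing the hardware the new one, free the old handle only once mlx4_cq_resize() succeeds, and free the new one on the error path. A generic sketch of that pattern; mtt_alloc(), mtt_free() and resize_hw() are invented stand-ins, not mlx4 API:

#include <stdio.h>
#include <stdlib.h>

struct mtt { int id; };                 /* stand-in for the real handle */

static struct mtt *mtt_alloc(int id)
{
	struct mtt *m = malloc(sizeof(*m));
	m->id = id;
	return m;
}
static void mtt_free(struct mtt *m) { printf("freeing mtt %d\n", m->id); free(m); }
static int resize_hw(struct mtt *m) { (void)m; return 0; /* pretend success */ }

int main(void)
{
	struct mtt *cur = mtt_alloc(1);
	struct mtt *new_mtt = mtt_alloc(2);
	struct mtt *old = cur;          /* save before the handover */

	if (resize_hw(new_mtt)) {
		mtt_free(new_mtt);      /* error path: drop the new one */
		return 1;
	}
	cur = new_mtt;
	mtt_free(old);                  /* success: old handle is now dead */
	printf("cq now uses mtt %d\n", cur->id);
	return 0;
}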
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 87f5c5a87b98..8e4d26d56a95 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -205,6 +205,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 		goto err_mr;
 
 	mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
+	mr->umem = NULL;
 
 	return &mr->ibmr;
 
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index a2b04d62b1a4..aa1dc41f04c8 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -95,6 +95,10 @@ unsigned int wqm_quanta = 0x10000;
 module_param(wqm_quanta, int, 0644);
 MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
 
+static unsigned int limit_maxrdreqsz;
+module_param(limit_maxrdreqsz, bool, 0644);
+MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
@@ -588,6 +592,18 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 				nesdev->nesadapter->port_count;
 	}
 
+	if ((limit_maxrdreqsz ||
+	     ((nesdev->nesadapter->phy_type[0] == NES_PHY_TYPE_GLADIUS) &&
+	      (hw_rev == NE020_REV1))) &&
+	    (pcie_get_readrq(pcidev) > 256)) {
+		if (pcie_set_readrq(pcidev, 256))
+			printk(KERN_ERR PFX "Unable to set max read request"
+			       " to 256 bytes\n");
+		else
+			nes_debug(NES_DBG_INIT, "Max read request size set"
+				  " to 256 bytes\n");
+	}
+
 	tasklet_init(&nesdev->dpc_tasklet, nes_dpc, (unsigned long)nesdev);
 
 	/* bring up the Control QP */
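The new parameter forces the clamp even on hardware that does not require it, e.g. by loading with modprobe iw_nes limit_maxrdreqsz=1. pcie_get_readrq() and pcie_set_readrq() are the stock PCI core helpers for a device's Max Read Request Size; the same clamp as a standalone helper, sketched with simplified error handling:

#include <linux/pci.h>

/* Clamp a device's PCIe Max Read Request Size to 256 bytes, mirroring
 * the nes_probe() hunk above. Returns 0 on success or if no change was
 * needed; sketch only, not the driver's exact code. */
static int clamp_readrq_256(struct pci_dev *pdev)
{
	int err;

	if (pcie_get_readrq(pdev) <= 256)
		return 0;

	err = pcie_set_readrq(pdev, 256);
	if (err)
		dev_err(&pdev->dev, "unable to clamp MRRS to 256 bytes\n");
	return err;
}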
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 610b9d859597..bc0b4de04450 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -40,6 +40,7 @@
 #define NES_PHY_TYPE_ARGUS 4
 #define NES_PHY_TYPE_PUMA_1G 5
 #define NES_PHY_TYPE_PUMA_10G 6
+#define NES_PHY_TYPE_GLADIUS 7
 
 #define NES_MULTICAST_PF_MAX 8
 
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 932e56fcf774..d36c9a0bf1bb 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -220,14 +220,14 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
 	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 	qsize = nesqp->hwqp.sq_tail;
 
 	/* Check for SQ overflow */
 	if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
 		spin_unlock_irqrestore(&nesqp->lock, flags);
 		return -EINVAL;
 	}
 
@@ -269,7 +269,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
 	nes_write32(nesdev->regs+NES_WQE_ALLOC,
 		    (1 << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 
 	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	return 0;
 }
@@ -349,7 +349,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			if (nesfmr->nesmr.pbls_used > nesadapter->free_4kpbl) {
 				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 				ret = -ENOMEM;
-				goto failed_vpbl_alloc;
+				goto failed_vpbl_avail;
 			} else {
 				nesadapter->free_4kpbl -= nesfmr->nesmr.pbls_used;
 			}
@@ -357,7 +357,7 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			if (nesfmr->nesmr.pbls_used > nesadapter->free_256pbl) {
 				spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 				ret = -ENOMEM;
-				goto failed_vpbl_alloc;
+				goto failed_vpbl_avail;
 			} else {
 				nesadapter->free_256pbl -= nesfmr->nesmr.pbls_used;
 			}
@@ -391,14 +391,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 			goto failed_vpbl_alloc;
 		}
 
-		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_KERNEL);
+		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
+		nesfmr->root_vpbl.leaf_vpbl = kzalloc(sizeof(*nesfmr->root_vpbl.leaf_vpbl)*1024, GFP_ATOMIC);
 		if (!nesfmr->root_vpbl.leaf_vpbl) {
 			spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
 			ret = -ENOMEM;
 			goto failed_leaf_vpbl_alloc;
 		}
 
-		nesfmr->leaf_pbl_cnt = nesfmr->nesmr.pbls_used-1;
 		nes_debug(NES_DBG_MR, "two level pbl, root_vpbl.pbl_vbase=%p"
 			  " leaf_pbl_cnt=%d root_vpbl.leaf_vpbl=%p\n",
 			  nesfmr->root_vpbl.pbl_vbase, nesfmr->leaf_pbl_cnt, nesfmr->root_vpbl.leaf_vpbl);
@@ -519,6 +519,16 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
 					    nesfmr->root_vpbl.pbl_pbase);
 
 	failed_vpbl_alloc:
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k)
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+		else
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+failed_vpbl_avail:
 	kfree(nesfmr);
 
 	failed_fmr_alloc:
@@ -534,18 +544,14 @@ static struct ib_fmr *nes_alloc_fmr(struct ib_pd *ibpd,
  */
 static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 {
+	unsigned long flags;
 	struct nes_mr *nesmr = to_nesmr_from_ibfmr(ibfmr);
 	struct nes_fmr *nesfmr = to_nesfmr(nesmr);
 	struct nes_vnic *nesvnic = to_nesvnic(ibfmr->device);
 	struct nes_device *nesdev = nesvnic->nesdev;
-	struct nes_mr temp_nesmr = *nesmr;
+	struct nes_adapter *nesadapter = nesdev->nesadapter;
 	int i = 0;
 
-	temp_nesmr.ibmw.device = ibfmr->device;
-	temp_nesmr.ibmw.pd = ibfmr->pd;
-	temp_nesmr.ibmw.rkey = ibfmr->rkey;
-	temp_nesmr.ibmw.uobject = NULL;
-
 	/* free the resources */
 	if (nesfmr->leaf_pbl_cnt == 0) {
 		/* single PBL case */
@@ -561,8 +567,24 @@ static int nes_dealloc_fmr(struct ib_fmr *ibfmr)
 		pci_free_consistent(nesdev->pcidev, 8192, nesfmr->root_vpbl.pbl_vbase,
 				    nesfmr->root_vpbl.pbl_pbase);
 	}
+	nesmr->ibmw.device = ibfmr->device;
+	nesmr->ibmw.pd = ibfmr->pd;
+	nesmr->ibmw.rkey = ibfmr->rkey;
+	nesmr->ibmw.uobject = NULL;
 
-	return nes_dealloc_mw(&temp_nesmr.ibmw);
+	if (nesfmr->nesmr.pbls_used != 0) {
+		spin_lock_irqsave(&nesadapter->pbl_lock, flags);
+		if (nesfmr->nesmr.pbl_4k) {
+			nesadapter->free_4kpbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_4kpbl > nesadapter->max_4kpbl);
+		} else {
+			nesadapter->free_256pbl += nesfmr->nesmr.pbls_used;
+			WARN_ON(nesadapter->free_256pbl > nesadapter->max_256pbl);
+		}
+		spin_unlock_irqrestore(&nesadapter->pbl_lock, flags);
+	}
+
+	return nes_dealloc_mw(&nesmr->ibmw);
 }
 
 
@@ -1595,7 +1617,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
 		nes_ucontext->mcrqf = req.mcrqf;
 		if (nes_ucontext->mcrqf) {
 			if (nes_ucontext->mcrqf & 0x80000000)
-				nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 12 + (nes_ucontext->mcrqf & 0xf) - 1;
+				nescq->hw_cq.cq_number = nesvnic->nic.qp_id + 28 + 2 * ((nes_ucontext->mcrqf & 0xf) - 1);
 			else if (nes_ucontext->mcrqf & 0x40000000)
 				nescq->hw_cq.cq_number = nes_ucontext->mcrqf & 0xffff;
 			else
@@ -3212,7 +3234,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
 	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.sq_head;
 
@@ -3337,7 +3359,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 			    (counter << 24) | 0x00800000 | nesqp->hwqp.qp_id);
 	}
 
 	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3368,7 +3390,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 	if (nesqp->ibqp_state > IB_QPS_RTS)
 		return -EINVAL;
 
 	spin_lock_irqsave(&nesqp->lock, flags);
 
 	head = nesqp->hwqp.rq_head;
 
@@ -3421,7 +3443,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 		nes_write32(nesdev->regs+NES_WQE_ALLOC, (counter<<24) | nesqp->hwqp.qp_id);
 	}
 
 	spin_unlock_irqrestore(&nesqp->lock, flags);
 
 	if (err)
 		*bad_wr = ib_wr;
@@ -3453,7 +3475,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 	nes_debug(NES_DBG_CQ, "\n");
 
 	spin_lock_irqsave(&nescq->lock, flags);
 
 	head = nescq->hw_cq.cq_head;
 	cq_size = nescq->hw_cq.cq_size;
@@ -3562,7 +3584,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 	nes_debug(NES_DBG_CQ, "Reporting %u completions for CQ%u.\n",
 		  cqe_count, nescq->hw_cq.cq_number);
 
 	spin_unlock_irqrestore(&nescq->lock, flags);
 
 	return cqe_count;
 }
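The SQ overflow test visible in nes_bind_mw() above is the usual ring-fullness check: the queue counts as full when head sits one slot behind tail modulo the ring size, and adding 2 * qsize keeps the modulo operand positive. For example, with qsize 16, head 5 and tail 6, (5 + 32 - 6) % 16 = 15 = qsize - 1, so the post is rejected. The same arithmetic as a standalone check, with an invented ring size:

#include <stdio.h>

/* Ring-fullness test used by the nes SQ posting paths; adding 2*qsize
 * before the subtraction keeps the left side of the modulo positive. */
static int sq_full(unsigned int head, unsigned int tail, unsigned int qsize)
{
	return ((head + (2 * qsize) - tail) % qsize) == (qsize - 1);
}

int main(void)
{
	unsigned int qsize = 16;        /* illustrative ring size */

	printf("head=5 tail=6 -> %s\n", sq_full(5, 6, qsize) ? "full" : "ok");
	printf("head=5 tail=3 -> %s\n", sq_full(5, 3, qsize) ? "full" : "ok");
	return 0;
}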