author	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-07 20:08:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-07 20:08:02 -0400
commit	3cc08fc35db75b059118626c30b60b0f56583802 (patch)
tree	704d71199c8be8d5b822ca424675291e8cec7bde /drivers/infiniband
parent	faa38b5e0e092914764cdba9f83d31a3f794d182 (diff)
parent	03b37ecdb3975f09832747600853d3818a50eda3 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (42 commits)
IB/qib: Add missing <linux/slab.h> include
IB/ehca: Drop unnecessary NULL test
RDMA/nes: Fix confusing if statement indentation
IB/ehca: Init irq tasklet before irq can happen
RDMA/nes: Fix misindented code
RDMA/nes: Fix showing wqm_quanta
RDMA/nes: Get rid of "set but not used" variables
RDMA/nes: Read firmware version from correct place
IB/srp: Export req_lim via sysfs
IB/srp: Make receive buffer handling more robust
IB/srp: Use print_hex_dump()
IB: Rename RAW_ETY to RAW_ETHERTYPE
RDMA/nes: Fix two sparse warnings
RDMA/cxgb3: Make needlessly global iwch_l2t_send() static
IB/iser: Make needlessly global iser_alloc_rx_descriptors() static
RDMA/cxgb4: Add timeouts when waiting for FW responses
IB/qib: Fix race between qib_error_qp() and receive packet processing
IB/qib: Limit the number of packets processed per interrupt
IB/qib: Allow writes to the diag_counters to be able to clear them
IB/qib: Set cfgctxts to number of CPUs by default
...
Diffstat (limited to 'drivers/infiniband')
46 files changed, 575 insertions, 494 deletions
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ad63b79afac1..64e0903091a8 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -2409,10 +2409,12 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
 		msg_response = CM_MSG_RESPONSE_REP;
 		break;
 	case IB_CM_ESTABLISHED:
-		cm_state = cm_id->state;
-		lap_state = IB_CM_MRA_LAP_SENT;
-		msg_response = CM_MSG_RESPONSE_OTHER;
-		break;
+		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
+			cm_state = cm_id->state;
+			lap_state = IB_CM_MRA_LAP_SENT;
+			msg_response = CM_MSG_RESPONSE_OTHER;
+			break;
+		}
 	default:
 		ret = -EINVAL;
 		goto error1;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 6babb72b39fc..5fa856909511 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1085,7 +1085,6 @@ err_cdev:
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
 	struct ib_umad_file *file;
-	int already_dead;
 	int id;
 
 	dev_set_drvdata(port->dev, NULL);
@@ -1103,7 +1102,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
 
 	list_for_each_entry(file, &port->file_list, port_list) {
 		mutex_lock(&file->mutex);
-		already_dead = file->agents_dead;
 		file->agents_dead = 1;
 		mutex_unlock(&file->mutex);
 
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index a7da9be43e61..e0fa22238715 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -310,8 +310,8 @@ EXPORT_SYMBOL(ib_create_qp);
 
 static const struct {
 	int			valid;
-	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETY + 1];
-	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETY + 1];
+	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETHERTYPE + 1];
+	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETHERTYPE + 1];
 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index abd683ea326d..d88077a21994 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -137,7 +137,7 @@ static void stop_ep_timer(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
-int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
+static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e)
 {
 	int	error = 0;
 	struct cxio_rdev *rdev;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9bbb65bba67e..c64d27bf2c15 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -371,7 +371,7 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 			    qhp->wq.sq_size_log2);
-	if (num_wrs <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		err = -ENOMEM;
 		goto out;
@@ -554,7 +554,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	}
 	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
 			    qhp->wq.sq_size_log2);
-	if ((num_wrs) <= 0) {
+	if (num_wrs == 0) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		return -ENOMEM;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8c9b483a0d93..32d352a88d50 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@ static char *states[] = {
 	NULL,
 };
 
+static int dack_mode;
+module_param(dack_mode, int, 0644);
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
@@ -469,11 +473,12 @@ static int send_connect(struct c4iw_ep *ep)
 		       __func__);
 		return -ENOMEM;
 	}
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -780,11 +785,11 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
-	if (ep->com.cm_id) {
-		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
-		     ep->hwtid, status);
-		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-	}
+
+	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
+	     ep->hwtid, status);
+	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+
 	if (status < 0) {
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
@@ -845,8 +850,10 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 						    ep->hwtid));
-	req->credit_dack = cpu_to_be32(credits);
-	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
+	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+				       F_RX_DACK_CHANGE |
+				       V_RX_DACK_MODE(dack_mode));
+	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 	return credits;
 }
@@ -1264,6 +1271,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
 	opt0 = KEEP_ALIVE(1) |
+	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
@@ -1287,7 +1295,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 						    ep->hwtid));
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
-	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 
 	return;
@@ -1344,7 +1352,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	u16 rss_qid;
 	u32 mtu;
 	int step;
-	int txq_idx;
+	int txq_idx, ctrlq_idx;
 
 	parent_ep = lookup_stid(t, stid);
 	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1376,6 +1384,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(pdev) * step;
+		ctrlq_idx = cxgb4_port_idx(pdev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1387,6 +1396,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
+		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 		rss_qid = dev->rdev.lldi.rxq_ids[
 			  cxgb4_port_idx(dst->neighbour->dev) * step];
@@ -1426,6 +1436,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	child_ep->rss_qid = rss_qid;
 	child_ep->mtu = mtu;
 	child_ep->txq_idx = txq_idx;
+	child_ep->ctrlq_idx = ctrlq_idx;
 
 	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
 	     tx_chan, smac_idx, rss_qid);
@@ -1473,8 +1484,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	int closing = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1511,7 +1520,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		wake_up(&ep->com.waitq);
 		break;
 	case FPDU_MODE:
-		start_timer = 1;
+		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		closing = 1;
 		peer_close_upcall(ep);
@@ -1524,7 +1533,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		disconnect = 0;
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if (ep->com.cm_id && ep->com.qp) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
@@ -1547,10 +1556,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	}
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1579,7 +1584,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned long flags;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv_abort(req->status)) {
@@ -1594,10 +1598,10 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CONNECTING:
 		break;
 	case MPA_REQ_WAIT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		break;
 	case MPA_REQ_SENT:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		connect_reply_upcall(ep, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
@@ -1621,7 +1625,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MORIBUND:
 	case CLOSING:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		/*FALLTHROUGH*/
 	case FPDU_MODE:
 		if (ep->com.cm_id && ep->com.qp) {
@@ -1667,8 +1671,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	rpl->cmd = CPL_ABORT_NO_RST;
 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
 out:
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1683,7 +1685,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	int release = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
-	int stop_timer = 0;
 
 	ep = lookup_tid(t, tid);
 
@@ -1697,7 +1698,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		__state_set(&ep->com, MORIBUND);
 		break;
 	case MORIBUND:
-		stop_timer = 1;
+		stop_ep_timer(ep);
 		if ((ep->com.cm_id) && (ep->com.qp)) {
 			attrs.next_state = C4IW_QP_STATE_IDLE;
 			c4iw_modify_qp(ep->com.qp->rhp,
@@ -1717,8 +1718,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	}
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (release)
 		release_ep_resources(ep);
 	return 0;
@@ -1957,6 +1956,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
+		ep->ctrlq_idx = cxgb4_port_idx(pdev);
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
 			      cxgb4_port_idx(pdev) * step];
 		dev_put(pdev);
@@ -1971,6 +1971,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		step = ep->com.dev->rdev.lldi.ntxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
+		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
 		step = ep->com.dev->rdev.lldi.nrxq /
 		       ep->com.dev->rdev.lldi.nchan;
 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
@@ -2049,8 +2050,15 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail3;
 
 	/* wait for pass_open_rpl */
-	wait_event(ep->com.waitq, ep->com.rpl_done);
-	err = ep->com.rpl_err;
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
@@ -2079,10 +2087,17 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	err = listen_stop(ep);
 	if (err)
 		goto done;
-	wait_event(ep->com.waitq, ep->com.rpl_done);
+	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
+	if (ep->com.rpl_done)
+		err = ep->com.rpl_err;
+	else {
+		printk(KERN_ERR MOD "Device %s not responding!\n",
+		       pci_name(ep->com.dev->rdev.lldi.pdev));
+		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
+		err = -EIO;
+	}
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
-	err = ep->com.rpl_err;
 	cm_id->rem_ref(cm_id);
 	c4iw_put_ep(&ep->com);
 	return err;
@@ -2095,8 +2110,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	int close = 0;
 	int fatal = 0;
 	struct c4iw_rdev *rdev;
-	int start_timer = 0;
-	int stop_timer = 0;
 
 	spin_lock_irqsave(&ep->com.lock, flags);
 
@@ -2120,7 +2133,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 			ep->com.state = ABORTING;
 		else {
 			ep->com.state = CLOSING;
-			start_timer = 1;
+			start_ep_timer(ep);
 		}
 		set_bit(CLOSE_SENT, &ep->com.flags);
 		break;
@@ -2128,7 +2141,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
 			close = 1;
 			if (abrupt) {
-				stop_timer = 1;
+				stop_ep_timer(ep);
 				ep->com.state = ABORTING;
 			} else
 				ep->com.state = MORIBUND;
@@ -2146,10 +2159,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	}
 
 	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (start_timer)
-		start_ep_timer(ep);
-	if (stop_timer)
-		stop_ep_timer(ep);
 	if (close) {
 		if (abrupt)
 			ret = abort_connection(ep, NULL, gfp);
@@ -2244,7 +2253,7 @@ static void process_work(struct work_struct *work)
 {
 	struct sk_buff *skb = NULL;
 	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
+	struct cpl_act_establish *rpl;
 	unsigned int opcode;
 	int ret;
 
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index fac5c6e68011..b3daf39eed4a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -43,7 +43,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	int ret;
 
 	wr_len = sizeof *res_wr + sizeof *res;
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
@@ -118,7 +118,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + sizeof *res;
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err4;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d33e1a668811..ed459b8f800f 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -619,6 +619,7 @@ struct c4iw_ep {
 	u16 plen;
 	u16 rss_qid;
 	u16 txq_idx;
+	u16 ctrlq_idx;
 	u8 tos;
 };
 
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 82b5703b8947..269373a62f22 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -59,7 +59,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
 	wr_len = roundup(sizeof *req + sizeof *sc +
 			 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 86b93f2ecca3..93f6e5bf0ec5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -130,7 +130,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	/* build fw_ri_res_wr */
 	wr_len = sizeof *res_wr + 2 * sizeof *res;
 
-	skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(wr_len, GFP_KERNEL);
 	if (!skb) {
 		ret = -ENOMEM;
 		goto err7;
@@ -162,7 +162,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -185,7 +185,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
-		V_FW_RI_RES_WR_FBMIN(3) |
+		V_FW_RI_RES_WR_FBMIN(2) |
 		V_FW_RI_RES_WR_FBMAX(3) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
@@ -235,12 +235,78 @@ err1:
 	return -ENOMEM;
 }
 
-static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
-{
-	int i;
-	u32 plen;
-	int size;
-	u8 *datap;
+static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
+		      struct ib_send_wr *wr, int max, u32 *plenp)
+{
+	u8 *dstp, *srcp;
+	u32 plen = 0;
+	int i;
+	int rem, len;
+
+	dstp = (u8 *)immdp->data;
+	for (i = 0; i < wr->num_sge; i++) {
+		if ((plen + wr->sg_list[i].length) > max)
+			return -EMSGSIZE;
+		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
+		plen += wr->sg_list[i].length;
+		rem = wr->sg_list[i].length;
+		while (rem) {
+			if (dstp == (u8 *)&sq->queue[sq->size])
+				dstp = (u8 *)sq->queue;
+			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
+				len = rem;
+			else
+				len = (u8 *)&sq->queue[sq->size] - dstp;
+			memcpy(dstp, srcp, len);
+			dstp += len;
+			srcp += len;
+			rem -= len;
+		}
+	}
+	immdp->op = FW_RI_DATA_IMMD;
+	immdp->r1 = 0;
+	immdp->r2 = 0;
+	immdp->immdlen = cpu_to_be32(plen);
+	*plenp = plen;
+	return 0;
+}
+
+static int build_isgl(__be64 *queue_start, __be64 *queue_end,
+		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
+		      int num_sge, u32 *plenp)
+
+{
+	int i;
+	u32 plen = 0;
+	__be64 *flitp = (__be64 *)isglp->sge;
+
+	for (i = 0; i < num_sge; i++) {
+		if ((plen + sg_list[i].length) < plen)
+			return -EMSGSIZE;
+		plen += sg_list[i].length;
+		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
+				     sg_list[i].length);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+		*flitp = cpu_to_be64(sg_list[i].addr);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+	}
+	isglp->op = FW_RI_DATA_ISGL;
+	isglp->r1 = 0;
+	isglp->nsge = cpu_to_be16(num_sge);
+	isglp->r2 = 0;
+	if (plenp)
+		*plenp = plen;
+	return 0;
+}
+
+static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
+			   struct ib_send_wr *wr, u8 *len16)
+{
+	u32 plen;
+	int size;
+	int ret;
 
 	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
@@ -267,43 +333,23 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 	default:
 		return -EINVAL;
 	}
+
 	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->send.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_SEND_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				    (void *)(unsigned long)wr->sg_list[i].addr,
-				    wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->send.u.immd_src[0].r1 = 0;
-			wqe->send.u.immd_src[0].r2 = 0;
-			wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->send.u.immd_src, wr,
+					 T4_MAX_SEND_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->send.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->send.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->send.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->send.u.isgl_src[0].r1 = 0;
-			wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
-			wqe->send.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->send.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -313,62 +359,40 @@ static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 		wqe->send.u.immd_src[0].r2 = 0;
 		wqe->send.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->send.plen = cpu_to_be32(plen);
 	return 0;
 }
 
-static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
+			    struct ib_send_wr *wr, u8 *len16)
 {
-	int i;
 	u32 plen;
 	int size;
-	u8 *datap;
+	int ret;
 
-	if (wr->num_sge > T4_MAX_WRITE_SGE)
+	if (wr->num_sge > T4_MAX_SEND_SGE)
 		return -EINVAL;
 	wqe->write.r2 = 0;
 	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
 	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
-	plen = 0;
 	if (wr->num_sge) {
 		if (wr->send_flags & IB_SEND_INLINE) {
-			datap = (u8 *)wqe->write.u.immd_src[0].data;
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) >
-				    T4_MAX_WRITE_INLINE) {
-					return -EMSGSIZE;
-				}
-				plen += wr->sg_list[i].length;
-				memcpy(datap,
-				    (void *)(unsigned long)wr->sg_list[i].addr,
-				    wr->sg_list[i].length);
-				datap += wr->sg_list[i].length;
-			}
-			wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
-			wqe->write.u.immd_src[0].r1 = 0;
-			wqe->write.u.immd_src[0].r2 = 0;
-			wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
+			ret = build_immd(sq, wqe->write.u.immd_src, wr,
+					 T4_MAX_WRITE_INLINE, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
 			       plen;
 		} else {
-			for (i = 0; i < wr->num_sge; i++) {
-				if ((plen + wr->sg_list[i].length) < plen)
-					return -EMSGSIZE;
-				plen += wr->sg_list[i].length;
-				wqe->write.u.isgl_src[0].sge[i].stag =
-					cpu_to_be32(wr->sg_list[i].lkey);
-				wqe->write.u.isgl_src[0].sge[i].len =
-					cpu_to_be32(wr->sg_list[i].length);
-				wqe->write.u.isgl_src[0].sge[i].to =
-					cpu_to_be64(wr->sg_list[i].addr);
-			}
-			wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
-			wqe->write.u.isgl_src[0].r1 = 0;
-			wqe->write.u.isgl_src[0].nsge =
-				cpu_to_be16(wr->num_sge);
-			wqe->write.u.isgl_src[0].r2 = 0;
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->write.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
 			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
 			       wr->num_sge * sizeof(struct fw_ri_sge);
 		}
@@ -378,6 +402,7 @@ static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 		wqe->write.u.immd_src[0].r2 = 0;
 		wqe->write.u.immd_src[0].immdlen = 0;
 		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		plen = 0;
 	}
 	*len16 = DIV_ROUND_UP(size, 16);
 	wqe->write.plen = cpu_to_be32(plen);
@@ -416,29 +441,13 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
 static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 			   struct ib_recv_wr *wr, u8 *len16)
 {
-	int i;
-	int plen = 0;
+	int ret;
 
-	for (i = 0; i < wr->num_sge; i++) {
-		if ((plen + wr->sg_list[i].length) < plen)
-			return -EMSGSIZE;
-		plen += wr->sg_list[i].length;
-		wqe->recv.isgl.sge[i].stag =
-			cpu_to_be32(wr->sg_list[i].lkey);
-		wqe->recv.isgl.sge[i].len =
-			cpu_to_be32(wr->sg_list[i].length);
-		wqe->recv.isgl.sge[i].to =
-			cpu_to_be64(wr->sg_list[i].addr);
-	}
-	for (; i < T4_MAX_RECV_SGE; i++) {
-		wqe->recv.isgl.sge[i].stag = 0;
-		wqe->recv.isgl.sge[i].len = 0;
-		wqe->recv.isgl.sge[i].to = 0;
-	}
-	wqe->recv.isgl.op = FW_RI_DATA_ISGL;
-	wqe->recv.isgl.r1 = 0;
-	wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
-	wqe->recv.isgl.r2 = 0;
+	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
+			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
+			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+	if (ret)
+		return ret;
 	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
 			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
 	return 0;
@@ -547,7 +556,9 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
+		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+
 		fw_flags = 0;
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
@@ -564,12 +575,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				swsqe->opcode = FW_RI_SEND;
 			else
 				swsqe->opcode = FW_RI_SEND_WITH_INV;
-			err = build_rdma_send(wqe, wr, &len16);
+			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_WRITE:
 			fw_opcode = FW_RI_RDMA_WRITE_WR;
 			swsqe->opcode = FW_RI_RDMA_WRITE;
-			err = build_rdma_write(wqe, wr, &len16);
+			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
 			break;
 		case IB_WR_RDMA_READ:
 		case IB_WR_RDMA_READ_WITH_INV:
@@ -619,8 +630,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		     swsqe->opcode, swsqe->read_len);
 		wr = wr->next;
 		num_wrs--;
-		t4_sq_produce(&qhp->wq);
-		idx++;
+		t4_sq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_sq_db(&qhp->wq, idx);
@@ -656,7 +667,9 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			*bad_wr = wr;
 			break;
 		}
-		wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
+		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
+					   qhp->wq.rq.wq_pidx *
+					   T4_EQ_ENTRY_SIZE);
 		if (num_wrs)
 			err = build_rdma_recv(qhp, wqe, wr, &len16);
 		else
@@ -675,15 +688,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		if (len16 < 5)
-			wqe->flits[8] = 0;
-
 		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
 		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
-		t4_rq_produce(&qhp->wq);
+		t4_rq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
 		wr = wr->next;
 		num_wrs--;
-		idx++;
 	}
 	if (t4_wq_db_enabled(&qhp->wq))
 		t4_ring_rq_db(&qhp->wq, idx);
@@ -951,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
 		__flush_qp(qhp, rchp, schp, flag);
 }
 
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+		     struct c4iw_ep *ep)
 {
 	struct fw_ri_wr *wqe;
 	int ret;
@@ -959,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	struct sk_buff *skb;
 
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	     ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
@@ -972,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 		FW_WR_OP(FW_RI_INIT_WR) |
 		FW_WR_COMPL(1));
 	wqe->flowid_len16 = cpu_to_be32(
 		FW_WR_FLOWID(qhp->ep->hwtid) |
 		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 	wqe->cookie = (u64)&wr_wait;
 
@@ -1035,7 +1046,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
@@ -1202,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		case C4IW_QP_STATE_CLOSING:
 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
 			qhp->attr.state = C4IW_QP_STATE_CLOSING;
+			ep = qhp->ep;
 			if (!internal) {
 				abort = 0;
 				disconnect = 1;
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 			}
 			spin_unlock_irqrestore(&qhp->lock, flag);
-			ret = rdma_fini(rhp, qhp);
+			ret = rdma_fini(rhp, qhp, ep);
 			spin_lock_irqsave(&qhp->lock, flag);
 			if (ret) {
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 				disconnect = abort = 1;
 				goto err;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 9cf8d85bfcff..aef55f42bea4 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -65,10 +65,10 @@ struct t4_status_page {
 	u8 db_off;
 };
 
-#define T4_EQ_SIZE 64
+#define T4_EQ_ENTRY_SIZE 64
 
 #define T4_SQ_NUM_SLOTS 4
-#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
 #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
 			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
 #define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
@@ -84,7 +84,7 @@ struct t4_status_page {
 #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
 
 #define T4_RQ_NUM_SLOTS 2
-#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
 #define T4_MAX_RECV_SGE 4
 
 union t4_wr {
@@ -97,20 +97,18 @@ union t4_wr {
 	struct fw_ri_fr_nsmr_wr fr;
 	struct fw_ri_inv_lstag_wr inv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
 };
 
 union t4_recv_wr {
 	struct fw_ri_recv_wr recv;
 	struct t4_status_page status;
-	__be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
 };
 
 static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
 {
-	int slots_used;
-
 	wqe->send.opcode = (u8)opcode;
 	wqe->send.flags = flags;
 	wqe->send.wrid = wrid;
@@ -118,12 +116,6 @@ static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
 	wqe->send.r1[1] = 0;
 	wqe->send.r1[2] = 0;
 	wqe->send.len16 = len16;
-
-	slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
-	while (slots_used < T4_SQ_NUM_SLOTS) {
-		wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
-		slots_used++;
-	}
 }
 
 /* CQE/AE status codes */
@@ -289,6 +281,7 @@ struct t4_sq {
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_swrqe {
@@ -310,6 +303,7 @@ struct t4_rq {
 	u16 size;
 	u16 cidx;
 	u16 pidx;
+	u16 wq_pidx;
 };
 
 struct t4_wq {
@@ -340,11 +334,14 @@ static inline u32 t4_rq_avail(struct t4_wq *wq)
 	return wq->rq.size - 1 - wq->rq.in_use;
 }
 
-static inline void t4_rq_produce(struct t4_wq *wq)
+static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->rq.in_use++;
 	if (++wq->rq.pidx == wq->rq.size)
 		wq->rq.pidx = 0;
+	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
+		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
 }
 
 static inline void t4_rq_consume(struct t4_wq *wq)
@@ -370,11 +367,14 @@ static inline u32 t4_sq_avail(struct t4_wq *wq)
 	return wq->sq.size - 1 - wq->sq.in_use;
 }
 
-static inline void t4_sq_produce(struct t4_wq *wq)
+static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 {
 	wq->sq.in_use++;
 	if (++wq->sq.pidx == wq->sq.size)
 		wq->sq.pidx = 0;
+	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
+		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
 }
 
 static inline void t4_sq_consume(struct t4_wq *wq)
@@ -386,14 +386,12 @@ static inline void t4_sq_consume(struct t4_wq *wq)
 
 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_SQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
 }
 
 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
 {
-	inc *= T4_RQ_NUM_SLOTS;
 	wmb();
 	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
 }
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index fc706bd07fae..dc193c292671 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -826,4 +826,14 @@ struct ulptx_idata {
 #define S_ULPTX_NSGE    0
 #define M_ULPTX_NSGE    0xFFFF
 #define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+
+#define S_RX_DACK_MODE    29
+#define M_RX_DACK_MODE    0x3
+#define V_RX_DACK_MODE(x) ((x) << S_RX_DACK_MODE)
+#define G_RX_DACK_MODE(x) (((x) >> S_RX_DACK_MODE) & M_RX_DACK_MODE)
+
+#define S_RX_DACK_CHANGE    31
+#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
+#define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
+
 #endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/infiniband/hw/ehca/ehca_eq.c index 3b87589b8ea0..d9b1bb40f480 100644 --- a/drivers/infiniband/hw/ehca/ehca_eq.c +++ b/drivers/infiniband/hw/ehca/ehca_eq.c | |||
@@ -122,21 +122,21 @@ int ehca_create_eq(struct ehca_shca *shca, | |||
122 | 122 | ||
123 | /* register interrupt handlers and initialize work queues */ | 123 | /* register interrupt handlers and initialize work queues */ |
124 | if (type == EHCA_EQ) { | 124 | if (type == EHCA_EQ) { |
125 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); | ||
126 | |||
125 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, | 127 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq, |
126 | IRQF_DISABLED, "ehca_eq", | 128 | IRQF_DISABLED, "ehca_eq", |
127 | (void *)shca); | 129 | (void *)shca); |
128 | if (ret < 0) | 130 | if (ret < 0) |
129 | ehca_err(ib_dev, "Can't map interrupt handler."); | 131 | ehca_err(ib_dev, "Can't map interrupt handler."); |
130 | |||
131 | tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca); | ||
132 | } else if (type == EHCA_NEQ) { | 132 | } else if (type == EHCA_NEQ) { |
133 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); | ||
134 | |||
133 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, | 135 | ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq, |
134 | IRQF_DISABLED, "ehca_neq", | 136 | IRQF_DISABLED, "ehca_neq", |
135 | (void *)shca); | 137 | (void *)shca); |
136 | if (ret < 0) | 138 | if (ret < 0) |
137 | ehca_err(ib_dev, "Can't map interrupt handler."); | 139 | ehca_err(ib_dev, "Can't map interrupt handler."); |
138 | |||
139 | tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca); | ||
140 | } | 140 | } |
141 | 141 | ||
142 | eq->is_initialized = 1; | 142 | eq->is_initialized = 1; |
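The reorder matters because the interrupt can fire as soon as ibmebus_request_irq() returns; if the handler schedules the tasklet before tasklet_init() has run, it operates on uninitialized state. The safe shape, sketched with hypothetical handler names:

/* initialize everything the handler may touch before the IRQ is live */
tasklet_init(&eq->interrupt_task, my_tasklet, (unsigned long)shca);

ret = ibmebus_request_irq(eq->ist, my_irq_handler,
			  IRQF_DISABLED, "my_eq", (void *)shca);
if (ret < 0)
	ehca_err(ib_dev, "Can't map interrupt handler.");

This is the usual publish-last rule: make state visible to an asynchronous context only after it is fully constructed.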
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index ecb51b396c42..cfc4de7a5da4 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c | |||
@@ -360,7 +360,8 @@ static int ehca_sense_attributes(struct ehca_shca *shca) | |||
360 | * a firmware property, so it's valid across all adapters | 360 | * a firmware property, so it's valid across all adapters |
361 | */ | 361 | */ |
362 | if (ehca_lock_hcalls == -1) | 362 | if (ehca_lock_hcalls == -1) |
363 | ehca_lock_hcalls = !(shca->hca_cap & HCA_CAP_H_ALLOC_RES_SYNC); | 363 | ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC, |
364 | shca->hca_cap); | ||
364 | 365 | ||
365 | /* translate supported MR page sizes; always support 4K */ | 366 | /* translate supported MR page sizes; always support 4K */ |
366 | shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; | 367 | shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 31a68b9c52d0..53f4cd4fc19a 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -933,11 +933,6 @@ int ehca_unmap_fmr(struct list_head *fmr_list) | |||
933 | /* check all FMR belong to same SHCA, and check internal flag */ | 933 | /* check all FMR belong to same SHCA, and check internal flag */ |
934 | list_for_each_entry(ib_fmr, fmr_list, list) { | 934 | list_for_each_entry(ib_fmr, fmr_list, list) { |
935 | prev_shca = shca; | 935 | prev_shca = shca; |
936 | if (!ib_fmr) { | ||
937 | ehca_gen_err("bad fmr=%p in list", ib_fmr); | ||
938 | ret = -EINVAL; | ||
939 | goto unmap_fmr_exit0; | ||
940 | } | ||
941 | shca = container_of(ib_fmr->device, struct ehca_shca, | 936 | shca = container_of(ib_fmr->device, struct ehca_shca, |
942 | ib_device); | 937 | ib_device); |
943 | e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); | 938 | e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr); |
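The dropped NULL test was dead code: list_for_each_entry() derives each entry with container_of() on an embedded list_head, so the entry pointer can never be NULL inside the loop body. A runnable model of that derivation:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct fmr { int id; struct list_head list; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct fmr f = { .id = 7 };
	struct list_head *pos = &f.list;

	/* the iterator always points at an embedded member, so the
	 * recovered container pointer is non-NULL for every node */
	struct fmr *entry = container_of(pos, struct fmr, list);
	printf("id=%d\n", entry->id);
	return 0;
}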
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 47d388ec1cde..32fb34201aba 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -251,7 +251,7 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype) | |||
251 | return ST_UD; | 251 | return ST_UD; |
252 | case IB_QPT_RAW_IPV6: | 252 | case IB_QPT_RAW_IPV6: |
253 | return -EINVAL; | 253 | return -EINVAL; |
254 | case IB_QPT_RAW_ETY: | 254 | case IB_QPT_RAW_ETHERTYPE: |
255 | return -EINVAL; | 255 | return -EINVAL; |
256 | default: | 256 | default: |
257 | ehca_gen_err("Invalid ibqptype=%x", ibqptype); | 257 | ehca_gen_err("Invalid ibqptype=%x", ibqptype); |
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c index 4d5dc3304d42..e6f9cdd94c7a 100644 --- a/drivers/infiniband/hw/ehca/hcp_if.c +++ b/drivers/infiniband/hw/ehca/hcp_if.c | |||
@@ -269,6 +269,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, | |||
269 | struct ehca_cq *cq, | 269 | struct ehca_cq *cq, |
270 | struct ehca_alloc_cq_parms *param) | 270 | struct ehca_alloc_cq_parms *param) |
271 | { | 271 | { |
272 | int rc; | ||
272 | u64 ret; | 273 | u64 ret; |
273 | unsigned long outs[PLPAR_HCALL9_BUFSIZE]; | 274 | unsigned long outs[PLPAR_HCALL9_BUFSIZE]; |
274 | 275 | ||
@@ -283,8 +284,19 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, | |||
283 | param->act_nr_of_entries = (u32)outs[3]; | 284 | param->act_nr_of_entries = (u32)outs[3]; |
284 | param->act_pages = (u32)outs[4]; | 285 | param->act_pages = (u32)outs[4]; |
285 | 286 | ||
286 | if (ret == H_SUCCESS) | 287 | if (ret == H_SUCCESS) { |
287 | hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); | 288 | rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]); |
289 | if (rc) { | ||
290 | ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx", | ||
291 | rc, outs[5]); | ||
292 | |||
293 | ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
294 | adapter_handle.handle, /* r4 */ | ||
295 | cq->ipz_cq_handle.handle, /* r5 */ | ||
296 | 0, 0, 0, 0, 0); | ||
297 | ret = H_NO_MEM; | ||
298 | } | ||
299 | } | ||
288 | 300 | ||
289 | if (ret == H_NOT_ENOUGH_RESOURCES) | 301 | if (ret == H_NOT_ENOUGH_RESOURCES) |
290 | ehca_gen_err("Not enough resources. ret=%lli", ret); | 302 | ehca_gen_err("Not enough resources. ret=%lli", ret); |
@@ -295,6 +307,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle, | |||
295 | u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, | 307 | u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, |
296 | struct ehca_alloc_qp_parms *parms, int is_user) | 308 | struct ehca_alloc_qp_parms *parms, int is_user) |
297 | { | 309 | { |
310 | int rc; | ||
298 | u64 ret; | 311 | u64 ret; |
299 | u64 allocate_controls, max_r10_reg, r11, r12; | 312 | u64 allocate_controls, max_r10_reg, r11, r12; |
300 | unsigned long outs[PLPAR_HCALL9_BUFSIZE]; | 313 | unsigned long outs[PLPAR_HCALL9_BUFSIZE]; |
@@ -358,8 +371,19 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, | |||
358 | parms->rqueue.queue_size = | 371 | parms->rqueue.queue_size = |
359 | (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); | 372 | (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); |
360 | 373 | ||
361 | if (ret == H_SUCCESS) | 374 | if (ret == H_SUCCESS) { |
362 | hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]); | 375 | rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]); |
376 | if (rc) { | ||
377 | ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx", | ||
378 | rc, outs[6]); | ||
379 | |||
380 | ehca_plpar_hcall_norets(H_FREE_RESOURCE, | ||
381 | adapter_handle.handle, /* r4 */ | ||
382 | parms->qp_handle.handle, /* r5 */ | ||
383 | 0, 0, 0, 0, 0); | ||
384 | ret = H_NO_MEM; | ||
385 | } | ||
386 | } | ||
363 | 387 | ||
364 | if (ret == H_NOT_ENOUGH_RESOURCES) | 388 | if (ret == H_NOT_ENOUGH_RESOURCES) |
365 | ehca_gen_err("Not enough resources. ret=%lli", ret); | 389 | ehca_gen_err("Not enough resources. ret=%lli", ret); |
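Both hunks apply the same rollback rule: once the hcall has handed out a CQ or QP resource, a later failure in hcp_galpas_ctor() must release that resource via H_FREE_RESOURCE before H_NO_MEM is reported, or the firmware object leaks. A toy model of the acquire/construct/rollback shape, with malloc/free standing in for the hcalls:

#include <stdio.h>
#include <stdlib.h>

static int construct(void *res)
{
	(void)res;
	return -1;              /* simulate hcp_galpas_ctor() failing */
}

int main(void)
{
	void *res = malloc(64); /* stands in for H_ALLOC_RESOURCE */
	int ret = 0;

	if (res && construct(res)) {
		free(res);      /* stands in for H_FREE_RESOURCE */
		res = NULL;
		ret = -1;       /* stands in for H_NO_MEM */
	}
	printf("ret=%d res=%p\n", ret, res);
	return 0;
}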
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c index b3e0e72e8a73..077376ff3d28 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.c +++ b/drivers/infiniband/hw/ehca/hcp_phyp.c | |||
@@ -42,10 +42,9 @@ | |||
42 | #include "ehca_classes.h" | 42 | #include "ehca_classes.h" |
43 | #include "hipz_hw.h" | 43 | #include "hipz_hw.h" |
44 | 44 | ||
45 | int hcall_map_page(u64 physaddr, u64 *mapaddr) | 45 | u64 hcall_map_page(u64 physaddr) |
46 | { | 46 | { |
47 | *mapaddr = (u64)(ioremap(physaddr, EHCA_PAGESIZE)); | 47 | return (u64)ioremap(physaddr, EHCA_PAGESIZE); |
48 | return 0; | ||
49 | } | 48 | } |
50 | 49 | ||
51 | int hcall_unmap_page(u64 mapaddr) | 50 | int hcall_unmap_page(u64 mapaddr) |
@@ -58,9 +57,9 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, | |||
58 | u64 paddr_kernel, u64 paddr_user) | 57 | u64 paddr_kernel, u64 paddr_user) |
59 | { | 58 | { |
60 | if (!is_user) { | 59 | if (!is_user) { |
61 | int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle); | 60 | galpas->kernel.fw_handle = hcall_map_page(paddr_kernel); |
62 | if (ret) | 61 | if (!galpas->kernel.fw_handle) |
63 | return ret; | 62 | return -ENOMEM; |
64 | } else | 63 | } else |
65 | galpas->kernel.fw_handle = 0; | 64 | galpas->kernel.fw_handle = 0; |
66 | 65 | ||
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h index 204227d5303a..d1b029910249 100644 --- a/drivers/infiniband/hw/ehca/hcp_phyp.h +++ b/drivers/infiniband/hw/ehca/hcp_phyp.h | |||
@@ -83,7 +83,7 @@ int hcp_galpas_ctor(struct h_galpas *galpas, int is_user, | |||
83 | 83 | ||
84 | int hcp_galpas_dtor(struct h_galpas *galpas); | 84 | int hcp_galpas_dtor(struct h_galpas *galpas); |
85 | 85 | ||
86 | int hcall_map_page(u64 physaddr, u64 * mapaddr); | 86 | u64 hcall_map_page(u64 physaddr); |
87 | 87 | ||
88 | int hcall_unmap_page(u64 mapaddr); | 88 | int hcall_unmap_page(u64 mapaddr); |
89 | 89 | ||
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 21337468c652..765f0fc1da76 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -390,6 +390,8 @@ done: | |||
390 | ipath_enable_armlaunch(dd); | 390 | ipath_enable_armlaunch(dd); |
391 | } | 391 | } |
392 | 392 | ||
393 | static void cleanup_device(struct ipath_devdata *dd); | ||
394 | |||
393 | static int __devinit ipath_init_one(struct pci_dev *pdev, | 395 | static int __devinit ipath_init_one(struct pci_dev *pdev, |
394 | const struct pci_device_id *ent) | 396 | const struct pci_device_id *ent) |
395 | { | 397 | { |
@@ -616,8 +618,13 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, | |||
616 | goto bail; | 618 | goto bail; |
617 | 619 | ||
618 | bail_irqsetup: | 620 | bail_irqsetup: |
619 | if (pdev->irq) | 621 | cleanup_device(dd); |
620 | free_irq(pdev->irq, dd); | 622 | |
623 | if (dd->ipath_irq) | ||
624 | dd->ipath_f_free_irq(dd); | ||
625 | |||
626 | if (dd->ipath_f_cleanup) | ||
627 | dd->ipath_f_cleanup(dd); | ||
621 | 628 | ||
622 | bail_iounmap: | 629 | bail_iounmap: |
623 | iounmap((volatile void __iomem *) dd->ipath_kregbase); | 630 | iounmap((volatile void __iomem *) dd->ipath_kregbase); |
@@ -635,7 +642,7 @@ bail: | |||
635 | return ret; | 642 | return ret; |
636 | } | 643 | } |
637 | 644 | ||
638 | static void __devexit cleanup_device(struct ipath_devdata *dd) | 645 | static void cleanup_device(struct ipath_devdata *dd) |
639 | { | 646 | { |
640 | int port; | 647 | int port; |
641 | struct ipath_portdata **tmp; | 648 | struct ipath_portdata **tmp; |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 3603ae89b606..f4ceecd9684b 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -1817,7 +1817,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, | |||
1817 | case IB_QPT_RAW_IPV6: | 1817 | case IB_QPT_RAW_IPV6: |
1818 | op_mod = 2; | 1818 | op_mod = 2; |
1819 | break; | 1819 | break; |
1820 | case IB_QPT_RAW_ETY: | 1820 | case IB_QPT_RAW_ETHERTYPE: |
1821 | op_mod = 3; | 1821 | op_mod = 3; |
1822 | break; | 1822 | break; |
1823 | default: | 1823 | default: |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index de7b9d7166f3..0c9f0aa5d4ea 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -110,8 +110,8 @@ static unsigned int sysfs_nonidx_addr; | |||
110 | static unsigned int sysfs_idx_addr; | 110 | static unsigned int sysfs_idx_addr; |
111 | 111 | ||
112 | static struct pci_device_id nes_pci_table[] = { | 112 | static struct pci_device_id nes_pci_table[] = { |
113 | {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, | 113 | { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020), }, |
114 | {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID}, | 114 | { PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR), }, |
115 | {0} | 115 | {0} |
116 | }; | 116 | }; |
117 | 117 | ||
@@ -259,13 +259,11 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r | |||
259 | unsigned long flags; | 259 | unsigned long flags; |
260 | struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; | 260 | struct nes_qp *nesqp = cqp_request->cqp_callback_pointer; |
261 | struct nes_adapter *nesadapter = nesdev->nesadapter; | 261 | struct nes_adapter *nesadapter = nesdev->nesadapter; |
262 | u32 qp_id; | ||
263 | 262 | ||
264 | atomic_inc(&qps_destroyed); | 263 | atomic_inc(&qps_destroyed); |
265 | 264 | ||
266 | /* Free the control structures */ | 265 | /* Free the control structures */ |
267 | 266 | ||
268 | qp_id = nesqp->hwqp.qp_id; | ||
269 | if (nesqp->pbl_vbase) { | 267 | if (nesqp->pbl_vbase) { |
270 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, | 268 | pci_free_consistent(nesdev->pcidev, nesqp->qp_mem_size, |
271 | nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); | 269 | nesqp->hwqp.q2_vbase, nesqp->hwqp.q2_pbase); |
@@ -441,7 +439,6 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
441 | struct net_device *netdev = NULL; | 439 | struct net_device *netdev = NULL; |
442 | struct nes_device *nesdev = NULL; | 440 | struct nes_device *nesdev = NULL; |
443 | int ret = 0; | 441 | int ret = 0; |
444 | struct nes_vnic *nesvnic = NULL; | ||
445 | void __iomem *mmio_regs = NULL; | 442 | void __iomem *mmio_regs = NULL; |
446 | u8 hw_rev; | 443 | u8 hw_rev; |
447 | 444 | ||
@@ -664,25 +661,21 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i | |||
664 | nes_notifiers_registered++; | 661 | nes_notifiers_registered++; |
665 | 662 | ||
666 | /* Initialize network devices */ | 663 | /* Initialize network devices */ |
667 | if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) { | 664 | if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL) |
668 | goto bail7; | 665 | goto bail7; |
669 | } | ||
670 | |||
671 | /* Register network device */ | ||
672 | ret = register_netdev(netdev); | ||
673 | if (ret) { | ||
674 | printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); | ||
675 | nes_netdev_destroy(netdev); | ||
676 | goto bail7; | ||
677 | } | ||
678 | 666 | ||
679 | nes_print_macaddr(netdev); | 667 | /* Register network device */ |
680 | /* create a CM core for this netdev */ | 668 | ret = register_netdev(netdev); |
681 | nesvnic = netdev_priv(netdev); | 669 | if (ret) { |
670 | printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n", ret); | ||
671 | nes_netdev_destroy(netdev); | ||
672 | goto bail7; | ||
673 | } | ||
682 | 674 | ||
683 | nesdev->netdev_count++; | 675 | nes_print_macaddr(netdev); |
684 | nesdev->nesadapter->netdev_count++; | ||
685 | 676 | ||
677 | nesdev->netdev_count++; | ||
678 | nesdev->nesadapter->netdev_count++; | ||
686 | 679 | ||
687 | printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n", | 680 | printk(KERN_ERR PFX "%s: NetEffect RNIC driver successfully loaded.\n", |
688 | pci_name(pcidev)); | 681 | pci_name(pcidev)); |
@@ -1104,7 +1097,7 @@ static ssize_t nes_show_wqm_quanta(struct device_driver *ddp, char *buf) | |||
1104 | i++; | 1097 | i++; |
1105 | } | 1098 | } |
1106 | 1099 | ||
1107 | return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta); | 1100 | return snprintf(buf, PAGE_SIZE, "0x%X\n", wqm_quanta_value); |
1108 | } | 1101 | } |
1109 | 1102 | ||
1110 | 1103 | ||
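PCI_VDEVICE() is the <linux/pci.h> shorthand for a vendor+device match with wildcard subsystem IDs; its expansion is approximately:

/* approximate definition, from <linux/pci.h> */
#define PCI_VDEVICE(vend, dev) \
	PCI_VENDOR_ID_##vend, (dev), PCI_ANY_ID, PCI_ANY_ID, 0, 0

/* so the shortened table entries above are equivalent to the old
 * four-field form with PCI_ANY_ID sub-IDs spelled out */
static const struct pci_device_id example_tbl[] = {
	{ PCI_VDEVICE(NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020) },
	{0}
};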
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index cc78fee1dd51..b3d145e82b4c 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h | |||
@@ -262,6 +262,7 @@ struct nes_device { | |||
262 | u16 base_doorbell_index; | 262 | u16 base_doorbell_index; |
263 | u16 currcq_count; | 263 | u16 currcq_count; |
264 | u16 deepcq_count; | 264 | u16 deepcq_count; |
265 | u8 iw_status; | ||
265 | u8 msi_enabled; | 266 | u8 msi_enabled; |
266 | u8 netdev_count; | 267 | u8 netdev_count; |
267 | u8 napi_isr_ran; | 268 | u8 napi_isr_ran; |
@@ -527,6 +528,7 @@ void nes_cm_disconn_worker(void *); | |||
527 | int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32); | 528 | int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32); |
528 | int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); | 529 | int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); |
529 | struct nes_ib_device *nes_init_ofa_device(struct net_device *); | 530 | struct nes_ib_device *nes_init_ofa_device(struct net_device *); |
531 | void nes_port_ibevent(struct nes_vnic *nesvnic); | ||
530 | void nes_destroy_ofa_device(struct nes_ib_device *); | 532 | void nes_destroy_ofa_device(struct nes_ib_device *); |
531 | int nes_register_ofa_device(struct nes_ib_device *); | 533 | int nes_register_ofa_device(struct nes_ib_device *); |
532 | 534 | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index d876d0435cd4..443cea55daac 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -1719,8 +1719,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1719 | { | 1719 | { |
1720 | int datasize = 0; | 1720 | int datasize = 0; |
1721 | u32 inc_sequence; | 1721 | u32 inc_sequence; |
1722 | u32 rem_seq_ack; | ||
1723 | u32 rem_seq; | ||
1724 | int ret = 0; | 1722 | int ret = 0; |
1725 | int optionsize; | 1723 | int optionsize; |
1726 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); | 1724 | optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); |
@@ -1730,8 +1728,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
1730 | 1728 | ||
1731 | skb_pull(skb, tcph->doff << 2); | 1729 | skb_pull(skb, tcph->doff << 2); |
1732 | inc_sequence = ntohl(tcph->seq); | 1730 | inc_sequence = ntohl(tcph->seq); |
1733 | rem_seq = ntohl(tcph->seq); | ||
1734 | rem_seq_ack = ntohl(tcph->ack_seq); | ||
1735 | datasize = skb->len; | 1731 | datasize = skb->len; |
1736 | switch (cm_node->state) { | 1732 | switch (cm_node->state) { |
1737 | case NES_CM_STATE_SYN_RCVD: | 1733 | case NES_CM_STATE_SYN_RCVD: |
@@ -2565,7 +2561,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) | |||
2565 | u16 last_ae; | 2561 | u16 last_ae; |
2566 | u8 original_hw_tcp_state; | 2562 | u8 original_hw_tcp_state; |
2567 | u8 original_ibqp_state; | 2563 | u8 original_ibqp_state; |
2568 | enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK; | 2564 | enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; |
2569 | int issue_disconn = 0; | 2565 | int issue_disconn = 0; |
2570 | int issue_close = 0; | 2566 | int issue_close = 0; |
2571 | int issue_flush = 0; | 2567 | int issue_flush = 0; |
@@ -3128,17 +3124,15 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
3128 | struct nes_vnic *nesvnic; | 3124 | struct nes_vnic *nesvnic; |
3129 | struct nes_cm_listener *cm_node; | 3125 | struct nes_cm_listener *cm_node; |
3130 | struct nes_cm_info cm_info; | 3126 | struct nes_cm_info cm_info; |
3131 | struct nes_adapter *adapter; | ||
3132 | int err; | 3127 | int err; |
3133 | 3128 | ||
3134 | |||
3135 | nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", | 3129 | nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", |
3136 | cm_id, ntohs(cm_id->local_addr.sin_port)); | 3130 | cm_id, ntohs(cm_id->local_addr.sin_port)); |
3137 | 3131 | ||
3138 | nesvnic = to_nesvnic(cm_id->device); | 3132 | nesvnic = to_nesvnic(cm_id->device); |
3139 | if (!nesvnic) | 3133 | if (!nesvnic) |
3140 | return -EINVAL; | 3134 | return -EINVAL; |
3141 | adapter = nesvnic->nesdev->nesadapter; | 3135 | |
3142 | nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", | 3136 | nes_debug(NES_DBG_CM, "nesvnic=%p, netdev=%p, %s\n", |
3143 | nesvnic, nesvnic->netdev, nesvnic->netdev->name); | 3137 | nesvnic, nesvnic->netdev, nesvnic->netdev->name); |
3144 | 3138 | ||
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 57874a165083..f8233c851c69 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -1970,7 +1970,7 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic) | |||
1970 | dev_kfree_skb( | 1970 | dev_kfree_skb( |
1971 | nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]); | 1971 | nesvnic->nic.tx_skb[nesvnic->nic.sq_tail]); |
1972 | 1972 | ||
1973 | nesvnic->nic.sq_tail = (++nesvnic->nic.sq_tail) | 1973 | nesvnic->nic.sq_tail = (nesvnic->nic.sq_tail + 1) |
1974 | & (nesvnic->nic.sq_size - 1); | 1974 | & (nesvnic->nic.sq_size - 1); |
1975 | } | 1975 | } |
1976 | 1976 | ||
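The old `sq_tail = (++sq_tail) & mask` modifies sq_tail twice with no intervening sequence point, which is undefined behaviour in C; the replacement performs a single well-defined assignment. A runnable demonstration of the fixed form, which also shows the power-of-two wrap:

#include <stdio.h>

int main(void)
{
	unsigned int sq_tail = 3, sq_size = 4;  /* sq_size: power of two */

	/* one assignment, no double modification of sq_tail */
	sq_tail = (sq_tail + 1) & (sq_size - 1);

	printf("sq_tail = %u\n", sq_tail);      /* 3 wraps to 0 */
	return 0;
}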
@@ -2737,9 +2737,9 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) | |||
2737 | nesnic->sq_tail &= nesnic->sq_size-1; | 2737 | nesnic->sq_tail &= nesnic->sq_size-1; |
2738 | if (sq_cqes > 128) { | 2738 | if (sq_cqes > 128) { |
2739 | barrier(); | 2739 | barrier(); |
2740 | /* restart the queue if it had been stopped */ | 2740 | /* restart the queue if it had been stopped */ |
2741 | if (netif_queue_stopped(nesvnic->netdev)) | 2741 | if (netif_queue_stopped(nesvnic->netdev)) |
2742 | netif_wake_queue(nesvnic->netdev); | 2742 | netif_wake_queue(nesvnic->netdev); |
2743 | sq_cqes = 0; | 2743 | sq_cqes = 0; |
2744 | } | 2744 | } |
2745 | } else { | 2745 | } else { |
@@ -2999,11 +2999,8 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq) | |||
2999 | 2999 | ||
3000 | static u8 *locate_mpa(u8 *pkt, u32 aeq_info) | 3000 | static u8 *locate_mpa(u8 *pkt, u32 aeq_info) |
3001 | { | 3001 | { |
3002 | u16 pkt_len; | ||
3003 | |||
3004 | if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { | 3002 | if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) { |
3005 | /* skip over ethernet header */ | 3003 | /* skip over ethernet header */ |
3006 | pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2)); | ||
3007 | pkt += ETH_HLEN; | 3004 | pkt += ETH_HLEN; |
3008 | 3005 | ||
3009 | /* Skip over IP and TCP headers */ | 3006 | /* Skip over IP and TCP headers */ |
@@ -3283,9 +3280,15 @@ static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *n | |||
3283 | else | 3280 | else |
3284 | mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; | 3281 | mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG; |
3285 | 3282 | ||
3286 | nes_terminate_start_timer(nesqp); | 3283 | if (!nesdev->iw_status) { |
3287 | nesqp->term_flags |= NES_TERM_SENT; | 3284 | nesqp->term_flags = NES_TERM_DONE; |
3288 | nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); | 3285 | nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_ERROR, 0, 0); |
3286 | nes_cm_disconn(nesqp); | ||
3287 | } else { | ||
3288 | nes_terminate_start_timer(nesqp); | ||
3289 | nesqp->term_flags |= NES_TERM_SENT; | ||
3290 | nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0); | ||
3291 | } | ||
3289 | } | 3292 | } |
3290 | 3293 | ||
3291 | static void nes_terminate_send_fin(struct nes_device *nesdev, | 3294 | static void nes_terminate_send_fin(struct nes_device *nesdev, |
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index bbbfe9fc5a5a..aa9183db32b1 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h | |||
@@ -1100,11 +1100,12 @@ struct nes_adapter { | |||
1100 | u32 wqm_wat; | 1100 | u32 wqm_wat; |
1101 | u32 core_clock; | 1101 | u32 core_clock; |
1102 | u32 firmware_version; | 1102 | u32 firmware_version; |
1103 | u32 eeprom_version; | ||
1103 | 1104 | ||
1104 | u32 nic_rx_eth_route_err; | 1105 | u32 nic_rx_eth_route_err; |
1105 | 1106 | ||
1106 | u32 et_rx_coalesce_usecs; | 1107 | u32 et_rx_coalesce_usecs; |
1107 | u32 et_rx_max_coalesced_frames; | 1108 | u32 et_rx_max_coalesced_frames; |
1108 | u32 et_rx_coalesce_usecs_irq; | 1109 | u32 et_rx_coalesce_usecs_irq; |
1109 | u32 et_rx_max_coalesced_frames_irq; | 1110 | u32 et_rx_max_coalesced_frames_irq; |
1110 | u32 et_pkt_rate_low; | 1111 | u32 et_pkt_rate_low; |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 42e7aad1ec23..6dfdd49cdbcf 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
@@ -232,6 +232,13 @@ static int nes_netdev_open(struct net_device *netdev) | |||
232 | NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); | 232 | NES_MAC_INT_TX_UNDERFLOW | NES_MAC_INT_TX_ERROR)); |
233 | first_nesvnic = nesvnic; | 233 | first_nesvnic = nesvnic; |
234 | } | 234 | } |
235 | |||
236 | if (nesvnic->of_device_registered) { | ||
237 | nesdev->iw_status = 1; | ||
238 | nesdev->nesadapter->send_term_ok = 1; | ||
239 | nes_port_ibevent(nesvnic); | ||
240 | } | ||
241 | |||
235 | if (first_nesvnic->linkup) { | 242 | if (first_nesvnic->linkup) { |
236 | /* Enable network packets */ | 243 | /* Enable network packets */ |
237 | nesvnic->linkup = 1; | 244 | nesvnic->linkup = 1; |
@@ -309,9 +316,9 @@ static int nes_netdev_stop(struct net_device *netdev) | |||
309 | 316 | ||
310 | 317 | ||
311 | if (nesvnic->of_device_registered) { | 318 | if (nesvnic->of_device_registered) { |
312 | nes_destroy_ofa_device(nesvnic->nesibdev); | 319 | nesdev->nesadapter->send_term_ok = 0; |
313 | nesvnic->nesibdev = NULL; | 320 | nesdev->iw_status = 0; |
314 | nesvnic->of_device_registered = 0; | 321 | nes_port_ibevent(nesvnic); |
315 | } | 322 | } |
316 | nes_destroy_nic_qp(nesvnic); | 323 | nes_destroy_nic_qp(nesvnic); |
317 | 324 | ||
@@ -463,7 +470,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
463 | u16 nhoffset; | 470 | u16 nhoffset; |
464 | u16 wqes_needed; | 471 | u16 wqes_needed; |
465 | u16 wqes_available; | 472 | u16 wqes_available; |
466 | u32 old_head; | ||
467 | u32 wqe_misc; | 473 | u32 wqe_misc; |
468 | 474 | ||
469 | /* | 475 | /* |
@@ -503,7 +509,6 @@ sq_no_longer_full: | |||
503 | if (skb_is_gso(skb)) { | 509 | if (skb_is_gso(skb)) { |
504 | nesvnic->segmented_tso_requests++; | 510 | nesvnic->segmented_tso_requests++; |
505 | nesvnic->tso_requests++; | 511 | nesvnic->tso_requests++; |
506 | old_head = nesnic->sq_head; | ||
507 | /* Basically 4 fragments available per WQE with extended fragments */ | 512 | /* Basically 4 fragments available per WQE with extended fragments */ |
508 | wqes_needed = nr_frags >> 2; | 513 | wqes_needed = nr_frags >> 2; |
509 | wqes_needed += (nr_frags&3)?1:0; | 514 | wqes_needed += (nr_frags&3)?1:0; |
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c index a9f5dd272f1a..f9c417c6b3b3 100644 --- a/drivers/infiniband/hw/nes/nes_utils.c +++ b/drivers/infiniband/hw/nes/nes_utils.c | |||
@@ -190,6 +190,11 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada | |||
190 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + | 190 | nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) + |
191 | (u32)((u8)eeprom_data); | 191 | (u32)((u8)eeprom_data); |
192 | 192 | ||
193 | eeprom_data = nes_read16_eeprom(nesdev->regs, next_section_address + 10); | ||
194 | printk(PFX "EEPROM version %u.%u\n", (u8)(eeprom_data>>8), (u8)eeprom_data); | ||
195 | nesadapter->eeprom_version = (((u32)(u8)(eeprom_data>>8)) << 16) + | ||
196 | (u32)((u8)eeprom_data); | ||
197 | |||
193 | no_fw_rev: | 198 | no_fw_rev: |
194 | /* eeprom is valid */ | 199 | /* eeprom is valid */ |
195 | eeprom_offset = nesadapter->software_eeprom_offset; | 200 | eeprom_offset = nesadapter->software_eeprom_offset; |
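The added block parses the EEPROM version exactly as the firmware version above it: major byte into bits 23:16, minor byte into bits 7:0. A decoding sketch with a hypothetical raw word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t eeprom_data = 0x0203;          /* hypothetical raw word */
	uint8_t major = eeprom_data >> 8;
	uint8_t minor = eeprom_data & 0xff;
	uint32_t packed = ((uint32_t)major << 16) | minor;

	printf("EEPROM version %u.%u (stored as %#x)\n",
	       major, minor, packed);
	return 0;
}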
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 9bc2d744b2ea..9046e6675686 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -518,7 +518,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop | |||
518 | memset(props, 0, sizeof(*props)); | 518 | memset(props, 0, sizeof(*props)); |
519 | memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6); | 519 | memcpy(&props->sys_image_guid, nesvnic->netdev->dev_addr, 6); |
520 | 520 | ||
521 | props->fw_ver = nesdev->nesadapter->fw_ver; | 521 | props->fw_ver = nesdev->nesadapter->firmware_version; |
522 | props->device_cap_flags = nesdev->nesadapter->device_cap_flags; | 522 | props->device_cap_flags = nesdev->nesadapter->device_cap_flags; |
523 | props->vendor_id = nesdev->nesadapter->vendor_id; | 523 | props->vendor_id = nesdev->nesadapter->vendor_id; |
524 | props->vendor_part_id = nesdev->nesadapter->vendor_part_id; | 524 | props->vendor_part_id = nesdev->nesadapter->vendor_part_id; |
@@ -1941,7 +1941,7 @@ static int nes_reg_mr(struct nes_device *nesdev, struct nes_pd *nespd, | |||
1941 | u8 use_256_pbls = 0; | 1941 | u8 use_256_pbls = 0; |
1942 | u8 use_4k_pbls = 0; | 1942 | u8 use_4k_pbls = 0; |
1943 | u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0; | 1943 | u16 use_two_level = (pbl_count_4k > 1) ? 1 : 0; |
1944 | struct nes_root_vpbl new_root = {0, 0, 0}; | 1944 | struct nes_root_vpbl new_root = { 0, NULL, NULL }; |
1945 | u32 opcode = 0; | 1945 | u32 opcode = 0; |
1946 | u16 major_code; | 1946 | u16 major_code; |
1947 | 1947 | ||
@@ -2112,13 +2112,12 @@ static struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd, | |||
2112 | u32 driver_key = 0; | 2112 | u32 driver_key = 0; |
2113 | u32 root_pbl_index = 0; | 2113 | u32 root_pbl_index = 0; |
2114 | u32 cur_pbl_index = 0; | 2114 | u32 cur_pbl_index = 0; |
2115 | int err = 0, pbl_depth = 0; | 2115 | int err = 0; |
2116 | int ret = 0; | 2116 | int ret = 0; |
2117 | u16 pbl_count = 0; | 2117 | u16 pbl_count = 0; |
2118 | u8 single_page = 1; | 2118 | u8 single_page = 1; |
2119 | u8 stag_key = 0; | 2119 | u8 stag_key = 0; |
2120 | 2120 | ||
2121 | pbl_depth = 0; | ||
2122 | region_length = 0; | 2121 | region_length = 0; |
2123 | vpbl.pbl_vbase = NULL; | 2122 | vpbl.pbl_vbase = NULL; |
2124 | root_vpbl.pbl_vbase = NULL; | 2123 | root_vpbl.pbl_vbase = NULL; |
@@ -2931,7 +2930,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
2931 | int ret; | 2930 | int ret; |
2932 | u16 original_last_aeq; | 2931 | u16 original_last_aeq; |
2933 | u8 issue_modify_qp = 0; | 2932 | u8 issue_modify_qp = 0; |
2934 | u8 issue_disconnect = 0; | ||
2935 | u8 dont_wait = 0; | 2933 | u8 dont_wait = 0; |
2936 | 2934 | ||
2937 | nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u," | 2935 | nes_debug(NES_DBG_MOD_QP, "QP%u: QP State=%u, cur QP State=%u," |
@@ -3058,6 +3056,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3058 | nesqp->hte_added = 0; | 3056 | nesqp->hte_added = 0; |
3059 | } | 3057 | } |
3060 | if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) && | 3058 | if ((nesqp->hw_tcp_state > NES_AEQE_TCP_STATE_CLOSED) && |
3059 | (nesdev->iw_status) && | ||
3061 | (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) { | 3060 | (nesqp->hw_tcp_state != NES_AEQE_TCP_STATE_TIME_WAIT)) { |
3062 | next_iwarp_state |= NES_CQP_QP_RESET; | 3061 | next_iwarp_state |= NES_CQP_QP_RESET; |
3063 | } else { | 3062 | } else { |
@@ -3082,7 +3081,6 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, | |||
3082 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; | 3081 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; |
3083 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", | 3082 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", |
3084 | nesqp->iwarp_state); | 3083 | nesqp->iwarp_state); |
3085 | issue_disconnect = 1; | ||
3086 | } else { | 3084 | } else { |
3087 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; | 3085 | nesqp->iwarp_state = next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK; |
3088 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", | 3086 | nes_debug(NES_DBG_MOD_QP, "Change nesqp->iwarp_state=%08x\n", |
@@ -3936,6 +3934,17 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) | |||
3936 | return nesibdev; | 3934 | return nesibdev; |
3937 | } | 3935 | } |
3938 | 3936 | ||
3937 | void nes_port_ibevent(struct nes_vnic *nesvnic) | ||
3938 | { | ||
3939 | struct nes_ib_device *nesibdev = nesvnic->nesibdev; | ||
3940 | struct nes_device *nesdev = nesvnic->nesdev; | ||
3941 | struct ib_event event; | ||
3942 | event.device = &nesibdev->ibdev; | ||
3943 | event.element.port_num = nesvnic->logical_port + 1; | ||
3944 | event.event = nesdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; | ||
3945 | ib_dispatch_event(&event); | ||
3946 | } | ||
3947 | |||
3939 | 3948 | ||
3940 | /** | 3949 | /** |
3941 | * nes_destroy_ofa_device | 3950 | * nes_destroy_ofa_device |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 3593983df7ba..61de0654820e 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/mutex.h> | 45 | #include <linux/mutex.h> |
46 | #include <linux/list.h> | 46 | #include <linux/list.h> |
47 | #include <linux/scatterlist.h> | 47 | #include <linux/scatterlist.h> |
48 | #include <linux/slab.h> | ||
48 | #include <linux/io.h> | 49 | #include <linux/io.h> |
49 | #include <linux/fs.h> | 50 | #include <linux/fs.h> |
50 | #include <linux/completion.h> | 51 | #include <linux/completion.h> |
@@ -326,6 +327,9 @@ struct qib_verbs_txreq { | |||
326 | 327 | ||
327 | #define QIB_DEFAULT_MTU 4096 | 328 | #define QIB_DEFAULT_MTU 4096 |
328 | 329 | ||
330 | /* max number of IB ports supported per HCA */ | ||
331 | #define QIB_MAX_IB_PORTS 2 | ||
332 | |||
329 | /* | 333 | /* |
330 | * Possible IB config parameters for f_get/set_ib_table() | 334 | * Possible IB config parameters for f_get/set_ib_table() |
331 | */ | 335 | */ |
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index b3955ed8f794..145da4040883 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h | |||
@@ -279,7 +279,7 @@ struct qib_base_info { | |||
279 | * may not be implemented; the user code must deal with this if it | 279 | * may not be implemented; the user code must deal with this if it |
280 | * cares, or it must abort after initialization reports the difference. | 280 | * cares, or it must abort after initialization reports the difference. |
281 | */ | 281 | */ |
282 | #define QIB_USER_SWMINOR 10 | 282 | #define QIB_USER_SWMINOR 11 |
283 | 283 | ||
284 | #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) | 284 | #define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR) |
285 | 285 | ||
@@ -302,6 +302,18 @@ struct qib_base_info { | |||
302 | #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION) | 302 | #define QIB_KERN_SWVERSION ((QIB_KERN_TYPE << 31) | QIB_USER_SWVERSION) |
303 | 303 | ||
304 | /* | 304 | /* |
305 | * If the unit is specified via open, HCA choice is fixed. If port is | ||
306 | * specified, it's also fixed. Otherwise we try to spread contexts | ||
307 | * across ports and HCAs, using different algorithms. WITHIN is | ||
308 | * the old default, prior to this mechanism. | ||
309 | */ | ||
310 | #define QIB_PORT_ALG_ACROSS 0 /* round robin contexts across HCAs, then | ||
311 | * ports; this is the default */ | ||
312 | #define QIB_PORT_ALG_WITHIN 1 /* use all contexts on an HCA (round robin | ||
313 | * active ports within), then next HCA */ | ||
314 | #define QIB_PORT_ALG_COUNT 2 /* number of algorithm choices */ | ||
315 | |||
316 | /* | ||
305 | * This structure is passed to qib_userinit() to tell the driver where | 317 | * This structure is passed to qib_userinit() to tell the driver where |
306 | * user code buffers are, sizes, etc. The offsets and sizes of the | 318 | * user code buffers are, sizes, etc. The offsets and sizes of the |
307 | * fields must remain unchanged, for binary compatibility. It can | 319 | * fields must remain unchanged, for binary compatibility. It can |
@@ -319,7 +331,7 @@ struct qib_user_info { | |||
319 | /* size of struct base_info to write to */ | 331 | /* size of struct base_info to write to */ |
320 | __u32 spu_base_info_size; | 332 | __u32 spu_base_info_size; |
321 | 333 | ||
322 | __u32 _spu_unused3; | 334 | __u32 spu_port_alg; /* which QIB_PORT_ALG_*; unused for user minor < 11 */ |
323 | 335 | ||
324 | /* | 336 | /* |
325 | * If two or more processes wish to share a context, each process | 337 | * If two or more processes wish to share a context, each process |
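spu_userversion packs major and minor the same way QIB_USER_SWVERSION does, which is what lets the driver gate the new spu_port_alg field on the minor number. A sketch of the pack/unpack, with a hypothetical major value:

#include <stdio.h>

#define SWMAJOR 1       /* hypothetical; the real QIB_USER_SWMAJOR is defined elsewhere */
#define SWMINOR 11
#define SWVERSION ((SWMAJOR << 16) | SWMINOR)

int main(void)
{
	unsigned int userversion = SWVERSION;
	unsigned int swminor = userversion & 0xffff;

	/* spu_port_alg is honored only when swminor >= 11 */
	printf("minor=%u port_alg_valid=%d\n", swminor, swminor >= 11);
	return 0;
}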
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index f15ce076ac49..9cd193603fb1 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -335,7 +335,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) | |||
335 | smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ | 335 | smp_rmb(); /* prevent speculative reads of dma'ed hdrq */ |
336 | } | 336 | } |
337 | 337 | ||
338 | for (last = 0, i = 1; !last; i += !last) { | 338 | for (last = 0, i = 1; !last && i <= 64; i += !last) { |
339 | hdr = dd->f_get_msgheader(dd, rhf_addr); | 339 | hdr = dd->f_get_msgheader(dd, rhf_addr); |
340 | eflags = qib_hdrget_err_flags(rhf_addr); | 340 | eflags = qib_hdrget_err_flags(rhf_addr); |
341 | etype = qib_hdrget_rcv_type(rhf_addr); | 341 | etype = qib_hdrget_rcv_type(rhf_addr); |
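The bound turns an unbounded drain into budgeted polling: at most 64 packets are handled per interrupt, so a flooded context cannot monopolize the CPU, and anything left over is picked up on the next pass. A runnable model of the loop shape, keeping the `i += !last` idiom:

#include <stdio.h>

int main(void)
{
	int last, i, queued = 100, npkts = 0;

	for (last = 0, i = 1; !last && i <= 64; i += !last) {
		npkts++;                /* "process" one packet */
		if (npkts == queued)
			last = 1;       /* ring drained */
	}
	printf("handled %d of %d\n", npkts, queued);    /* capped at 64 */
	return 0;
}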
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index a142a9eb5226..6b11645edf35 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -1294,128 +1294,130 @@ bail: | |||
1294 | return ret; | 1294 | return ret; |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | static inline int usable(struct qib_pportdata *ppd, int active_only) | 1297 | static inline int usable(struct qib_pportdata *ppd) |
1298 | { | 1298 | { |
1299 | struct qib_devdata *dd = ppd->dd; | 1299 | struct qib_devdata *dd = ppd->dd; |
1300 | u32 linkok = active_only ? QIBL_LINKACTIVE : | ||
1301 | (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE); | ||
1302 | 1300 | ||
1303 | return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && | 1301 | return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && |
1304 | (ppd->lflags & linkok); | 1302 | (ppd->lflags & QIBL_LINKACTIVE); |
1305 | } | 1303 | } |
1306 | 1304 | ||
1307 | static int find_free_ctxt(int unit, struct file *fp, | 1305 | /* |
1308 | const struct qib_user_info *uinfo) | 1306 | * Select a context on the given device, either using a requested port |
1307 | * or the port based on the context number. | ||
1308 | */ | ||
1309 | static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, | ||
1310 | const struct qib_user_info *uinfo) | ||
1309 | { | 1311 | { |
1310 | struct qib_devdata *dd = qib_lookup(unit); | ||
1311 | struct qib_pportdata *ppd = NULL; | 1312 | struct qib_pportdata *ppd = NULL; |
1312 | int ret; | 1313 | int ret, ctxt; |
1313 | u32 ctxt; | ||
1314 | 1314 | ||
1315 | if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) { | 1315 | if (port) { |
1316 | ret = -ENODEV; | 1316 | if (!usable(dd->pport + port - 1)) { |
1317 | goto bail; | ||
1318 | } | ||
1319 | |||
1320 | /* | ||
1321 | * If users requests specific port, only try that one port, else | ||
1322 | * select "best" port below, based on context. | ||
1323 | */ | ||
1324 | if (uinfo->spu_port) { | ||
1325 | ppd = dd->pport + uinfo->spu_port - 1; | ||
1326 | if (!usable(ppd, 0)) { | ||
1327 | ret = -ENETDOWN; | 1317 | ret = -ENETDOWN; |
1328 | goto bail; | 1318 | goto done; |
1329 | } | 1319 | } else |
1320 | ppd = dd->pport + port - 1; | ||
1330 | } | 1321 | } |
1331 | 1322 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt]; | |
1332 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | 1323 | ctxt++) |
1333 | if (dd->rcd[ctxt]) | 1324 | ; |
1334 | continue; | 1325 | if (ctxt == dd->cfgctxts) { |
1335 | /* | 1326 | ret = -EBUSY; |
1336 | * The setting and clearing of user context rcd[x] protected | 1327 | goto done; |
1337 | * by the qib_mutex | 1328 | } |
1338 | */ | 1329 | if (!ppd) { |
1339 | if (!ppd) { | 1330 | u32 pidx = ctxt % dd->num_pports; |
1340 | /* choose port based on ctxt, if up, else 1st up */ | 1331 | if (usable(dd->pport + pidx)) |
1341 | ppd = dd->pport + (ctxt % dd->num_pports); | 1332 | ppd = dd->pport + pidx; |
1342 | if (!usable(ppd, 0)) { | 1333 | else { |
1343 | int i; | 1334 | for (pidx = 0; pidx < dd->num_pports && !ppd; |
1344 | for (i = 0; i < dd->num_pports; i++) { | 1335 | pidx++) |
1345 | ppd = dd->pport + i; | 1336 | if (usable(dd->pport + pidx)) |
1346 | if (usable(ppd, 0)) | 1337 | ppd = dd->pport + pidx; |
1347 | break; | ||
1348 | } | ||
1349 | if (i == dd->num_pports) { | ||
1350 | ret = -ENETDOWN; | ||
1351 | goto bail; | ||
1352 | } | ||
1353 | } | ||
1354 | } | 1338 | } |
1355 | ret = setup_ctxt(ppd, ctxt, fp, uinfo); | ||
1356 | goto bail; | ||
1357 | } | 1339 | } |
1358 | ret = -EBUSY; | 1340 | ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN; |
1341 | done: | ||
1342 | return ret; | ||
1343 | } | ||
1344 | |||
1345 | static int find_free_ctxt(int unit, struct file *fp, | ||
1346 | const struct qib_user_info *uinfo) | ||
1347 | { | ||
1348 | struct qib_devdata *dd = qib_lookup(unit); | ||
1349 | int ret; | ||
1350 | |||
1351 | if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) | ||
1352 | ret = -ENODEV; | ||
1353 | else | ||
1354 | ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo); | ||
1359 | 1355 | ||
1360 | bail: | ||
1361 | return ret; | 1356 | return ret; |
1362 | } | 1357 | } |
1363 | 1358 | ||
1364 | static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo) | 1359 | static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, |
1360 | unsigned alg) | ||
1365 | { | 1361 | { |
1366 | struct qib_pportdata *ppd; | 1362 | struct qib_devdata *udd = NULL; |
1367 | int ret = 0, devmax; | 1363 | int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i; |
1368 | int npresent, nup; | ||
1369 | int ndev; | ||
1370 | u32 port = uinfo->spu_port, ctxt; | 1364 | u32 port = uinfo->spu_port, ctxt; |
1371 | 1365 | ||
1372 | devmax = qib_count_units(&npresent, &nup); | 1366 | devmax = qib_count_units(&npresent, &nup); |
1367 | if (!npresent) { | ||
1368 | ret = -ENXIO; | ||
1369 | goto done; | ||
1370 | } | ||
1371 | if (nup == 0) { | ||
1372 | ret = -ENETDOWN; | ||
1373 | goto done; | ||
1374 | } | ||
1373 | 1375 | ||
1374 | for (ndev = 0; ndev < devmax; ndev++) { | 1376 | if (alg == QIB_PORT_ALG_ACROSS) { |
1375 | struct qib_devdata *dd = qib_lookup(ndev); | 1377 | unsigned inuse = ~0U; |
1376 | 1378 | /* find device (with ACTIVE ports) with fewest ctxts in use */ | |
1377 | /* device portion of usable() */ | 1379 | for (ndev = 0; ndev < devmax; ndev++) { |
1378 | if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) | 1380 | struct qib_devdata *dd = qib_lookup(ndev); |
1379 | continue; | 1381 | unsigned cused = 0, cfree = 0; |
1380 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) { | 1382 | if (!dd) |
1381 | if (dd->rcd[ctxt]) | ||
1382 | continue; | 1383 | continue; |
1383 | if (port) { | 1384 | if (port && port <= dd->num_pports && |
1384 | if (port > dd->num_pports) | 1385 | usable(dd->pport + port - 1)) |
1385 | continue; | 1386 | dusable = 1; |
1386 | ppd = dd->pport + port - 1; | 1387 | else |
1387 | if (!usable(ppd, 0)) | 1388 | for (i = 0; i < dd->num_pports; i++) |
1388 | continue; | 1389 | if (usable(dd->pport + i)) |
1389 | } else { | 1390 | dusable++; |
1390 | /* | 1391 | if (!dusable) |
1391 | * choose port based on ctxt, if up, else | 1392 | continue; |
1392 | * first port that's up for multi-port HCA | 1393 | for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; |
1393 | */ | 1394 | ctxt++) |
1394 | ppd = dd->pport + (ctxt % dd->num_pports); | 1395 | if (dd->rcd[ctxt]) |
1395 | if (!usable(ppd, 0)) { | 1396 | cused++; |
1396 | int j; | 1397 | else |
1397 | 1398 | cfree++; | |
1398 | ppd = NULL; | 1399 | if (cfree && cused < inuse) { |
1399 | for (j = 0; j < dd->num_pports && | 1400 | udd = dd; |
1400 | !ppd; j++) | 1401 | inuse = cused; |
1401 | if (usable(dd->pport + j, 0)) | ||
1402 | ppd = dd->pport + j; | ||
1403 | if (!ppd) | ||
1404 | continue; /* to next unit */ | ||
1405 | } | ||
1406 | } | 1402 | } |
1407 | ret = setup_ctxt(ppd, ctxt, fp, uinfo); | 1403 | } |
1404 | if (udd) { | ||
1405 | ret = choose_port_ctxt(fp, udd, port, uinfo); | ||
1408 | goto done; | 1406 | goto done; |
1409 | } | 1407 | } |
1408 | } else { | ||
1409 | for (ndev = 0; ndev < devmax; ndev++) { | ||
1410 | struct qib_devdata *dd = qib_lookup(ndev); | ||
1411 | if (dd) { | ||
1412 | ret = choose_port_ctxt(fp, dd, port, uinfo); | ||
1413 | if (!ret) | ||
1414 | goto done; | ||
1415 | if (ret == -EBUSY) | ||
1416 | dusable++; | ||
1417 | } | ||
1418 | } | ||
1410 | } | 1419 | } |
1411 | 1420 | ret = dusable ? -EBUSY : -ENETDOWN; | |
1412 | if (npresent) { | ||
1413 | if (nup == 0) | ||
1414 | ret = -ENETDOWN; | ||
1415 | else | ||
1416 | ret = -EBUSY; | ||
1417 | } else | ||
1418 | ret = -ENXIO; | ||
1419 | 1421 | ||
1420 | done: | 1422 | done: |
1421 | return ret; | 1423 | return ret; |
@@ -1481,7 +1483,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) | |||
1481 | { | 1483 | { |
1482 | int ret; | 1484 | int ret; |
1483 | int i_minor; | 1485 | int i_minor; |
1484 | unsigned swmajor, swminor; | 1486 | unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS; |
1485 | 1487 | ||
1486 | /* Check to be sure we haven't already initialized this file */ | 1488 | /* Check to be sure we haven't already initialized this file */ |
1487 | if (ctxt_fp(fp)) { | 1489 | if (ctxt_fp(fp)) { |
@@ -1498,6 +1500,9 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) | |||
1498 | 1500 | ||
1499 | swminor = uinfo->spu_userversion & 0xffff; | 1501 | swminor = uinfo->spu_userversion & 0xffff; |
1500 | 1502 | ||
1503 | if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT) | ||
1504 | alg = uinfo->spu_port_alg; | ||
1505 | |||
1501 | mutex_lock(&qib_mutex); | 1506 | mutex_lock(&qib_mutex); |
1502 | 1507 | ||
1503 | if (qib_compatible_subctxts(swmajor, swminor) && | 1508 | if (qib_compatible_subctxts(swmajor, swminor) && |
@@ -1514,7 +1519,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo) | |||
1514 | if (i_minor) | 1519 | if (i_minor) |
1515 | ret = find_free_ctxt(i_minor - 1, fp, uinfo); | 1520 | ret = find_free_ctxt(i_minor - 1, fp, uinfo); |
1516 | else | 1521 | else |
1517 | ret = get_a_ctxt(fp, uinfo); | 1522 | ret = get_a_ctxt(fp, uinfo, alg); |
1518 | 1523 | ||
1519 | done_chk_sdma: | 1524 | done_chk_sdma: |
1520 | if (!ret) { | 1525 | if (!ret) { |
@@ -1862,7 +1867,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) | |||
1862 | { | 1867 | { |
1863 | int ret = 0; | 1868 | int ret = 0; |
1864 | 1869 | ||
1865 | if (!usable(rcd->ppd, 1)) { | 1870 | if (!usable(rcd->ppd)) { |
1866 | int i; | 1871 | int i; |
1867 | /* | 1872 | /* |
1868 | * if link is down, or otherwise not usable, delay | 1873 | * if link is down, or otherwise not usable, delay |
@@ -1881,7 +1886,7 @@ static int disarm_req_delay(struct qib_ctxtdata *rcd) | |||
1881 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, | 1886 | set_bit(_QIB_EVENT_DISARM_BUFS_BIT, |
1882 | &rcd->user_event_mask[i]); | 1887 | &rcd->user_event_mask[i]); |
1883 | } | 1888 | } |
1884 | for (i = 0; !usable(rcd->ppd, 1) && i < 300; i++) | 1889 | for (i = 0; !usable(rcd->ppd) && i < 300; i++) |
1885 | msleep(100); | 1890 | msleep(100); |
1886 | ret = -ENETDOWN; | 1891 | ret = -ENETDOWN; |
1887 | } | 1892 | } |
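QIB_PORT_ALG_ACROSS reduces to: among devices that still have a free user context and a usable port, pick the one with the fewest contexts already in use. A runnable reduction of that selection loop, with hypothetical per-device counts:

#include <stdio.h>

int main(void)
{
	unsigned int used[3]  = { 5, 2, 4 };    /* contexts in use per device */
	unsigned int avail[3] = { 3, 6, 0 };    /* free contexts per device */
	unsigned int inuse = ~0U;
	int ndev = 3, d, udd = -1;

	for (d = 0; d < ndev; d++)
		if (avail[d] && used[d] < inuse) {
			udd = d;        /* current best candidate */
			inuse = used[d];
		}
	printf("chose device %d\n", udd);       /* device 1: fewest in use */
	return 0;
}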
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 844954bf417b..9f989c0ba9d3 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c | |||
@@ -135,8 +135,8 @@ static ssize_t driver_names_read(struct file *file, char __user *buf, | |||
135 | } | 135 | } |
136 | 136 | ||
137 | static const struct file_operations driver_ops[] = { | 137 | static const struct file_operations driver_ops[] = { |
138 | { .read = driver_stats_read, }, | 138 | { .read = driver_stats_read, .llseek = generic_file_llseek, }, |
139 | { .read = driver_names_read, }, | 139 | { .read = driver_names_read, .llseek = generic_file_llseek, }, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | /* read the per-device counters */ | 142 | /* read the per-device counters */ |
@@ -164,8 +164,8 @@ static ssize_t dev_names_read(struct file *file, char __user *buf, | |||
164 | } | 164 | } |
165 | 165 | ||
166 | static const struct file_operations cntr_ops[] = { | 166 | static const struct file_operations cntr_ops[] = { |
167 | { .read = dev_counters_read, }, | 167 | { .read = dev_counters_read, .llseek = generic_file_llseek, }, |
168 | { .read = dev_names_read, }, | 168 | { .read = dev_names_read, .llseek = generic_file_llseek, }, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | /* | 171 | /* |
@@ -210,9 +210,9 @@ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, | |||
210 | } | 210 | } |
211 | 211 | ||
212 | static const struct file_operations portcntr_ops[] = { | 212 | static const struct file_operations portcntr_ops[] = { |
213 | { .read = portnames_read, }, | 213 | { .read = portnames_read, .llseek = generic_file_llseek, }, |
214 | { .read = portcntrs_1_read, }, | 214 | { .read = portcntrs_1_read, .llseek = generic_file_llseek, }, |
215 | { .read = portcntrs_2_read, }, | 215 | { .read = portcntrs_2_read, .llseek = generic_file_llseek, }, |
216 | }; | 216 | }; |
217 | 217 | ||
218 | /* | 218 | /* |
@@ -261,8 +261,8 @@ static ssize_t qsfp_2_read(struct file *file, char __user *buf, | |||
261 | } | 261 | } |
262 | 262 | ||
263 | static const struct file_operations qsfp_ops[] = { | 263 | static const struct file_operations qsfp_ops[] = { |
264 | { .read = qsfp_1_read, }, | 264 | { .read = qsfp_1_read, .llseek = generic_file_llseek, }, |
265 | { .read = qsfp_2_read, }, | 265 | { .read = qsfp_2_read, .llseek = generic_file_llseek, }, |
266 | }; | 266 | }; |
267 | 267 | ||
268 | static ssize_t flash_read(struct file *file, char __user *buf, | 268 | static ssize_t flash_read(struct file *file, char __user *buf, |
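Spelling out .llseek gives these read-only stats files a standard, explicit seek implementation instead of relying on the historical default, in line with the tree-wide push at the time to make every fops' llseek explicit. The resulting shape, with a hypothetical reader:

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	return 0;       /* hypothetical reader body */
}

static const struct file_operations example_ops = {
	.read   = example_read,
	.llseek = generic_file_llseek,  /* explicit, standard seek */
};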
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 5eedf83e2c3b..584d443b5335 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -5864,7 +5864,7 @@ static void write_7322_initregs(struct qib_devdata *dd) | |||
5864 | * Doesn't clear any of the error bits that might be set. | 5864 | * Doesn't clear any of the error bits that might be set. |
5865 | */ | 5865 | */ |
5866 | val = TIDFLOW_ERRBITS; /* these are W1C */ | 5866 | val = TIDFLOW_ERRBITS; /* these are W1C */ |
5867 | for (i = 0; i < dd->ctxtcnt; i++) { | 5867 | for (i = 0; i < dd->cfgctxts; i++) { |
5868 | int flow; | 5868 | int flow; |
5869 | for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) | 5869 | for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) |
5870 | qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); | 5870 | qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); |
@@ -7271,6 +7271,8 @@ static int serdes_7322_init(struct qib_pportdata *ppd) | |||
7271 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ | 7271 | ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */ |
7272 | 7272 | ||
7273 | data = qib_read_kreg_port(ppd, krp_serdesctrl); | 7273 | data = qib_read_kreg_port(ppd, krp_serdesctrl); |
7274 | /* Turn off IB latency mode */ | ||
7275 | data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE); | ||
7274 | qib_write_kreg_port(ppd, krp_serdesctrl, data | | 7276 | qib_write_kreg_port(ppd, krp_serdesctrl, data | |
7275 | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); | 7277 | SYM_MASK(IBSerdesCtrl_0, RXLOSEN)); |
7276 | 7278 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index a873dd596e81..f1d16d3a01f6 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -93,7 +93,7 @@ unsigned long *qib_cpulist; | |||
93 | void qib_set_ctxtcnt(struct qib_devdata *dd) | 93 | void qib_set_ctxtcnt(struct qib_devdata *dd) |
94 | { | 94 | { |
95 | if (!qib_cfgctxts) | 95 | if (!qib_cfgctxts) |
96 | dd->cfgctxts = dd->ctxtcnt; | 96 | dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); |
97 | else if (qib_cfgctxts < dd->num_pports) | 97 | else if (qib_cfgctxts < dd->num_pports) |
98 | dd->cfgctxts = dd->ctxtcnt; | 98 | dd->cfgctxts = dd->ctxtcnt; |
99 | else if (qib_cfgctxts <= dd->ctxtcnt) | 99 | else if (qib_cfgctxts <= dd->ctxtcnt) |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index e0f65e39076b..6c39851d2ded 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c | |||
@@ -450,7 +450,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
450 | * | 450 | * |
451 | * Flushes both send and receive work queues. | 451 | * Flushes both send and receive work queues. |
452 | * Returns true if last WQE event should be generated. | 452 | * Returns true if last WQE event should be generated. |
453 | * The QP s_lock should be held and interrupts disabled. | 453 | * The QP r_lock and s_lock should be held and interrupts disabled. |
454 | * If we are already in error state, just return. | 454 | * If we are already in error state, just return. |
455 | */ | 455 | */ |
456 | int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | 456 | int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 40c0a373719c..a0931119bd78 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -868,7 +868,7 @@ done: | |||
868 | 868 | ||
869 | /* | 869 | /* |
870 | * Back up requester to resend the last un-ACKed request. | 870 | * Back up requester to resend the last un-ACKed request. |
871 | * The QP s_lock should be held and interrupts disabled. | 871 | * The QP r_lock and s_lock should be held and interrupts disabled. |
872 | */ | 872 | */ |
873 | static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) | 873 | static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) |
874 | { | 874 | { |
@@ -911,7 +911,8 @@ static void rc_timeout(unsigned long arg) | |||
911 | struct qib_ibport *ibp; | 911 | struct qib_ibport *ibp; |
912 | unsigned long flags; | 912 | unsigned long flags; |
913 | 913 | ||
914 | spin_lock_irqsave(&qp->s_lock, flags); | 914 | spin_lock_irqsave(&qp->r_lock, flags); |
915 | spin_lock(&qp->s_lock); | ||
915 | if (qp->s_flags & QIB_S_TIMER) { | 916 | if (qp->s_flags & QIB_S_TIMER) { |
916 | ibp = to_iport(qp->ibqp.device, qp->port_num); | 917 | ibp = to_iport(qp->ibqp.device, qp->port_num); |
917 | ibp->n_rc_timeouts++; | 918 | ibp->n_rc_timeouts++; |
@@ -920,7 +921,8 @@ static void rc_timeout(unsigned long arg) | |||
920 | qib_restart_rc(qp, qp->s_last_psn + 1, 1); | 921 | qib_restart_rc(qp, qp->s_last_psn + 1, 1); |
921 | qib_schedule_send(qp); | 922 | qib_schedule_send(qp); |
922 | } | 923 | } |
923 | spin_unlock_irqrestore(&qp->s_lock, flags); | 924 | spin_unlock(&qp->s_lock); |
925 | spin_unlock_irqrestore(&qp->r_lock, flags); | ||
924 | } | 926 | } |
925 | 927 | ||
926 | /* | 928 | /* |
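This is the qib_error_qp() race fix in miniature: rc_timeout() now takes r_lock before s_lock, the same nesting the receive path uses, so timer-driven restarts serialize against packet processing instead of racing it, and the consistent order is what rules out AB-BA deadlock. The discipline, in outline:

/* every path taking both locks nests them identically:
 * r_lock outer (irqsave), s_lock inner */
spin_lock_irqsave(&qp->r_lock, flags);
spin_lock(&qp->s_lock);

/* ... work that touches both receive and send state ... */

spin_unlock(&qp->s_lock);
spin_unlock_irqrestore(&qp->r_lock, flags);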
@@ -1414,10 +1416,6 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1414 | 1416 | ||
1415 | spin_lock_irqsave(&qp->s_lock, flags); | 1417 | spin_lock_irqsave(&qp->s_lock, flags); |
1416 | 1418 | ||
1417 | /* Double check we can process this now that we hold the s_lock. */ | ||
1418 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1419 | goto ack_done; | ||
1420 | |||
1421 | /* Ignore invalid responses. */ | 1419 | /* Ignore invalid responses. */ |
1422 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | 1420 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) |
1423 | goto ack_done; | 1421 | goto ack_done; |
@@ -1661,9 +1659,6 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | |||
1661 | ibp->n_rc_dupreq++; | 1659 | ibp->n_rc_dupreq++; |
1662 | 1660 | ||
1663 | spin_lock_irqsave(&qp->s_lock, flags); | 1661 | spin_lock_irqsave(&qp->s_lock, flags); |
1664 | /* Double check we can process this now that we hold the s_lock. */ | ||
1665 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1666 | goto unlock_done; | ||
1667 | 1662 | ||
1668 | for (i = qp->r_head_ack_queue; ; i = prev) { | 1663 | for (i = qp->r_head_ack_queue; ; i = prev) { |
1669 | if (i == qp->s_tail_ack_queue) | 1664 | if (i == qp->s_tail_ack_queue) |
@@ -1878,9 +1873,6 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1878 | psn = be32_to_cpu(ohdr->bth[2]); | 1873 | psn = be32_to_cpu(ohdr->bth[2]); |
1879 | opcode >>= 24; | 1874 | opcode >>= 24; |
1880 | 1875 | ||
1881 | /* Prevent simultaneous processing after APM on different CPUs */ | ||
1882 | spin_lock(&qp->r_lock); | ||
1883 | |||
1884 | /* | 1876 | /* |
1885 | * Process responses (ACKs) before anything else. Note that the | 1877 | * Process responses (ACKs) before anything else. Note that the |
1886 | * packet sequence number will be for something in the send work | 1878 | * packet sequence number will be for something in the send work |
@@ -1891,14 +1883,14 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
1891 | opcode <= OP(ATOMIC_ACKNOWLEDGE)) { | 1883 | opcode <= OP(ATOMIC_ACKNOWLEDGE)) { |
1892 | qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, | 1884 | qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, |
1893 | hdrsize, pmtu, rcd); | 1885 | hdrsize, pmtu, rcd); |
1894 | goto runlock; | 1886 | return; |
1895 | } | 1887 | } |
1896 | 1888 | ||
1897 | /* Compute 24 bits worth of difference. */ | 1889 | /* Compute 24 bits worth of difference. */ |
1898 | diff = qib_cmp24(psn, qp->r_psn); | 1890 | diff = qib_cmp24(psn, qp->r_psn); |
1899 | if (unlikely(diff)) { | 1891 | if (unlikely(diff)) { |
1900 | if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) | 1892 | if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) |
1901 | goto runlock; | 1893 | return; |
1902 | goto send_ack; | 1894 | goto send_ack; |
1903 | } | 1895 | } |
1904 | 1896 | ||
@@ -2090,9 +2082,6 @@ send_last: | |||
2090 | if (next > QIB_MAX_RDMA_ATOMIC) | 2082 | if (next > QIB_MAX_RDMA_ATOMIC) |
2091 | next = 0; | 2083 | next = 0; |
2092 | spin_lock_irqsave(&qp->s_lock, flags); | 2084 | spin_lock_irqsave(&qp->s_lock, flags); |
2093 | /* Double check we can process this while holding the s_lock. */ | ||
2094 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
2095 | goto srunlock; | ||
2096 | if (unlikely(next == qp->s_tail_ack_queue)) { | 2085 | if (unlikely(next == qp->s_tail_ack_queue)) { |
2097 | if (!qp->s_ack_queue[next].sent) | 2086 | if (!qp->s_ack_queue[next].sent) |
2098 | goto nack_inv_unlck; | 2087 | goto nack_inv_unlck; |
@@ -2146,7 +2135,7 @@ send_last: | |||
2146 | qp->s_flags |= QIB_S_RESP_PENDING; | 2135 | qp->s_flags |= QIB_S_RESP_PENDING; |
2147 | qib_schedule_send(qp); | 2136 | qib_schedule_send(qp); |
2148 | 2137 | ||
2149 | goto srunlock; | 2138 | goto sunlock; |
2150 | } | 2139 | } |
2151 | 2140 | ||
2152 | case OP(COMPARE_SWAP): | 2141 | case OP(COMPARE_SWAP): |
@@ -2165,9 +2154,6 @@ send_last: | |||
2165 | if (next > QIB_MAX_RDMA_ATOMIC) | 2154 | if (next > QIB_MAX_RDMA_ATOMIC) |
2166 | next = 0; | 2155 | next = 0; |
2167 | spin_lock_irqsave(&qp->s_lock, flags); | 2156 | spin_lock_irqsave(&qp->s_lock, flags); |
2168 | /* Double check we can process this while holding the s_lock. */ | ||
2169 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
2170 | goto srunlock; | ||
2171 | if (unlikely(next == qp->s_tail_ack_queue)) { | 2157 | if (unlikely(next == qp->s_tail_ack_queue)) { |
2172 | if (!qp->s_ack_queue[next].sent) | 2158 | if (!qp->s_ack_queue[next].sent) |
2173 | goto nack_inv_unlck; | 2159 | goto nack_inv_unlck; |
@@ -2213,7 +2199,7 @@ send_last: | |||
2213 | qp->s_flags |= QIB_S_RESP_PENDING; | 2199 | qp->s_flags |= QIB_S_RESP_PENDING; |
2214 | qib_schedule_send(qp); | 2200 | qib_schedule_send(qp); |
2215 | 2201 | ||
2216 | goto srunlock; | 2202 | goto sunlock; |
2217 | } | 2203 | } |
2218 | 2204 | ||
2219 | default: | 2205 | default: |
@@ -2227,7 +2213,7 @@ send_last: | |||
2227 | /* Send an ACK if requested or required. */ | 2213 | /* Send an ACK if requested or required. */ |
2228 | if (psn & (1 << 31)) | 2214 | if (psn & (1 << 31)) |
2229 | goto send_ack; | 2215 | goto send_ack; |
2230 | goto runlock; | 2216 | return; |
2231 | 2217 | ||
2232 | rnr_nak: | 2218 | rnr_nak: |
2233 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; | 2219 | qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; |
@@ -2238,7 +2224,7 @@ rnr_nak: | |||
2238 | atomic_inc(&qp->refcount); | 2224 | atomic_inc(&qp->refcount); |
2239 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | 2225 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); |
2240 | } | 2226 | } |
2241 | goto runlock; | 2227 | return; |
2242 | 2228 | ||
2243 | nack_op_err: | 2229 | nack_op_err: |
2244 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | 2230 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); |
@@ -2250,7 +2236,7 @@ nack_op_err: | |||
2250 | atomic_inc(&qp->refcount); | 2236 | atomic_inc(&qp->refcount); |
2251 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | 2237 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); |
2252 | } | 2238 | } |
2253 | goto runlock; | 2239 | return; |
2254 | 2240 | ||
2255 | nack_inv_unlck: | 2241 | nack_inv_unlck: |
2256 | spin_unlock_irqrestore(&qp->s_lock, flags); | 2242 | spin_unlock_irqrestore(&qp->s_lock, flags); |
@@ -2264,7 +2250,7 @@ nack_inv: | |||
2264 | atomic_inc(&qp->refcount); | 2250 | atomic_inc(&qp->refcount); |
2265 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); | 2251 | list_add_tail(&qp->rspwait, &rcd->qp_wait_list); |
2266 | } | 2252 | } |
2267 | goto runlock; | 2253 | return; |
2268 | 2254 | ||
2269 | nack_acc_unlck: | 2255 | nack_acc_unlck: |
2270 | spin_unlock_irqrestore(&qp->s_lock, flags); | 2256 | spin_unlock_irqrestore(&qp->s_lock, flags); |
@@ -2274,13 +2260,6 @@ nack_acc: | |||
2274 | qp->r_ack_psn = qp->r_psn; | 2260 | qp->r_ack_psn = qp->r_psn; |
2275 | send_ack: | 2261 | send_ack: |
2276 | qib_send_rc_ack(qp); | 2262 | qib_send_rc_ack(qp); |
2277 | runlock: | ||
2278 | spin_unlock(&qp->r_lock); | ||
2279 | return; | ||
2280 | |||
2281 | srunlock: | ||
2282 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
2283 | spin_unlock(&qp->r_lock); | ||
2284 | return; | 2263 | return; |
2285 | 2264 | ||
2286 | sunlock: | 2265 | sunlock: |
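The label shuffling above follows from the ownership change: qib_rc_rcv() no longer takes r_lock itself (see the qib_verbs.c hunk below), so every exit path that used to jump to runlock or srunlock to drop it becomes a plain return, and the repeated "double check" of QIB_PROCESS_RECV_OK under s_lock disappears because the receive state can no longer change under the handler. Schematically (a simplified before/after, not verbatim driver code):

    /* before: the handler owned the receive lock */
    spin_lock(&qp->r_lock);
    /* ... */
    runlock:
            spin_unlock(&qp->r_lock);
            return;

    /* after: the caller holds r_lock across the whole call */
    /* ... */
    return;   /* nothing left to drop */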
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index b8456881f7f6..cad44491320b 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c | |||
@@ -656,6 +656,7 @@ unmap: | |||
656 | } | 656 | } |
657 | qp = tx->qp; | 657 | qp = tx->qp; |
658 | qib_put_txreq(tx); | 658 | qib_put_txreq(tx); |
659 | spin_lock(&qp->r_lock); | ||
659 | spin_lock(&qp->s_lock); | 660 | spin_lock(&qp->s_lock); |
660 | if (qp->ibqp.qp_type == IB_QPT_RC) { | 661 | if (qp->ibqp.qp_type == IB_QPT_RC) { |
661 | /* XXX what about error sending RDMA read responses? */ | 662 | /* XXX what about error sending RDMA read responses? */ |
@@ -664,6 +665,7 @@ unmap: | |||
664 | } else if (qp->s_wqe) | 665 | } else if (qp->s_wqe) |
665 | qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); | 666 | qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); |
666 | spin_unlock(&qp->s_lock); | 667 | spin_unlock(&qp->s_lock); |
668 | spin_unlock(&qp->r_lock); | ||
667 | /* return zero to process the next send work request */ | 669 | /* return zero to process the next send work request */ |
668 | goto unlock; | 670 | goto unlock; |
669 | 671 | ||
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index dab4d9f4a2cc..d50a33fe8bbc 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c | |||
@@ -347,7 +347,7 @@ static struct kobj_type qib_sl2vl_ktype = { | |||
347 | 347 | ||
348 | #define QIB_DIAGC_ATTR(N) \ | 348 | #define QIB_DIAGC_ATTR(N) \ |
349 | static struct qib_diagc_attr qib_diagc_attr_##N = { \ | 349 | static struct qib_diagc_attr qib_diagc_attr_##N = { \ |
350 | .attr = { .name = __stringify(N), .mode = 0444 }, \ | 350 | .attr = { .name = __stringify(N), .mode = 0664 }, \ |
351 | .counter = offsetof(struct qib_ibport, n_##N) \ | 351 | .counter = offsetof(struct qib_ibport, n_##N) \ |
352 | } | 352 | } |
353 | 353 | ||
@@ -403,8 +403,27 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, | |||
403 | return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); | 403 | return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); |
404 | } | 404 | } |
405 | 405 | ||
406 | static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, | ||
407 | const char *buf, size_t size) | ||
408 | { | ||
409 | struct qib_diagc_attr *dattr = | ||
410 | container_of(attr, struct qib_diagc_attr, attr); | ||
411 | struct qib_pportdata *ppd = | ||
412 | container_of(kobj, struct qib_pportdata, diagc_kobj); | ||
413 | struct qib_ibport *qibp = &ppd->ibport_data; | ||
414 | char *endp; | ||
415 | long val = simple_strtol(buf, &endp, 0); | ||
416 | |||
417 | if (val < 0 || endp == buf) | ||
418 | return -EINVAL; | ||
419 | |||
420 | *(u32 *)((char *) qibp + dattr->counter) = val; | ||
421 | return size; | ||
422 | } | ||
423 | |||
406 | static const struct sysfs_ops qib_diagc_ops = { | 424 | static const struct sysfs_ops qib_diagc_ops = { |
407 | .show = diagc_attr_show, | 425 | .show = diagc_attr_show, |
426 | .store = diagc_attr_store, | ||
408 | }; | 427 | }; |
409 | 428 | ||
410 | static struct kobj_type qib_diagc_ktype = { | 429 | static struct kobj_type qib_diagc_ktype = { |
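diagc_attr_store() accepts any non-negative integer, so writing 0 clears a counter; that is the point of the 0444 -> 0664 mode change above. A userspace sketch, with an illustrative sysfs path (the diag_counters directory hangs off the qib port object, but the exact path depends on the device and port numbering):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical path: adjust device and port for your system */
            FILE *f = fopen("/sys/class/infiniband/qib0/ports/1/"
                            "diag_counters/rc_timeouts", "w");

            if (!f)
                    return 1;
            fputs("0\n", f);  /* base 0 in simple_strtol(): decimal, octal, or hex */
            return fclose(f) ? 1 : 0;
    }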
diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index af30232b6831..7f36454c225e 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c | |||
@@ -170,7 +170,7 @@ static int find_ctxt(struct qib_devdata *dd, unsigned bufn) | |||
170 | void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, | 170 | void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, |
171 | unsigned cnt) | 171 | unsigned cnt) |
172 | { | 172 | { |
173 | struct qib_pportdata *ppd, *pppd[dd->num_pports]; | 173 | struct qib_pportdata *ppd, *pppd[QIB_MAX_IB_PORTS]; |
174 | unsigned i; | 174 | unsigned i; |
175 | unsigned long flags; | 175 | unsigned long flags; |
176 | 176 | ||
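The one-liner in qib_tx.c removes a C99 variable-length array: pppd[dd->num_pports] allocated a runtime-sized block on the small kernel stack, which is hard to bound. Sizing the array by the compile-time maximum makes stack usage static. A sketch of the pattern (QIB_MAX_IB_PORTS is assumed to be the driver's compile-time port limit, 2 on qib hardware, and to bound dd->num_pports):

    #include <stddef.h>

    #define QIB_MAX_IB_PORTS 2   /* assumed compile-time upper bound */

    void disarm_sketch(unsigned int num_pports)
    {
            void *pppd[QIB_MAX_IB_PORTS];      /* fixed size: no VLA on the stack */
            unsigned int i;

            for (i = 0; i < num_pports; i++)   /* num_pports <= QIB_MAX_IB_PORTS */
                    pppd[i] = NULL;            /* stand-in for per-port bookkeeping */
    }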
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 6c7fe78cca64..b9c8b6346c1b 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
@@ -272,9 +272,6 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
272 | opcode >>= 24; | 272 | opcode >>= 24; |
273 | memset(&wc, 0, sizeof wc); | 273 | memset(&wc, 0, sizeof wc); |
274 | 274 | ||
275 | /* Prevent simultaneous processing after APM on different CPUs */ | ||
276 | spin_lock(&qp->r_lock); | ||
277 | |||
278 | /* Compare the PSN versus the expected PSN. */ | 275 |

279 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { | 276 | if (unlikely(qib_cmp24(psn, qp->r_psn) != 0)) { |
280 | /* | 277 | /* |
@@ -534,7 +531,6 @@ rdma_last: | |||
534 | } | 531 | } |
535 | qp->r_psn++; | 532 | qp->r_psn++; |
536 | qp->r_state = opcode; | 533 | qp->r_state = opcode; |
537 | spin_unlock(&qp->r_lock); | ||
538 | return; | 534 | return; |
539 | 535 | ||
540 | rewind: | 536 | rewind: |
@@ -542,12 +538,10 @@ rewind: | |||
542 | qp->r_sge.num_sge = 0; | 538 | qp->r_sge.num_sge = 0; |
543 | drop: | 539 | drop: |
544 | ibp->n_pkt_drops++; | 540 | ibp->n_pkt_drops++; |
545 | spin_unlock(&qp->r_lock); | ||
546 | return; | 541 | return; |
547 | 542 | ||
548 | op_err: | 543 | op_err: |
549 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | 544 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); |
550 | spin_unlock(&qp->r_lock); | ||
551 | return; | 545 | return; |
552 | 546 | ||
553 | sunlock: | 547 | sunlock: |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index c838cda73347..e1b3da2a1f85 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -535,13 +535,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
535 | wc.byte_len = tlen + sizeof(struct ib_grh); | 535 | wc.byte_len = tlen + sizeof(struct ib_grh); |
536 | 536 | ||
537 | /* | 537 | /* |
538 | * We need to serialize getting a receive work queue entry and | ||
539 | * generating a completion for it against QPs sending to this QP | ||
540 | * locally. | ||
541 | */ | ||
542 | spin_lock(&qp->r_lock); | ||
543 | |||
544 | /* | ||
545 | * Get the next work request entry to find where to put the data. | 538 | * Get the next work request entry to find where to put the data. |
546 | */ | 539 | */ |
547 | if (qp->r_flags & QIB_R_REUSE_SGE) | 540 | if (qp->r_flags & QIB_R_REUSE_SGE) |
@@ -552,19 +545,19 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
552 | ret = qib_get_rwqe(qp, 0); | 545 | ret = qib_get_rwqe(qp, 0); |
553 | if (ret < 0) { | 546 | if (ret < 0) { |
554 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); | 547 | qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); |
555 | goto bail_unlock; | 548 | return; |
556 | } | 549 | } |
557 | if (!ret) { | 550 | if (!ret) { |
558 | if (qp->ibqp.qp_num == 0) | 551 | if (qp->ibqp.qp_num == 0) |
559 | ibp->n_vl15_dropped++; | 552 | ibp->n_vl15_dropped++; |
560 | goto bail_unlock; | 553 | return; |
561 | } | 554 | } |
562 | } | 555 | } |
563 | /* Silently drop packets which are too big. */ | 556 | /* Silently drop packets which are too big. */ |
564 | if (unlikely(wc.byte_len > qp->r_len)) { | 557 | if (unlikely(wc.byte_len > qp->r_len)) { |
565 | qp->r_flags |= QIB_R_REUSE_SGE; | 558 | qp->r_flags |= QIB_R_REUSE_SGE; |
566 | ibp->n_pkt_drops++; | 559 | ibp->n_pkt_drops++; |
567 | goto bail_unlock; | 560 | return; |
568 | } | 561 | } |
569 | if (has_grh) { | 562 | if (has_grh) { |
570 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, | 563 | qib_copy_sge(&qp->r_sge, &hdr->u.l.grh, |
@@ -579,7 +572,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
579 | qp->r_sge.sge = *qp->r_sge.sg_list++; | 572 | qp->r_sge.sge = *qp->r_sge.sg_list++; |
580 | } | 573 | } |
581 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 574 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
582 | goto bail_unlock; | 575 | return; |
583 | wc.wr_id = qp->r_wr_id; | 576 | wc.wr_id = qp->r_wr_id; |
584 | wc.status = IB_WC_SUCCESS; | 577 | wc.status = IB_WC_SUCCESS; |
585 | wc.opcode = IB_WC_RECV; | 578 | wc.opcode = IB_WC_RECV; |
@@ -601,7 +594,5 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
601 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, | 594 | qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, |
602 | (ohdr->bth[0] & | 595 | (ohdr->bth[0] & |
603 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); | 596 | cpu_to_be32(IB_BTH_SOLICITED)) != 0); |
604 | bail_unlock: | ||
605 | spin_unlock(&qp->r_lock); | ||
606 | bail:; | 597 | bail:; |
607 | } | 598 | } |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index cda8f4173d23..9fab40488850 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -550,10 +550,12 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
550 | { | 550 | { |
551 | struct qib_ibport *ibp = &rcd->ppd->ibport_data; | 551 | struct qib_ibport *ibp = &rcd->ppd->ibport_data; |
552 | 552 | ||
553 | spin_lock(&qp->r_lock); | ||
554 | |||
553 | /* Check for valid receive state. */ | 555 | /* Check for valid receive state. */ |
554 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { | 556 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { |
555 | ibp->n_pkt_drops++; | 557 | ibp->n_pkt_drops++; |
556 | return; | 558 | goto unlock; |
557 | } | 559 | } |
558 | 560 | ||
559 | switch (qp->ibqp.qp_type) { | 561 | switch (qp->ibqp.qp_type) { |
@@ -577,6 +579,9 @@ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, | |||
577 | default: | 579 | default: |
578 | break; | 580 | break; |
579 | } | 581 | } |
582 | |||
583 | unlock: | ||
584 | spin_unlock(&qp->r_lock); | ||
580 | } | 585 | } |
581 | 586 | ||
582 | /** | 587 | /** |
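This qib_verbs.c hunk is the other half of the race fix ("Fix race between qib_error_qp() and receive packet processing" in the commit list): r_lock is now taken once at the common dispatch point, before the receive-state test, and held across the per-QP-type handler, which is why qib_rc_rcv(), qib_uc_rcv(), and qib_ud_rcv() all shed their private locking above. Condensed shape of the new dispatch (simplified from the patch):

    spin_lock(&qp->r_lock);
    if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
            ibp->n_pkt_drops++;
            goto unlock;
    }
    switch (qp->ibqp.qp_type) {
    case IB_QPT_RC:
            qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
            break;
    /* ... UC, UD, SMI/GSI cases elided ... */
    }
    unlock:
            spin_unlock(&qp->r_lock);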
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 0b9ef0716588..95a08a8ca8aa 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c | |||
@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, | |||
170 | } | 170 | } |
171 | 171 | ||
172 | 172 | ||
173 | int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) | 173 | static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) |
174 | { | 174 | { |
175 | int i, j; | 175 | int i, j; |
176 | u64 dma_addr; | 176 | u64 dma_addr; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index ed3f9ebae882..7f8f16bad753 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -811,6 +811,38 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
811 | return len; | 811 | return len; |
812 | } | 812 | } |
813 | 813 | ||
814 | static int srp_post_recv(struct srp_target_port *target) | ||
815 | { | ||
816 | unsigned long flags; | ||
817 | struct srp_iu *iu; | ||
818 | struct ib_sge list; | ||
819 | struct ib_recv_wr wr, *bad_wr; | ||
820 | unsigned int next; | ||
821 | int ret; | ||
822 | |||
823 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
824 | |||
825 | next = target->rx_head & (SRP_RQ_SIZE - 1); | ||
826 | wr.wr_id = next; | ||
827 | iu = target->rx_ring[next]; | ||
828 | |||
829 | list.addr = iu->dma; | ||
830 | list.length = iu->size; | ||
831 | list.lkey = target->srp_host->srp_dev->mr->lkey; | ||
832 | |||
833 | wr.next = NULL; | ||
834 | wr.sg_list = &list; | ||
835 | wr.num_sge = 1; | ||
836 | |||
837 | ret = ib_post_recv(target->qp, &wr, &bad_wr); | ||
838 | if (!ret) | ||
839 | ++target->rx_head; | ||
840 | |||
841 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
842 | |||
843 | return ret; | ||
844 | } | ||
845 | |||
814 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | 846 | static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) |
815 | { | 847 | { |
816 | struct srp_request *req; | 848 | struct srp_request *req; |
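The relocated srp_post_recv() now takes host_lock itself rather than relying on a __srp_post_recv() caller to hold it, and it selects the ring slot with rx_head & (SRP_RQ_SIZE - 1). That mask is only a valid modulo when SRP_RQ_SIZE is a power of two; rx_head itself grows without bound. A standalone demonstration of the indexing (the SRP_RQ_SIZE value is assumed):

    #include <stdio.h>

    #define SRP_RQ_SIZE 64   /* assumption: must be a power of two for the mask */

    int main(void)
    {
            unsigned int rx_head;

            /* rx_head increases monotonically; the mask wraps it onto the ring */
            for (rx_head = 62; rx_head < 67; ++rx_head)
                    printf("rx_head=%u -> slot %u\n",
                           rx_head, rx_head & (SRP_RQ_SIZE - 1));
            return 0;
    }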
@@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |||
868 | { | 900 | { |
869 | struct ib_device *dev; | 901 | struct ib_device *dev; |
870 | struct srp_iu *iu; | 902 | struct srp_iu *iu; |
903 | int res; | ||
871 | u8 opcode; | 904 | u8 opcode; |
872 | 905 | ||
873 | iu = target->rx_ring[wc->wr_id]; | 906 | iu = target->rx_ring[wc->wr_id]; |
@@ -879,21 +912,10 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |||
879 | opcode = *(u8 *) iu->buf; | 912 | opcode = *(u8 *) iu->buf; |
880 | 913 | ||
881 | if (0) { | 914 | if (0) { |
882 | int i; | ||
883 | |||
884 | shost_printk(KERN_ERR, target->scsi_host, | 915 | shost_printk(KERN_ERR, target->scsi_host, |
885 | PFX "recv completion, opcode 0x%02x\n", opcode); | 916 | PFX "recv completion, opcode 0x%02x\n", opcode); |
886 | 917 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1, | |
887 | for (i = 0; i < wc->byte_len; ++i) { | 918 | iu->buf, wc->byte_len, true); |
888 | if (i % 8 == 0) | ||
889 | printk(KERN_ERR " [%02x] ", i); | ||
890 | printk(" %02x", ((u8 *) iu->buf)[i]); | ||
891 | if ((i + 1) % 8 == 0) | ||
892 | printk("\n"); | ||
893 | } | ||
894 | |||
895 | if (wc->byte_len % 8) | ||
896 | printk("\n"); | ||
897 | } | 919 | } |
898 | 920 | ||
899 | switch (opcode) { | 921 | switch (opcode) { |
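print_hex_dump() is the kernel's stock hex dumper; the call above replaces roughly fifteen lines of hand-rolled printk() formatting and produces offset-prefixed rows of eight single-byte groups plus an ASCII column. A kernel-side sketch of the same call (in kernels of this vintage the declaration lives in linux/kernel.h; it later moved to linux/printk.h):

    #include <linux/kernel.h>

    static void dump_iu(const void *buf, size_t len)
    {
            /* level, line prefix, prefix style, bytes per row, group size,
             * buffer, length, append ASCII column */
            print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
                           buf, len, true);
    }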
@@ -915,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |||
915 | 937 | ||
916 | ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, | 938 | ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, |
917 | DMA_FROM_DEVICE); | 939 | DMA_FROM_DEVICE); |
940 | |||
941 | res = srp_post_recv(target); | ||
942 | if (res != 0) | ||
943 | shost_printk(KERN_ERR, target->scsi_host, | ||
944 | PFX "Recv failed with error code %d\n", res); | ||
918 | } | 945 | } |
919 | 946 | ||
920 | static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) | 947 | static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) |
@@ -954,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
954 | } | 981 | } |
955 | } | 982 | } |
956 | 983 | ||
957 | static int __srp_post_recv(struct srp_target_port *target) | ||
958 | { | ||
959 | struct srp_iu *iu; | ||
960 | struct ib_sge list; | ||
961 | struct ib_recv_wr wr, *bad_wr; | ||
962 | unsigned int next; | ||
963 | int ret; | ||
964 | |||
965 | next = target->rx_head & (SRP_RQ_SIZE - 1); | ||
966 | wr.wr_id = next; | ||
967 | iu = target->rx_ring[next]; | ||
968 | |||
969 | list.addr = iu->dma; | ||
970 | list.length = iu->size; | ||
971 | list.lkey = target->srp_host->srp_dev->mr->lkey; | ||
972 | |||
973 | wr.next = NULL; | ||
974 | wr.sg_list = &list; | ||
975 | wr.num_sge = 1; | ||
976 | |||
977 | ret = ib_post_recv(target->qp, &wr, &bad_wr); | ||
978 | if (!ret) | ||
979 | ++target->rx_head; | ||
980 | |||
981 | return ret; | ||
982 | } | ||
983 | |||
984 | static int srp_post_recv(struct srp_target_port *target) | ||
985 | { | ||
986 | unsigned long flags; | ||
987 | int ret; | ||
988 | |||
989 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
990 | ret = __srp_post_recv(target); | ||
991 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
992 | |||
993 | return ret; | ||
994 | } | ||
995 | |||
996 | /* | 984 | /* |
997 | * Must be called with target->scsi_host->host_lock held to protect | 985 | * Must be called with target->scsi_host->host_lock held to protect |
998 | * req_lim and tx_head. Lock cannot be dropped between call here and | 986 | * req_lim and tx_head. Lock cannot be dropped between call here and |
@@ -1102,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
1102 | goto err; | 1090 | goto err; |
1103 | } | 1091 | } |
1104 | 1092 | ||
1105 | if (__srp_post_recv(target)) { | ||
1106 | shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n"); | ||
1107 | goto err_unmap; | ||
1108 | } | ||
1109 | |||
1110 | ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, | 1093 | ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, |
1111 | DMA_TO_DEVICE); | 1094 | DMA_TO_DEVICE); |
1112 | 1095 | ||
@@ -1249,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1249 | int attr_mask = 0; | 1232 | int attr_mask = 0; |
1250 | int comp = 0; | 1233 | int comp = 0; |
1251 | int opcode = 0; | 1234 | int opcode = 0; |
1235 | int i; | ||
1252 | 1236 | ||
1253 | switch (event->event) { | 1237 | switch (event->event) { |
1254 | case IB_CM_REQ_ERROR: | 1238 | case IB_CM_REQ_ERROR: |
@@ -1298,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
1298 | if (target->status) | 1282 | if (target->status) |
1299 | break; | 1283 | break; |
1300 | 1284 | ||
1301 | target->status = srp_post_recv(target); | 1285 | for (i = 0; i < SRP_RQ_SIZE; i++) { |
1286 | target->status = srp_post_recv(target); | ||
1287 | if (target->status) | ||
1288 | break; | ||
1289 | } | ||
1302 | if (target->status) | 1290 | if (target->status) |
1303 | break; | 1291 | break; |
1304 | 1292 | ||
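Posting the whole ring here, at connection establishment, pairs with the single repost added to srp_handle_recv() above: one buffer is consumed per completion and exactly one is reposted, so the number of outstanding receives stays pinned at SRP_RQ_SIZE and the per-command __srp_post_recv() removed from srp_queuecommand() is no longer needed. The invariant, as a trivial standalone check (ring depth assumed):

    #include <assert.h>

    #define SRP_RQ_SIZE 64   /* assumed ring depth */

    int main(void)
    {
            unsigned int outstanding = SRP_RQ_SIZE;  /* full ring posted at RTU */
            unsigned int i;

            for (i = 0; i < 1024; i++) {
                    outstanding--;   /* a receive completion consumes one buffer */
                    outstanding++;   /* srp_handle_recv() reposts exactly one */
                    assert(outstanding == SRP_RQ_SIZE);
            }
            return 0;
    }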
@@ -1564,6 +1552,18 @@ static ssize_t show_orig_dgid(struct device *dev, | |||
1564 | return sprintf(buf, "%pI6\n", target->orig_dgid); | 1552 | return sprintf(buf, "%pI6\n", target->orig_dgid); |
1565 | } | 1553 | } |
1566 | 1554 | ||
1555 | static ssize_t show_req_lim(struct device *dev, | ||
1556 | struct device_attribute *attr, char *buf) | ||
1557 | { | ||
1558 | struct srp_target_port *target = host_to_target(class_to_shost(dev)); | ||
1559 | |||
1560 | if (target->state == SRP_TARGET_DEAD || | ||
1561 | target->state == SRP_TARGET_REMOVED) | ||
1562 | return -ENODEV; | ||
1563 | |||
1564 | return sprintf(buf, "%d\n", target->req_lim); | ||
1565 | } | ||
1566 | |||
1567 | static ssize_t show_zero_req_lim(struct device *dev, | 1567 | static ssize_t show_zero_req_lim(struct device *dev, |
1568 | struct device_attribute *attr, char *buf) | 1568 | struct device_attribute *attr, char *buf) |
1569 | { | 1569 | { |
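show_req_lim() makes the target's current request limit (the SRP flow-control credit count) visible read-only, which is useful when diagnosing stalled commands. A userspace sketch with a hypothetical host path (the SCSI host number varies per system):

    #include <stdio.h>

    int main(void)
    {
            int req_lim;
            FILE *f = fopen("/sys/class/scsi_host/host0/req_lim", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &req_lim) == 1)
                    printf("req_lim = %d\n", req_lim);
            fclose(f);
            return 0;
    }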
@@ -1598,6 +1598,7 @@ static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL); | |||
1598 | static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); | 1598 | static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL); |
1599 | static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); | 1599 | static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL); |
1600 | static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); | 1600 | static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL); |
1601 | static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL); | ||
1601 | static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); | 1602 | static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL); |
1602 | static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); | 1603 | static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL); |
1603 | static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); | 1604 | static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL); |
@@ -1609,6 +1610,7 @@ static struct device_attribute *srp_host_attrs[] = { | |||
1609 | &dev_attr_pkey, | 1610 | &dev_attr_pkey, |
1610 | &dev_attr_dgid, | 1611 | &dev_attr_dgid, |
1611 | &dev_attr_orig_dgid, | 1612 | &dev_attr_orig_dgid, |
1613 | &dev_attr_req_lim, | ||
1612 | &dev_attr_zero_req_lim, | 1614 | &dev_attr_zero_req_lim, |
1613 | &dev_attr_local_ib_port, | 1615 | &dev_attr_local_ib_port, |
1614 | &dev_attr_local_ib_device, | 1616 | &dev_attr_local_ib_device, |