Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  105
1 file changed, 103 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5f940aeaab1e..45aedf1d9338 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -34,10 +34,19 @@
 
 #include "iw_cxgb4.h"
 
+static int db_delay_usecs = 1;
+module_param(db_delay_usecs, int, 0644);
+MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
+
 static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
 MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
+int db_fc_threshold = 2000;
+module_param(db_fc_threshold, int, 0644);
+MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
+		 "db flow control mode (default = 2000)");
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
 	unsigned long flag;
@@ -1128,6 +1137,35 @@ out:
 	return ret;
 }
 
+/*
+ * Called by the library when the qp has user dbs disabled due to
+ * a DB_FULL condition.  This function will single-thread all user
+ * DB rings to avoid overflowing the hw db-fifo.
+ */
+static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
+{
+	int delay = db_delay_usecs;
+
+	mutex_lock(&qhp->rhp->db_mutex);
+	do {
+
+		/*
+		 * The interrupt threshold is dbfifo_int_thresh << 6. So
+		 * make sure we don't cross that and generate an interrupt.
+		 */
+		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
+		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
+			writel(V_QID(qid) | V_PIDX(inc), qhp->wq.db);
+			break;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(usecs_to_jiffies(delay));
+		delay = min(delay << 1, 2000);
+	} while (1);
+	mutex_unlock(&qhp->rhp->db_mutex);
+	return 0;
+}
+
 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		   enum c4iw_qp_attr_mask mask,
 		   struct c4iw_qp_attributes *attrs,
@@ -1176,6 +1214,15 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		qhp->attr = newattr;
 	}
 
+	if (mask & C4IW_QP_ATTR_SQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+		goto out;
+	}
+	if (mask & C4IW_QP_ATTR_RQ_DB) {
+		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+		goto out;
+	}
+
 	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
 		goto out;
 	if (qhp->attr.state == attrs->next_state)
@@ -1352,6 +1399,14 @@ out:
 	return ret;
 }
 
+static int enable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_enable_wq_db(&qp->wq);
+	return 0;
+}
+
 int c4iw_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct c4iw_dev *rhp;
@@ -1369,7 +1424,16 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 	wait_event(qhp->wait, !qhp->ep);
 
-	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	rhp->qpcnt--;
+	BUG_ON(rhp->qpcnt < 0);
+	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = NORMAL;
+		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
+	}
+	spin_unlock_irq(&rhp->lock);
 	atomic_dec(&qhp->refcnt);
 	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
 
@@ -1383,6 +1447,14 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	return 0;
 }
 
+static int disable_qp_db(int id, void *p, void *data)
+{
+	struct c4iw_qp *qp = p;
+
+	t4_disable_wq_db(&qp->wq);
+	return 0;
+}
+
 struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 			     struct ib_udata *udata)
 {
@@ -1469,7 +1541,16 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	init_waitqueue_head(&qhp->wait);
 	atomic_set(&qhp->refcnt, 1);
 
-	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_lock_irq(&rhp->lock);
+	if (rhp->db_state != NORMAL)
+		t4_disable_wq_db(&qhp->wq);
+	if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
+		rhp->rdev.stats.db_state_transitions++;
+		rhp->db_state = FLOW_CONTROL;
+		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
+	}
+	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_unlock_irq(&rhp->lock);
 	if (ret)
 		goto err2;
 
@@ -1613,6 +1694,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		       C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
 		       C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
 
+	/*
+	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
+	 * ringing the queue db when we're in DB_FULL mode.
+	 */
+	attrs.sq_db_inc = attr->sq_psn;
+	attrs.rq_db_inc = attr->rq_psn;
+	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
+	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
 
@@ -1621,3 +1711,14 @@ struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
 	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
+
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+
+	memset(attr, 0, sizeof *attr);
+	memset(init_attr, 0, sizeof *init_attr);
+	attr->qp_state = to_ib_qp_state(qhp->attr.state);
+	return 0;
+}
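
Note: the c4iw_ib_modify_qp and c4iw_modify_qp hunks above define the user-visible contract for doorbell flow control: when a QP's user doorbells are disabled on DB_FULL, the consumer hands its pending IDX_INC values to the kernel in the PSN fields of a modify-QP call, which the driver maps from IB_QP_SQ_PSN/IB_QP_RQ_PSN onto C4IW_QP_ATTR_SQ_DB/C4IW_QP_ATTR_RQ_DB and rings via ring_kernel_db(). The following is a minimal userspace-side sketch of that contract, assuming libibverbs; the helper name is hypothetical and the real provider library may issue the command path differently.

/*
 * Illustrative sketch only (not part of this patch): shows the
 * verbs-level convention the patch relies on, where the SQ doorbell
 * index increment rides in sq_psn with the IBV_QP_SQ_PSN mask bit set.
 * ring_sq_db_via_modify_qp() is a hypothetical helper name.
 */
#include <stdint.h>
#include <string.h>
#include <infiniband/verbs.h>

static int ring_sq_db_via_modify_qp(struct ibv_qp *qp, uint16_t idx_inc)
{
	struct ibv_qp_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.sq_psn = idx_inc;	/* reused as the doorbell IDX_INC */
	return ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
}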