Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/mad.c	105
1 file changed, 27 insertions, 78 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d66ecf8243ec..ebe8c3a45410 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -58,7 +58,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
 static struct ib_mad_agent_private *find_mad_agent(
 					struct ib_mad_port_private *port_priv,
-					struct ib_mad *mad, int solicited);
+					struct ib_mad *mad);
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
@@ -67,7 +67,6 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 static void timeout_sends(void *data);
 static void cancel_sends(void *data);
 static void local_completions(void *data);
-static int solicited_mad(struct ib_mad *mad);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			      struct ib_mad_agent_private *agent_priv,
 			      u8 mgmt_class);
@@ -558,6 +557,13 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
 }
 EXPORT_SYMBOL(ib_unregister_mad_agent);
 
+static inline int response_mad(struct ib_mad *mad)
+{
+	/* Trap represses are responses although response bit is reset */
+	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
+		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
+}
+
 static void dequeue_mad(struct ib_mad_list_head *mad_list)
 {
 	struct ib_mad_queue *mad_queue;
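
Note: response_mad() works because the IBA encodes "response" as the high bit (0x80) of mad_hdr.method (e.g. GetResp is 0x81), and TrapRepress (0x07) is the one response method that does not carry that bit, hence the explicit special case. A standalone sketch of the same predicate, with the IB_MGMT_METHOD_* values redefined locally (mirroring ib_mad.h) so it compiles outside the kernel:

#include <stdio.h>
#include <stdint.h>

/* Values mirror the IB_MGMT_METHOD_* constants in ib_mad.h */
#define MGMT_METHOD_GET          0x01
#define MGMT_METHOD_TRAP         0x05
#define MGMT_METHOD_TRAP_REPRESS 0x07
#define MGMT_METHOD_RESP         0x80	/* the "response" bit */
#define MGMT_METHOD_GET_RESP     0x81

/* Same check as the patch's response_mad(), on a bare method octet */
static int is_response(uint8_t method)
{
	/* Trap represses are responses although the response bit is reset */
	return (method == MGMT_METHOD_TRAP_REPRESS) ||
	       (method & MGMT_METHOD_RESP);
}

int main(void)
{
	printf("Get=%d GetResp=%d Trap=%d TrapRepress=%d\n",
	       is_response(MGMT_METHOD_GET),           /* 0 */
	       is_response(MGMT_METHOD_GET_RESP),      /* 1 */
	       is_response(MGMT_METHOD_TRAP),          /* 0 */
	       is_response(MGMT_METHOD_TRAP_REPRESS)); /* 1 */
	return 0;
}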
@@ -650,7 +656,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 				  struct ib_smp *smp,
 				  struct ib_send_wr *send_wr)
 {
-	int ret, solicited;
+	int ret;
 	unsigned long flags;
 	struct ib_mad_local_private *local;
 	struct ib_mad_private *mad_priv;
@@ -696,11 +702,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 	switch (ret)
 	{
 	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
-		/*
-		 * See if response is solicited and
-		 * there is a recv handler
-		 */
-		if (solicited_mad(&mad_priv->mad.mad) &&
+		if (response_mad(&mad_priv->mad.mad) &&
 		    mad_agent_priv->agent.recv_handler) {
 			local->mad_priv = mad_priv;
 			local->recv_mad_agent = mad_agent_priv;
@@ -717,15 +719,13 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
 		break;
 	case IB_MAD_RESULT_SUCCESS:
 		/* Treat like an incoming receive MAD */
-		solicited = solicited_mad(&mad_priv->mad.mad);
 		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
 					    mad_agent_priv->agent.port_num);
 		if (port_priv) {
 			mad_priv->mad.mad.mad_hdr.tid =
 				((struct ib_mad *)smp)->mad_hdr.tid;
 			recv_mad_agent = find_mad_agent(port_priv,
-							&mad_priv->mad.mad,
-							solicited);
+							&mad_priv->mad.mad);
 		}
 		if (!port_priv || !recv_mad_agent) {
 			kmem_cache_free(ib_mad_cache, mad_priv);
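
Note: in this locally-routed SMP path, switching from solicited_mad() to response_mad() also drops the old CM special case; that looks safe here because handle_outgoing_dr_smp() only ever sees directed-route SMPs, never CM MADs. The surrounding switch dispatches on the bitmask returned by the device's process_mad hook. A minimal sketch of that dispatch, with the IB_MAD_RESULT_* values mirrored locally (assumed from ib_mad.h) so it builds on its own:

#include <stdio.h>

/* Values mirror IB_MAD_RESULT_* in ib_mad.h */
#define MAD_RESULT_FAILURE  0
#define MAD_RESULT_SUCCESS  (1 << 0)	/* MAD was successfully processed */
#define MAD_RESULT_REPLY    (1 << 1)	/* a reply MAD was generated */

static const char *dispatch(int ret)
{
	switch (ret) {
	case MAD_RESULT_SUCCESS | MAD_RESULT_REPLY:
		/* loop the reply back if it is a response and a handler exists */
		return "hand reply to the sending agent's recv_handler";
	case MAD_RESULT_SUCCESS:
		/* no reply generated: route it like any incoming MAD */
		return "treat as incoming receive via find_mad_agent()";
	default:
		return "error: free the MAD and fail the send";
	}
}

int main(void)
{
	printf("%s\n", dispatch(MAD_RESULT_SUCCESS | MAD_RESULT_REPLY));
	printf("%s\n", dispatch(MAD_RESULT_SUCCESS));
	printf("%s\n", dispatch(MAD_RESULT_FAILURE));
	return 0;
}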
@@ -1421,42 +1421,15 @@ out:
 	return;
 }
 
-static int response_mad(struct ib_mad *mad)
-{
-	/* Trap represses are responses although response bit is reset */
-	return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
-		(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
-}
-
-static int solicited_mad(struct ib_mad *mad)
-{
-	/* CM MADs are never solicited */
-	if (mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CM) {
-		return 0;
-	}
-
-	/* XXX: Determine whether MAD is using RMPP */
-
-	/* Not using RMPP */
-	/* Is this MAD a response to a previous MAD ? */
-	return response_mad(mad);
-}
-
 static struct ib_mad_agent_private *
 find_mad_agent(struct ib_mad_port_private *port_priv,
-	       struct ib_mad *mad,
-	       int solicited)
+	       struct ib_mad *mad)
 {
 	struct ib_mad_agent_private *mad_agent = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-
-	/*
-	 * Whether MAD was solicited determines type of routing to
-	 * MAD client.
-	 */
-	if (solicited) {
+	if (response_mad(mad)) {
 		u32 hi_tid;
 		struct ib_mad_agent_private *entry;
 
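
Note: find_mad_agent() now branches on response_mad() alone: a response is routed back to the agent that issued the request, located via the upper 32 bits of the 64-bit transaction ID (each registered agent owns a unique hi_tid), while an unsolicited MAD is routed through the management class/method tables. A hedged userspace sketch of the TID split; the helper names below are illustrative, not the kernel's:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: the MAD core assigns each agent a unique 32-bit id
 * at registration and places it in the upper half of every request TID,
 * so a response can be routed straight back to the requester.
 */
static uint32_t tid_agent_id(uint64_t tid)
{
	return (uint32_t)(tid >> 32);	/* hi_tid: owning agent */
}

static uint32_t tid_local_id(uint64_t tid)
{
	return (uint32_t)tid;		/* low half: agent-local counter */
}

int main(void)
{
	uint64_t tid = ((uint64_t)7 << 32) | 42;	/* agent 7, request 42 */

	printf("agent %" PRIu32 ", request %" PRIu32 "\n",
	       tid_agent_id(tid), tid_local_id(tid));
	return 0;
}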
@@ -1560,18 +1533,6 @@ out:
 	return valid;
 }
 
-/*
- * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet
- */
-static struct ib_mad_private *
-reassemble_recv(struct ib_mad_agent_private *mad_agent_priv,
-		struct ib_mad_private *recv)
-{
-	/* Until we have RMPP, all receives are reassembled!... */
-	INIT_LIST_HEAD(&recv->header.recv_wc.recv_buf.list);
-	return recv;
-}
-
 static struct ib_mad_send_wr_private*
 find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 	      u64 tid)
@@ -1600,29 +1561,22 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 }
 
 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
-				 struct ib_mad_private *recv,
-				 int solicited)
+				 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
+	u64 tid;
 
-	/* Fully reassemble receive before processing */
-	recv = reassemble_recv(mad_agent_priv, recv);
-	if (!recv) {
-		if (atomic_dec_and_test(&mad_agent_priv->refcount))
-			wake_up(&mad_agent_priv->wait);
-		return;
-	}
-
+	INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
 	/* Complete corresponding request */
-	if (solicited) {
+	if (response_mad(mad_recv_wc->recv_buf.mad)) {
+		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
-		mad_send_wr = find_send_req(mad_agent_priv,
-					    recv->mad.mad.mad_hdr.tid);
+		mad_send_wr = find_send_req(mad_agent_priv, tid);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-			ib_free_recv_mad(&recv->header.recv_wc);
+			ib_free_recv_mad(mad_recv_wc);
 			if (atomic_dec_and_test(&mad_agent_priv->refcount))
 				wake_up(&mad_agent_priv->wait);
 			return;
@@ -1632,10 +1586,9 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
 		/* Defined behavior is to complete response before request */
-		recv->header.recv_wc.wc->wr_id = mad_send_wr->wr_id;
-		mad_agent_priv->agent.recv_handler(
-				&mad_agent_priv->agent,
-				&recv->header.recv_wc);
+		mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
+		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+						   mad_recv_wc);
 		atomic_dec(&mad_agent_priv->refcount);
 
 		mad_send_wc.status = IB_WC_SUCCESS;
@@ -1643,9 +1596,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_send_wc.wr_id = mad_send_wr->wr_id;
 		ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 	} else {
-		mad_agent_priv->agent.recv_handler(
-				&mad_agent_priv->agent,
-				&recv->header.recv_wc);
+		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
+						   mad_recv_wc);
 		if (atomic_dec_and_test(&mad_agent_priv->refcount))
 			wake_up(&mad_agent_priv->wait);
 	}
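
Note: with the ib_mad_recv_wc passed in directly, ib_mad_complete_recv() pairs a response with its outstanding request by TID via find_send_req(), delivers the receive first, and only then reports the send completion, matching the "complete response before request" comment; an unmatched response is freed, and an unsolicited MAD goes straight to recv_handler. A toy version of the TID match; struct and function names are stand-ins, and the kernel walks the agent's outstanding-send lists under mad_agent_priv->lock rather than a hand-rolled list:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for ib_mad_send_wr_private: only the fields the lookup needs */
struct send_req {
	uint64_t tid;
	struct send_req *next;
};

/* Toy find_send_req(): first outstanding request with a matching TID */
static struct send_req *find_req(struct send_req *head, uint64_t tid)
{
	struct send_req *r;

	for (r = head; r != NULL; r = r->next)
		if (r->tid == tid)
			return r;
	return NULL;	/* caller frees an unmatched response */
}

int main(void)
{
	struct send_req b = { 99, NULL };
	struct send_req a = { 42, &b };

	printf("tid 99 matched: %d\n", find_req(&a, 99) != NULL);	/* 1 */
	printf("tid  7 matched: %d\n", find_req(&a, 7) != NULL);	/* 0 */
	return 0;
}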
@@ -1659,7 +1611,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	struct ib_mad_private *recv, *response;
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
-	int solicited;
 
 	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
 	if (!response)
@@ -1745,11 +1696,9 @@ local:
 	}
 	}
 
-	/* Determine corresponding MAD agent for incoming receive MAD */
-	solicited = solicited_mad(&recv->mad.mad);
-	mad_agent = find_mad_agent(port_priv, &recv->mad.mad, solicited);
+	mad_agent = find_mad_agent(port_priv, &recv->mad.mad);
 	if (mad_agent) {
-		ib_mad_complete_recv(mad_agent, recv, solicited);
+		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
 		/*
 		 * recv is freed up in error cases in ib_mad_complete_recv
 		 * or via recv_handler in ib_mad_complete_recv()
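
Note: net effect of the patch (27 insertions, 78 deletions): solicited_mad() and the RMPP-less reassemble_recv() stub are removed, response_mad() is hoisted near the top of mad.c and made inline, and the receive path becomes ib_mad_recv_done_handler() -> find_mad_agent() -> ib_mad_complete_recv() -> recv_handler, with the response-vs-request routing decision made in one place by response_mad().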