author     Hal Rosenstock <hal.rosenstock@gmail.com>   2007-08-03 13:45:17 -0400
committer  Roland Dreier <rolandd@cisco.com>           2007-08-03 13:45:17 -0400
commit     445d68070c9c02acdda38e6d69bd43096f521035
tree       ef828dbec51e77ba56906f167c7671c50b0fa54a
parent     5399891052badf97948098d01772113801f6ef58
IB/mad: Fix error path if response alloc fails in ib_mad_recv_done_handler()
If ib_mad_recv_done_handler() fails to allocate a response buffer, it just
prints a warning and continues, which leads to an oops if the MAD is
being handled for a switch device, because the switch code uses
response without checking it for NULL. Fix this by bailing out of the
function if the allocation fails.
Signed-off-by: Suresh Shelvapille <suri@baymicrosystems.com>
Signed-off-by: Hal Rosenstock <hal.rosenstock@gmail.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
 drivers/infiniband/core/mad.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index bc547f1d34ba..969785762052 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1842,16 +1842,11 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 {
 	struct ib_mad_qp_info *qp_info;
 	struct ib_mad_private_header *mad_priv_hdr;
-	struct ib_mad_private *recv, *response;
+	struct ib_mad_private *recv, *response = NULL;
 	struct ib_mad_list_head *mad_list;
 	struct ib_mad_agent_private *mad_agent;
 	int port_num;
 
-	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
-	if (!response)
-		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
-		       "for response buffer\n");
-
 	mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
 	qp_info = mad_list->mad_queue->qp_info;
 	dequeue_mad(mad_list);
@@ -1879,6 +1874,13 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	if (!validate_mad(&recv->mad.mad, qp_info->qp->qp_num))
 		goto out;
 
+	response = kmem_cache_alloc(ib_mad_cache, GFP_KERNEL);
+	if (!response) {
+		printk(KERN_ERR PFX "ib_mad_recv_done_handler no memory "
+		       "for response buffer\n");
+		goto out;
+	}
+
 	if (port_priv->device->node_type == RDMA_NODE_IB_SWITCH)
 		port_num = wc->port_num;
 	else
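The error-handling shape of the patch above (allocate the response only after the request has been validated, and on allocation failure log and jump to the cleanup label instead of continuing with a NULL pointer) can be illustrated outside the kernel with a minimal, self-contained C sketch. The names here (mad_buf, validate_request, handle_request) are hypothetical stand-ins for the kernel's ib_mad_* machinery, not part of the patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the kernel's MAD buffer type. */
struct mad_buf {
	char data[256];
};

/* Placeholder validity check, standing in for validate_mad(). */
static int validate_request(const struct mad_buf *req)
{
	return req->data[0] != '\0';
}

/* Mirrors the fixed error path: the response is allocated only after
 * the request passes validation, and an allocation failure bails out
 * through the cleanup label rather than falling through with NULL. */
static void handle_request(const struct mad_buf *req)
{
	struct mad_buf *response = NULL;

	if (!validate_request(req))
		goto out;

	response = malloc(sizeof(*response));
	if (!response) {
		fprintf(stderr, "handle_request: no memory for response buffer\n");
		goto out;
	}

	memcpy(response->data, req->data, sizeof(response->data));
	/* ... build and send the response ... */

out:
	free(response);	/* free(NULL) is a no-op, so the single exit path is safe */
}

int main(void)
{
	struct mad_buf req = { .data = "ping" };

	handle_request(&req);
	return 0;
}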