author		Yan Burman <yanb@mellanox.com>		2014-03-11 08:41:47 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-04-01 13:36:07 -0400
commit		2c34e68f426151bc6d16de6a187678f6693c0770 (patch)
tree		1fb3e6b76d98229c876c2964ab7435994b87716b
parent		5bdb0f02add5994b0bc17494f4726925ca5d6ba1 (diff)
IB/mad: Check and handle potential DMA mapping errors
Running with DMA_API_DEBUG enabled and not checking for DMA mapping
errors triggers a kernel stack trace with a "DMA-API: device driver
failed to check map error" message. Add these checks to the MAD module,
both to be more robust and to eliminate these false-positive stack
traces.

Signed-off-by: Yan Burman <yanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
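For context, the pattern the patch applies is the usual one for the
ib_dma_* wrappers: every ib_dma_map_single() is followed by an
ib_dma_mapping_error() check on the returned address (the dedicated
predicate must be used; comparing the address against 0 is not a
reliable failure test), and a failure part-way through unmaps whatever
was mapped earlier. A minimal sketch of that shape, with an
illustrative function and parameters rather than the actual mad.c code:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Map a header and a payload buffer for sending; on a second-stage
 * failure, unwind the first mapping before returning. */
static int map_for_send(struct ib_device *dev, struct ib_sge *sge,
			void *hdr, void *payload)
{
	sge[0].addr = ib_dma_map_single(dev, hdr, sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, sge[0].addr)))
		return -ENOMEM;

	sge[1].addr = ib_dma_map_single(dev, payload, sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, sge[1].addr))) {
		/* second map failed: release the first before bailing */
		ib_dma_unmap_single(dev, sge[0].addr, sge[0].length,
				    DMA_TO_DEVICE);
		return -ENOMEM;
	}

	return 0;
}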
-rw-r--r--	drivers/infiniband/core/mad.c	14
1 file changed, 14 insertions, 0 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 4c837e66516b..ab31f136d04b 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1022,12 +1022,21 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 					mad_send_wr->send_buf.mad,
 					sge[0].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+		return -ENOMEM;
+
 	mad_send_wr->header_mapping = sge[0].addr;
 
 	sge[1].addr = ib_dma_map_single(mad_agent->device,
 					ib_get_payload(mad_send_wr),
 					sge[1].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
 	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
@@ -2590,6 +2599,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 						 sizeof *mad_priv -
 						 sizeof mad_priv->header,
 						 DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+						  sg_list.addr))) {
+			ret = -ENOMEM;
+			break;
+		}
 		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
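On the receive side the check sits inside the posting loop, and a
mapping failure simply stops the loop: receives that were already
mapped and posted stay valid, and the accumulated ret is returned to
the caller. A condensed sketch of that shape, using a hypothetical
buffer type in place of struct ib_mad_private and eliding the actual
work-request posting:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

/* Hypothetical receive buffer; stands in for struct ib_mad_private. */
struct rx_buf {
	void	*data;
	size_t	len;
	u64	mapping;
};

/* Map and post up to @count receives; stop at the first mapping
 * failure so the receives already posted remain valid. */
static int post_receives(struct ib_device *dev, struct rx_buf *bufs,
			 int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		bufs[i].mapping = ib_dma_map_single(dev, bufs[i].data,
						    bufs[i].len,
						    DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(dev, bufs[i].mapping))) {
			ret = -ENOMEM;
			break;
		}
		/* ... fill in sg_list/recv_wr and call ib_post_recv() ... */
	}

	return ret;
}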