path: root/drivers/infiniband
author		Bryan O'Sullivan <bos@pathscale.com>	2006-05-23 14:32:37 -0400
committer	Roland Dreier <rolandd@cisco.com>	2006-05-23 16:29:35 -0400
commit		3977026462314dfbb237adf6a964d0f683b8e45d (patch)
tree		d9fbd218ac4a742371468a67db1a738f52af5a83 /drivers/infiniband
parent		41c75a19bf4a0102f49763a686fb7e39780349f3 (diff)
IB/ipath: fix null deref during rdma ops
The problem was a race between node A's sending thread and its interrupt handler: the sending thread, which handles sending RDMA read response data, would write the trigger word; the last packet would be sent; node B would send a new RDMA read request; node A's interrupt handler would initialize s_rdma_sge; and then node A's sending thread would update s_rdma_sge. This didn't happen very often naturally, but was more frequent with 1-byte RDMA reads.

Rather than adding more locking or increasing the QP structure size and copying sge data, I modified the copy routine to update the pointers before writing the trigger word to avoid the update race.

Signed-off-by: Ralph Campbell <ralphc@pathscale.com>
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
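For illustration only (not part of the patch): a minimal userspace sketch of why moving the state update ahead of the trigger-word write closes the race. A pthread stands in for the interrupt handler and a C11 atomic flag stands in for the trigger word; the names sge_state, rdma_sge, trigger and interrupt_handler are hypothetical stand-ins, not the driver's symbols.

/*
 * Simplified model: the sender must finish updating its SGE bookkeeping
 * (analogue of s_rdma_sge) before the action that lets the peer issue a
 * new request, because the interrupt handler may reinitialize that state
 * as soon as the new request arrives.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct sge_state { int offset; };          /* analogue of s_rdma_sge */

static struct sge_state rdma_sge;
static atomic_int trigger;                 /* analogue of the trigger word */

static void *interrupt_handler(void *arg)
{
	(void)arg;
	/* Once the "last packet" is triggered, the peer can send a new
	 * RDMA read request and the handler reinitializes the state. */
	while (!atomic_load_explicit(&trigger, memory_order_acquire))
		;
	rdma_sge.offset = 0;
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_create(&irq, NULL, interrupt_handler, NULL);

	/* Fixed ordering: finish updating the SGE bookkeeping first... */
	rdma_sge.offset += 64;

	/* ...then write the trigger word.  With the old ordering the
	 * handler could reinitialize rdma_sge first and the late update
	 * would clobber it, which is the race the patch removes. */
	atomic_store_explicit(&trigger, 1, memory_order_release);

	pthread_join(irq, NULL);
	printf("offset after handler reinit: %d\n", rdma_sge.offset);
	return 0;
}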
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_layer.c	12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_layer.c b/drivers/infiniband/hw/ipath/ipath_layer.c
index 9cb5258ffed9..9ec4ac77b87f 100644
--- a/drivers/infiniband/hw/ipath/ipath_layer.c
+++ b/drivers/infiniband/hw/ipath/ipath_layer.c
@@ -872,12 +872,13 @@ static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
 		update_sge(ss, len);
 		length -= len;
 	}
+	/* Update address before sending packet. */
+	update_sge(ss, length);
 	/* must flush early everything before trigger word */
 	ipath_flush_wc();
 	__raw_writel(last, piobuf);
 	/* be sure trigger word is written */
 	ipath_flush_wc();
-	update_sge(ss, length);
 }
 
 /**
@@ -943,17 +944,18 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
 	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
 		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
 		u32 w;
+		u32 *addr = (u32 *) ss->sge.vaddr;
 
+		/* Update address before sending packet. */
+		update_sge(ss, len);
 		/* Need to round up for the last dword in the packet. */
 		w = (len + 3) >> 2;
-		__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
+		__iowrite32_copy(piobuf, addr, w - 1);
 		/* must flush early everything before trigger word */
 		ipath_flush_wc();
-		__raw_writel(((u32 *) ss->sge.vaddr)[w - 1],
-			     piobuf + w - 1);
+		__raw_writel(addr[w - 1], piobuf + w - 1);
 		/* be sure trigger word is written */
 		ipath_flush_wc();
-		update_sge(ss, len);
 		ret = 0;
 		goto bail;
 	}
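A note on the second hunk: because update_sge() now advances the SGE cursor before the copy, the code first snapshots ss->sge.vaddr into addr and copies from that snapshot. Below is a minimal standalone sketch of that "snapshot, then advance, then copy" pattern, assuming simplified stand-ins (sge_cursor, advance, send_dwords) for the driver's types and helpers; it is not the driver code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sge_cursor {
	const uint32_t *vaddr;  /* current position in the payload */
	size_t length;          /* bytes remaining */
};

/* Advance the cursor past 'len' bytes (stand-in for update_sge()). */
static void advance(struct sge_cursor *c, size_t len)
{
	c->vaddr += len / sizeof(uint32_t);
	c->length -= len;
}

/* Copy n dwords to the "PIO buffer" (stand-in for __iowrite32_copy()). */
static void send_dwords(uint32_t *pio, const uint32_t *src, size_t n)
{
	memcpy(pio, src, n * sizeof(uint32_t));
}

int main(void)
{
	uint32_t payload[4] = { 1, 2, 3, 4 };
	uint32_t piobuf[4] = { 0 };
	struct sge_cursor c = { payload, sizeof(payload) };
	size_t len = sizeof(payload);

	const uint32_t *addr = c.vaddr; /* snapshot before advancing          */
	advance(&c, len);               /* cursor updated early, as in the patch */
	send_dwords(piobuf, addr, len / sizeof(uint32_t)); /* copy uses snapshot */

	printf("last dword sent: %u, bytes left in cursor: %zu\n",
	       piobuf[3], c.length);
	return 0;
}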