aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorParav Pandit <pandit.parav@gmail.com>2016-09-28 16:25:47 -0400
committerDoug Ledford <dledford@redhat.com>2016-10-06 13:50:04 -0400
commitd9703650f4aba7555fde92636d8d9a689029e8f8 (patch)
tree4deebfce3ca13009f6c5ef97c2ae8b0ab5cfc7c3
parentffae955d49e6cff28589d6e7388bb9275f9ce2d1 (diff)
IB/{rxe,core,rdmavt}: Fix kernel crash for reg MR
This patch fixes the kernel crash below on memory registration for rxe and other transport drivers which have a dma_ops extension. IB/core invokes ib_map_sg_attrs() in a generic manner with dma attributes which is used by mlx5 and mthca adapters. However, in doing so it did not honor the dma_ops extension of software-based transports for sg map/unmap operations. This results in calling dma_map_sg_attrs of the hardware virtual device, resulting in a crash for a null reference. We extend the core to support sg_map/unmap_attrs and the transport drivers to implement those dma_ops callback functions. Verified using perftest applications. BUG: unable to handle kernel NULL pointer dereference at (null) IP: [<ffffffff81032a75>] check_addr+0x35/0x60 ... Call Trace: [<ffffffff81032b39>] ? nommu_map_sg+0x99/0xd0 [<ffffffffa02b31c6>] ib_umem_get+0x3d6/0x470 [ib_core] [<ffffffffa01cc329>] rxe_mem_init_user+0x49/0x270 [rdma_rxe] [<ffffffffa01c793a>] ? rxe_add_index+0xca/0x100 [rdma_rxe] [<ffffffffa01c995f>] rxe_reg_user_mr+0x9f/0x130 [rdma_rxe] [<ffffffffa00419fe>] ib_uverbs_reg_mr+0x14e/0x2c0 [ib_uverbs] [<ffffffffa003d3ab>] ib_uverbs_write+0x15b/0x3b0 [ib_uverbs] [<ffffffff811e92a6>] ? mem_cgroup_commit_charge+0x76/0xe0 [<ffffffff811af0a9>] ? page_add_new_anon_rmap+0x89/0xc0 [<ffffffff8117e6c9>] ? lru_cache_add_active_or_unevictable+0x39/0xc0 [<ffffffff811f0da8>] __vfs_write+0x28/0x120 [<ffffffff811f1239>] ? rw_verify_area+0x49/0xb0 [<ffffffff811f1492>] vfs_write+0xb2/0x1b0 [<ffffffff811f27d6>] SyS_write+0x46/0xa0 [<ffffffff814f7d32>] entry_SYSCALL_64_fastpath+0x1a/0xa4 Signed-off-by: Parav Pandit <pandit.parav@gmail.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/sw/rdmavt/dma.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_dma.c17
-rw-r--r--include/rdma/ib_verbs.h23
3 files changed, 54 insertions, 3 deletions
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c
index 33076a5eee2f..01f71caa3ac4 100644
--- a/drivers/infiniband/sw/rdmavt/dma.c
+++ b/drivers/infiniband/sw/rdmavt/dma.c
@@ -138,6 +138,21 @@ static void rvt_unmap_sg(struct ib_device *dev,
138 /* This is a stub, nothing to be done here */ 138 /* This is a stub, nothing to be done here */
139} 139}
140 140
141static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
142 int nents, enum dma_data_direction direction,
143 unsigned long attrs)
144{
145 return rvt_map_sg(dev, sgl, nents, direction);
146}
147
148static void rvt_unmap_sg_attrs(struct ib_device *dev,
149 struct scatterlist *sg, int nents,
150 enum dma_data_direction direction,
151 unsigned long attrs)
152{
153 return rvt_unmap_sg(dev, sg, nents, direction);
154}
155
141static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr, 156static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
142 size_t size, enum dma_data_direction dir) 157 size_t size, enum dma_data_direction dir)
143{ 158{
@@ -177,6 +192,8 @@ struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
177 .unmap_page = rvt_dma_unmap_page, 192 .unmap_page = rvt_dma_unmap_page,
178 .map_sg = rvt_map_sg, 193 .map_sg = rvt_map_sg,
179 .unmap_sg = rvt_unmap_sg, 194 .unmap_sg = rvt_unmap_sg,
195 .map_sg_attrs = rvt_map_sg_attrs,
196 .unmap_sg_attrs = rvt_unmap_sg_attrs,
180 .sync_single_for_cpu = rvt_sync_single_for_cpu, 197 .sync_single_for_cpu = rvt_sync_single_for_cpu,
181 .sync_single_for_device = rvt_sync_single_for_device, 198 .sync_single_for_device = rvt_sync_single_for_device,
182 .alloc_coherent = rvt_dma_alloc_coherent, 199 .alloc_coherent = rvt_dma_alloc_coherent,
diff --git a/drivers/infiniband/sw/rxe/rxe_dma.c b/drivers/infiniband/sw/rxe/rxe_dma.c
index 7634c1a81b2b..a0f8af5851ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_dma.c
+++ b/drivers/infiniband/sw/rxe/rxe_dma.c
@@ -117,6 +117,21 @@ static void rxe_unmap_sg(struct ib_device *dev,
117 WARN_ON(!valid_dma_direction(direction)); 117 WARN_ON(!valid_dma_direction(direction));
118} 118}
119 119
120static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl,
121 int nents, enum dma_data_direction direction,
122 unsigned long attrs)
123{
124 return rxe_map_sg(dev, sgl, nents, direction);
125}
126
127static void rxe_unmap_sg_attrs(struct ib_device *dev,
128 struct scatterlist *sg, int nents,
129 enum dma_data_direction direction,
130 unsigned long attrs)
131{
132 rxe_unmap_sg(dev, sg, nents, direction);
133}
134
120static void rxe_sync_single_for_cpu(struct ib_device *dev, 135static void rxe_sync_single_for_cpu(struct ib_device *dev,
121 u64 addr, 136 u64 addr,
122 size_t size, enum dma_data_direction dir) 137 size_t size, enum dma_data_direction dir)
@@ -159,6 +174,8 @@ struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
159 .unmap_page = rxe_dma_unmap_page, 174 .unmap_page = rxe_dma_unmap_page,
160 .map_sg = rxe_map_sg, 175 .map_sg = rxe_map_sg,
161 .unmap_sg = rxe_unmap_sg, 176 .unmap_sg = rxe_unmap_sg,
177 .map_sg_attrs = rxe_map_sg_attrs,
178 .unmap_sg_attrs = rxe_unmap_sg_attrs,
162 .sync_single_for_cpu = rxe_sync_single_for_cpu, 179 .sync_single_for_cpu = rxe_sync_single_for_cpu,
163 .sync_single_for_device = rxe_sync_single_for_device, 180 .sync_single_for_device = rxe_sync_single_for_device,
164 .alloc_coherent = rxe_dma_alloc_coherent, 181 .alloc_coherent = rxe_dma_alloc_coherent,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e1f96737c2a1..9e935655fccb 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1739,6 +1739,14 @@ struct ib_dma_mapping_ops {
1739 void (*unmap_sg)(struct ib_device *dev, 1739 void (*unmap_sg)(struct ib_device *dev,
1740 struct scatterlist *sg, int nents, 1740 struct scatterlist *sg, int nents,
1741 enum dma_data_direction direction); 1741 enum dma_data_direction direction);
1742 int (*map_sg_attrs)(struct ib_device *dev,
1743 struct scatterlist *sg, int nents,
1744 enum dma_data_direction direction,
1745 unsigned long attrs);
1746 void (*unmap_sg_attrs)(struct ib_device *dev,
1747 struct scatterlist *sg, int nents,
1748 enum dma_data_direction direction,
1749 unsigned long attrs);
1742 void (*sync_single_for_cpu)(struct ib_device *dev, 1750 void (*sync_single_for_cpu)(struct ib_device *dev,
1743 u64 dma_handle, 1751 u64 dma_handle,
1744 size_t size, 1752 size_t size,
@@ -3000,8 +3008,12 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
3000 enum dma_data_direction direction, 3008 enum dma_data_direction direction,
3001 unsigned long dma_attrs) 3009 unsigned long dma_attrs)
3002{ 3010{
3003 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3011 if (dev->dma_ops)
3004 dma_attrs); 3012 return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
3013 dma_attrs);
3014 else
3015 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
3016 dma_attrs);
3005} 3017}
3006 3018
3007static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 3019static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3009,7 +3021,12 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
3009 enum dma_data_direction direction, 3021 enum dma_data_direction direction,
3010 unsigned long dma_attrs) 3022 unsigned long dma_attrs)
3011{ 3023{
3012 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 3024 if (dev->dma_ops)
3025 return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
3026 dma_attrs);
3027 else
3028 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
3029 dma_attrs);
3013} 3030}
3014/** 3031/**
3015 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 3032 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry