author     Gal Pressman <galpress@amazon.com>      2019-06-18 09:07:32 -0400
committer  Doug Ledford <dledford@redhat.com>      2019-06-18 16:27:24 -0400
commit     7a5834e456f7fb3eca9b63af2a6bc7f460ae482f (patch)
tree       be3d82e048e66a98770665154d346d105aa72da0
parent     529254340c7f16d59b928e36568597c603bae917 (diff)
RDMA/efa: Handle mmap insertions overflow
When inserting a new mmap entry into the xarray we should check for
'mmap_page' overflow as it is limited to 32 bits.

Fixes: 40909f664d27 ("RDMA/efa: Add EFA verbs implementation")
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 21 ++++++++++++++++-----
1 file changed, 16 insertions(+), 5 deletions(-)
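For context, check_add_overflow() (include/linux/overflow.h) returns true when the addition would wrap the destination type, and the patch only advances mmap_xa_page when it does not. Below is a minimal userspace sketch of the same semantics, using the __builtin_add_overflow() builtin that the kernel macro maps to when the compiler provides it; PAGE_SHIFT and the sample values are illustrative only, not taken from the driver.

/*
 * Minimal userspace sketch of the overflow check this patch adds.
 * The values below are chosen to trigger the overflow branch.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, for illustration */

int main(void)
{
	uint32_t mmap_xa_page = UINT32_MAX - 1;	/* key space nearly exhausted */
	uint64_t length = 4ULL << PAGE_SHIFT;	/* a four-page mapping */
	uint32_t next_mmap_page;

	/* True when the u32 addition wraps; the case the patch now rejects. */
	if (__builtin_add_overflow(mmap_xa_page,
				   (uint32_t)(length >> PAGE_SHIFT),
				   &next_mmap_page)) {
		puts("mmap_page would overflow, reject the insertion");
		return 1;
	}

	printf("next mmap_page: %u\n", next_mmap_page);
	return 0;
}

With the check in place, an insertion that would wrap the 32-bit page counter returns EFA_MMAP_INVALID via the new err_unlock path instead of wrapping silently.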
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 0fea5d63fdbe..fb6115244d4c 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -204,6 +204,7 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 			     void *obj, u64 address, u64 length, u8 mmap_flag)
 {
 	struct efa_mmap_entry *entry;
+	u32 next_mmap_page;
 	int err;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -216,15 +217,19 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 	entry->mmap_flag = mmap_flag;
 
 	xa_lock(&ucontext->mmap_xa);
+	if (check_add_overflow(ucontext->mmap_xa_page,
+			       (u32)(length >> PAGE_SHIFT),
+			       &next_mmap_page))
+		goto err_unlock;
+
 	entry->mmap_page = ucontext->mmap_xa_page;
-	ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+	ucontext->mmap_xa_page = next_mmap_page;
 	err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
 			  GFP_KERNEL);
+	if (err)
+		goto err_unlock;
+
 	xa_unlock(&ucontext->mmap_xa);
-	if (err) {
-		kfree(entry);
-		return EFA_MMAP_INVALID;
-	}
 
 	ibdev_dbg(
 		&dev->ibdev,
@@ -232,6 +237,12 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 		entry->obj, entry->address, entry->length, get_mmap_key(entry));
 
 	return get_mmap_key(entry);
+
+err_unlock:
+	xa_unlock(&ucontext->mmap_xa);
+	kfree(entry);
+	return EFA_MMAP_INVALID;
+
 }
 
 int efa_query_device(struct ib_device *ibdev,