Diffstat (limited to 'drivers/infiniband/core/uverbs_main.c')
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 52
1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index f2e7ffe6fc54..c489f545baae 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
 		kref_put(&file->async_file->ref,
 			 ib_uverbs_release_async_event_file);
 	put_device(&file->device->dev);
+
+	if (file->disassociate_page)
+		__free_pages(file->disassociate_page, 0);
 	kfree(file);
 }
 
@@ -877,9 +880,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
 	kfree(priv);
 }
 
+/*
+ * Once the zap_vma_ptes has been called touches to the VMA will come here and
+ * we return a dummy writable zero page for all the pfns.
+ */
+static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
+{
+	struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
+	struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
+	vm_fault_t ret = 0;
+
+	if (!priv)
+		return VM_FAULT_SIGBUS;
+
+	/* Read only pages can just use the system zero page. */
+	if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
+		vmf->page = ZERO_PAGE(vmf->address);
+		get_page(vmf->page);
+		return 0;
+	}
+
+	mutex_lock(&ufile->umap_lock);
+	if (!ufile->disassociate_page)
+		ufile->disassociate_page =
+			alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
+
+	if (ufile->disassociate_page) {
+		/*
+		 * This VMA is forced to always be shared so this doesn't have
+		 * to worry about COW.
+		 */
+		vmf->page = ufile->disassociate_page;
+		get_page(vmf->page);
+	} else {
+		ret = VM_FAULT_SIGBUS;
+	}
+	mutex_unlock(&ufile->umap_lock);
+
+	return ret;
+}
+
 static const struct vm_operations_struct rdma_umap_ops = {
 	.open = rdma_umap_open,
 	.close = rdma_umap_close,
+	.fault = rdma_umap_fault,
 };
 
 static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
@@ -889,6 +933,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
 	struct ib_uverbs_file *ufile = ucontext->ufile;
 	struct rdma_umap_priv *priv;
 
+	if (!(vma->vm_flags & VM_SHARED))
+		return ERR_PTR(-EINVAL);
+
 	if (vma->vm_end - vma->vm_start != size)
 		return ERR_PTR(-EINVAL);
 
@@ -992,7 +1039,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 		 * at a time to get the lock ordering right. Typically there
 		 * will only be one mm, so no big deal.
 		 */
-		down_write(&mm->mmap_sem);
+		down_read(&mm->mmap_sem);
 		if (!mmget_still_valid(mm))
 			goto skip_mm;
 		mutex_lock(&ufile->umap_lock);
@@ -1006,11 +1053,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
 
 			zap_vma_ptes(vma, vma->vm_start,
 				     vma->vm_end - vma->vm_start);
-			vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
 		}
 		mutex_unlock(&ufile->umap_lock);
 	skip_mm:
-		up_write(&mm->mmap_sem);
+		up_read(&mm->mmap_sem);
 		mmput(mm);
 	}
 }
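
For context on what the new rdma_umap_fault() handler buys userspace, below is a minimal sketch of the access pattern it protects. The device path /dev/infiniband/uverbs0 and the page-sized mapping at offset 0 are illustrative placeholders only; real applications obtain their mappings through a provider library and the offset is driver specific. The point is the final store: with this patch, if the device is disassociated after the mmap() and the PTEs are zapped, a write faults in the per-file dummy zero page (read-only mappings get the system zero page) instead of delivering SIGBUS, and the mapping must be MAP_SHARED, which rdma_user_mmap_pre() now enforces.

/*
 * Hedged userspace sketch: device path and mmap offset are placeholders,
 * not something a real application would hard-code.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/infiniband/uverbs0", O_RDWR);	/* placeholder */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* uverbs mappings must be shared; non-shared VMAs are now rejected */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/*
	 * If the device is disassociated here, the kernel zaps the PTEs.
	 * With rdma_umap_fault() installed, the store below is satisfied by
	 * a zeroed dummy page instead of raising SIGBUS.
	 */
	*(volatile uint32_t *)p = 0xdeadbeef;

	munmap(p, 4096);
	close(fd);
	return 0;
}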