author    Thomas Hellstrom <thellstrom@vmware.com>    2013-11-06 12:32:59 -0500
committer Thomas Hellstrom <thellstrom@vmware.com>    2013-11-13 02:55:31 -0500
commit    3943875e7b73fdd94dd9e911d69f0cee9ab66a89
tree      1c23098a3a7da22c1983fbdc16d09d46dc0313aa
parent    59c8e66378fb78adbcd05f0d09783dde6fef282b
drm/ttm: Fix vma page_prot bit manipulation
Fix a long-standing TTM issue where we manipulated the vma page_prot
bits while mmap_sem was taken in read mode only. We now make a local
copy of the vma structure which we pass when we set the ptes.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
 drivers/gpu/drm/ttm/ttm_bo_vm.c | 30 +++++++++++++------------------
 1 file changed, 13 insertions(+), 17 deletions(-)
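The essence of the patch is a copy-then-modify pattern: since mmap_sem is
only held for reading in the fault path, the shared vma must not be written;
instead a stack copy is modified and handed onward. Below is a minimal,
self-contained sketch of that pattern. It is not kernel code: fake_vma,
prot_t, prot_from_flags() and io_prot() are invented stand-ins for
struct vm_area_struct, pgprot_t, vm_get_page_prot() and ttm_io_prot().

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for pgprot_t and struct vm_area_struct. */
    typedef unsigned long prot_t;

    struct fake_vma {
            unsigned long vm_flags;
            prot_t vm_page_prot;    /* shared state: never write under a read lock */
    };

    /* Stand-in for vm_get_page_prot(): derive protection bits from flags. */
    static prot_t prot_from_flags(unsigned long flags)
    {
            return flags & 0xff;
    }

    /* Stand-in for ttm_io_prot(): fold caching bits into the protection. */
    static prot_t io_prot(unsigned long placement, prot_t prot)
    {
            return prot | (placement << 8);
    }

    /*
     * The pattern from the patch: instead of mutating the caller's vma,
     * take a stack copy, recompute the protection on the copy, and use
     * the copy when inserting the pte. The original vma stays untouched.
     */
    static prot_t compute_pte_prot(const struct fake_vma *vma,
                                   unsigned long placement, bool is_iomem,
                                   bool cached)
    {
            struct fake_vma cvma = *vma;    /* local copy, safe to modify */

            cvma.vm_page_prot = prot_from_flags(cvma.vm_flags);
            if (is_iomem || !cached)
                    cvma.vm_page_prot = io_prot(placement, cvma.vm_page_prot);

            return cvma.vm_page_prot;
    }

    int main(void)
    {
            struct fake_vma vma = { .vm_flags = 0x3, .vm_page_prot = 0 };

            printf("pte prot: %#lx\n", compute_pte_prot(&vma, 0x2, false, false));
            printf("vma untouched: %#lx\n", vma.vm_page_prot);
            return 0;
    }

Because the copy lives on the fault handler's stack, no lock is needed to
write it; the real patch then passes &cvma to vm_insert_mixed() so the
recomputed caching bits reach the pte without ever touching the shared vma.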
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index c03514b93f9c..ac617f3ecd0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -102,6 +102,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	int retval = VM_FAULT_NOPAGE;
 	struct ttm_mem_type_manager *man =
 		&bdev->man[bo->mem.mem_type];
+	struct vm_area_struct cvma;
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -164,26 +165,21 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/*
-	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
-	 * since the mmap_sem is only held in read mode. However, we
-	 * modify only the caching bits of vma->vm_page_prot and
-	 * consider those bits protected by
-	 * the bo->mutex, as we should be the only writers.
-	 * There shouldn't really be any readers of these bits except
-	 * within vm_insert_mixed()? fork?
-	 *
-	 * TODO: Add a list of vmas to the bo, and change the
-	 * vma->vm_page_prot when the object changes caching policy, with
-	 * the correct locks held.
+	 * Make a local vma copy to modify the page_prot member
+	 * and vm_flags if necessary. The vma parameter is protected
+	 * by mmap_sem in write mode.
 	 */
+	cvma = *vma;
+	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
 	if (bo->mem.bus.is_iomem) {
-		vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
-						vma->vm_page_prot);
+		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+						cvma.vm_page_prot);
 	} else {
 		ttm = bo->ttm;
-		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
-		    vm_get_page_prot(vma->vm_flags) :
-		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+		if (!(bo->mem.placement & TTM_PL_FLAG_CACHED))
+			cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+							cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
 		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
@@ -210,7 +206,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		pfn = page_to_pfn(page);
 	}
 
-	ret = vm_insert_mixed(vma, address, pfn);
+	ret = vm_insert_mixed(&cvma, address, pfn);
 	/*
 	 * Somebody beat us to this PTE or prefaulting to
 	 * an already populated PTE, or prefaulting error.