author    Dave Airlie <airlied@redhat.com>	2019-07-15 14:07:13 -0400
committer Dave Airlie <airlied@redhat.com>	2019-07-15 14:07:13 -0400
commit    3729fe2bc2a01f4cc1aa88be8f64af06084c87d6 (patch)
tree      462102b65a8cec402bc4726eef6946bdd9113111
parent    7e4b4dfc98d54bc79f7ca29c8bc6307ed2948014 (diff)
Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
This reverts commit 031e610a6a21448a63dff7a0416e5e206724caac, reversing
changes made to 52d2d44eee8091e740d0d275df1311fb8373c9a9.
The mm changes in there were premature and not fully acked or reviewed by core mm folks.
I dropped the ball by merging them via this tree, so let's take them all back out.
Signed-off-by: Dave Airlie <airlied@redhat.com>
30 files changed, 483 insertions, 2136 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index d6600715a662..2abf6d28db64 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5191,7 +5191,6 @@ T: git git://people.freedesktop.org/~thomash/linux | |||
5191 | S: Supported | 5191 | S: Supported |
5192 | F: drivers/gpu/drm/vmwgfx/ | 5192 | F: drivers/gpu/drm/vmwgfx/ |
5193 | F: include/uapi/drm/vmwgfx_drm.h | 5193 | F: include/uapi/drm/vmwgfx_drm.h |
5194 | F: mm/as_dirty_helpers.c | ||
5195 | 5194 | ||
5196 | DRM DRIVERS | 5195 | DRM DRIVERS |
5197 | M: David Airlie <airlied@linux.ie> | 5196 | M: David Airlie <airlied@linux.ie> |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a7fd5a4955c9..58c403eda04e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1739,7 +1739,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, | |||
1739 | mutex_lock(&ttm_global_mutex); | 1739 | mutex_lock(&ttm_global_mutex); |
1740 | list_add_tail(&bdev->device_list, &glob->device_list); | 1740 | list_add_tail(&bdev->device_list, &glob->device_list); |
1741 | mutex_unlock(&ttm_global_mutex); | 1741 | mutex_unlock(&ttm_global_mutex); |
1742 | bdev->vm_ops = &ttm_bo_vm_ops; | ||
1743 | 1742 | ||
1744 | return 0; | 1743 | return 0; |
1745 | out_no_sys: | 1744 | out_no_sys: |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0c4576cbafcf..6dacff49c1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,6 +42,8 @@ | |||
42 | #include <linux/uaccess.h> | 42 | #include <linux/uaccess.h> |
43 | #include <linux/mem_encrypt.h> | 43 | #include <linux/mem_encrypt.h> |
44 | 44 | ||
45 | #define TTM_BO_VM_NUM_PREFAULT 16 | ||
46 | |||
45 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, | 47 | static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, |
46 | struct vm_fault *vmf) | 48 | struct vm_fault *vmf) |
47 | { | 49 | { |
@@ -104,30 +106,25 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo, | |||
104 | + page_offset; | 106 | + page_offset; |
105 | } | 107 | } |
106 | 108 | ||
107 | /** | 109 | static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) |
108 | * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback | ||
109 | * @bo: The buffer object | ||
110 | * @vmf: The fault structure handed to the callback | ||
111 | * | ||
112 | * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped | ||
113 | * during long waits, and after the wait the callback will be restarted. This | ||
114 | * is to allow other threads using the same virtual memory space concurrent | ||
115 | * access to map(), unmap() completely unrelated buffer objects. TTM buffer | ||
116 | * object reservations sometimes wait for GPU and should therefore be | ||
117 | * considered long waits. This function reserves the buffer object interruptibly | ||
118 | * taking this into account. Starvation is avoided by the vm system not | ||
119 | * allowing too many repeated restarts. | ||
120 | * This function is intended to be used in customized fault() and _mkwrite() | ||
121 | * handlers. | ||
122 | * | ||
123 | * Return: | ||
124 | * 0 on success and the bo was reserved. | ||
125 | * VM_FAULT_RETRY if blocking wait. | ||
126 | * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed. | ||
127 | */ | ||
128 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, | ||
129 | struct vm_fault *vmf) | ||
130 | { | 110 | { |
111 | struct vm_area_struct *vma = vmf->vma; | ||
112 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) | ||
113 | vma->vm_private_data; | ||
114 | struct ttm_bo_device *bdev = bo->bdev; | ||
115 | unsigned long page_offset; | ||
116 | unsigned long page_last; | ||
117 | unsigned long pfn; | ||
118 | struct ttm_tt *ttm = NULL; | ||
119 | struct page *page; | ||
120 | int err; | ||
121 | int i; | ||
122 | vm_fault_t ret = VM_FAULT_NOPAGE; | ||
123 | unsigned long address = vmf->address; | ||
124 | struct ttm_mem_type_manager *man = | ||
125 | &bdev->man[bo->mem.mem_type]; | ||
126 | struct vm_area_struct cvma; | ||
127 | |||
131 | /* | 128 | /* |
132 | * Work around locking order reversal in fault / nopfn | 129 | * Work around locking order reversal in fault / nopfn |
133 | * between mmap_sem and bo_reserve: Perform a trylock operation | 130 | * between mmap_sem and bo_reserve: Perform a trylock operation |
@@ -154,55 +151,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, | |||
154 | return VM_FAULT_NOPAGE; | 151 | return VM_FAULT_NOPAGE; |
155 | } | 152 | } |
156 | 153 | ||
157 | return 0; | ||
158 | } | ||
159 | EXPORT_SYMBOL(ttm_bo_vm_reserve); | ||
160 | |||
161 | /** | ||
162 | * ttm_bo_vm_fault_reserved - TTM fault helper | ||
163 | * @vmf: The struct vm_fault given as argument to the fault callback | ||
164 | * @prot: The page protection to be used for this memory area. | ||
165 | * @num_prefault: Maximum number of prefault pages. The caller may want to | ||
166 | * specify this based on madvice settings and the size of the GPU object | ||
167 | * backed by the memory. | ||
168 | * | ||
169 | * This function inserts one or more page table entries pointing to the | ||
170 | * memory backing the buffer object, and then returns a return code | ||
171 | * instructing the caller to retry the page access. | ||
172 | * | ||
173 | * Return: | ||
174 | * VM_FAULT_NOPAGE on success or pending signal | ||
175 | * VM_FAULT_SIGBUS on unspecified error | ||
176 | * VM_FAULT_OOM on out-of-memory | ||
177 | * VM_FAULT_RETRY if retryable wait | ||
178 | */ | ||
179 | vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | ||
180 | pgprot_t prot, | ||
181 | pgoff_t num_prefault) | ||
182 | { | ||
183 | struct vm_area_struct *vma = vmf->vma; | ||
184 | struct vm_area_struct cvma = *vma; | ||
185 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) | ||
186 | vma->vm_private_data; | ||
187 | struct ttm_bo_device *bdev = bo->bdev; | ||
188 | unsigned long page_offset; | ||
189 | unsigned long page_last; | ||
190 | unsigned long pfn; | ||
191 | struct ttm_tt *ttm = NULL; | ||
192 | struct page *page; | ||
193 | int err; | ||
194 | pgoff_t i; | ||
195 | vm_fault_t ret = VM_FAULT_NOPAGE; | ||
196 | unsigned long address = vmf->address; | ||
197 | struct ttm_mem_type_manager *man = | ||
198 | &bdev->man[bo->mem.mem_type]; | ||
199 | |||
200 | /* | 154 | /* |
201 | * Refuse to fault imported pages. This should be handled | 155 | * Refuse to fault imported pages. This should be handled |
202 | * (if at all) by redirecting mmap to the exporter. | 156 | * (if at all) by redirecting mmap to the exporter. |
203 | */ | 157 | */ |
204 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) | 158 | if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) { |
205 | return VM_FAULT_SIGBUS; | 159 | ret = VM_FAULT_SIGBUS; |
160 | goto out_unlock; | ||
161 | } | ||
206 | 162 | ||
207 | if (bdev->driver->fault_reserve_notify) { | 163 | if (bdev->driver->fault_reserve_notify) { |
208 | struct dma_fence *moving = dma_fence_get(bo->moving); | 164 | struct dma_fence *moving = dma_fence_get(bo->moving); |
@@ -213,9 +169,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
213 | break; | 169 | break; |
214 | case -EBUSY: | 170 | case -EBUSY: |
215 | case -ERESTARTSYS: | 171 | case -ERESTARTSYS: |
216 | return VM_FAULT_NOPAGE; | 172 | ret = VM_FAULT_NOPAGE; |
173 | goto out_unlock; | ||
217 | default: | 174 | default: |
218 | return VM_FAULT_SIGBUS; | 175 | ret = VM_FAULT_SIGBUS; |
176 | goto out_unlock; | ||
219 | } | 177 | } |
220 | 178 | ||
221 | if (bo->moving != moving) { | 179 | if (bo->moving != moving) { |
@@ -231,12 +189,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
231 | * move. | 189 | * move. |
232 | */ | 190 | */ |
233 | ret = ttm_bo_vm_fault_idle(bo, vmf); | 191 | ret = ttm_bo_vm_fault_idle(bo, vmf); |
234 | if (unlikely(ret != 0)) | 192 | if (unlikely(ret != 0)) { |
235 | return ret; | 193 | if (ret == VM_FAULT_RETRY && |
194 | !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { | ||
195 | /* The BO has already been unreserved. */ | ||
196 | return ret; | ||
197 | } | ||
198 | |||
199 | goto out_unlock; | ||
200 | } | ||
236 | 201 | ||
237 | err = ttm_mem_io_lock(man, true); | 202 | err = ttm_mem_io_lock(man, true); |
238 | if (unlikely(err != 0)) | 203 | if (unlikely(err != 0)) { |
239 | return VM_FAULT_NOPAGE; | 204 | ret = VM_FAULT_NOPAGE; |
205 | goto out_unlock; | ||
206 | } | ||
240 | err = ttm_mem_io_reserve_vm(bo); | 207 | err = ttm_mem_io_reserve_vm(bo); |
241 | if (unlikely(err != 0)) { | 208 | if (unlikely(err != 0)) { |
242 | ret = VM_FAULT_SIGBUS; | 209 | ret = VM_FAULT_SIGBUS; |
@@ -253,8 +220,18 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
253 | goto out_io_unlock; | 220 | goto out_io_unlock; |
254 | } | 221 | } |
255 | 222 | ||
256 | cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot); | 223 | /* |
257 | if (!bo->mem.bus.is_iomem) { | 224 | * Make a local vma copy to modify the page_prot member |
225 | * and vm_flags if necessary. The vma parameter is protected | ||
226 | * by mmap_sem in write mode. | ||
227 | */ | ||
228 | cvma = *vma; | ||
229 | cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags); | ||
230 | |||
231 | if (bo->mem.bus.is_iomem) { | ||
232 | cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, | ||
233 | cvma.vm_page_prot); | ||
234 | } else { | ||
258 | struct ttm_operation_ctx ctx = { | 235 | struct ttm_operation_ctx ctx = { |
259 | .interruptible = false, | 236 | .interruptible = false, |
260 | .no_wait_gpu = false, | 237 | .no_wait_gpu = false, |
@@ -263,21 +240,24 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
263 | }; | 240 | }; |
264 | 241 | ||
265 | ttm = bo->ttm; | 242 | ttm = bo->ttm; |
266 | if (ttm_tt_populate(bo->ttm, &ctx)) { | 243 | cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, |
244 | cvma.vm_page_prot); | ||
245 | |||
246 | /* Allocate all page at once, most common usage */ | ||
247 | if (ttm_tt_populate(ttm, &ctx)) { | ||
267 | ret = VM_FAULT_OOM; | 248 | ret = VM_FAULT_OOM; |
268 | goto out_io_unlock; | 249 | goto out_io_unlock; |
269 | } | 250 | } |
270 | } else { | ||
271 | /* Iomem should not be marked encrypted */ | ||
272 | cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot); | ||
273 | } | 251 | } |
274 | 252 | ||
275 | /* | 253 | /* |
276 | * Speculatively prefault a number of pages. Only error on | 254 | * Speculatively prefault a number of pages. Only error on |
277 | * first page. | 255 | * first page. |
278 | */ | 256 | */ |
279 | for (i = 0; i < num_prefault; ++i) { | 257 | for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) { |
280 | if (bo->mem.bus.is_iomem) { | 258 | if (bo->mem.bus.is_iomem) { |
259 | /* Iomem should not be marked encrypted */ | ||
260 | cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot); | ||
281 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); | 261 | pfn = ttm_bo_io_mem_pfn(bo, page_offset); |
282 | } else { | 262 | } else { |
283 | page = ttm->pages[page_offset]; | 263 | page = ttm->pages[page_offset]; |
@@ -315,26 +295,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | |||
315 | ret = VM_FAULT_NOPAGE; | 295 | ret = VM_FAULT_NOPAGE; |
316 | out_io_unlock: | 296 | out_io_unlock: |
317 | ttm_mem_io_unlock(man); | 297 | ttm_mem_io_unlock(man); |
318 | return ret; | 298 | out_unlock: |
319 | } | ||
320 | EXPORT_SYMBOL(ttm_bo_vm_fault_reserved); | ||
321 | |||
322 | static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) | ||
323 | { | ||
324 | struct vm_area_struct *vma = vmf->vma; | ||
325 | pgprot_t prot; | ||
326 | struct ttm_buffer_object *bo = vma->vm_private_data; | ||
327 | vm_fault_t ret; | ||
328 | |||
329 | ret = ttm_bo_vm_reserve(bo, vmf); | ||
330 | if (ret) | ||
331 | return ret; | ||
332 | |||
333 | prot = vm_get_page_prot(vma->vm_flags); | ||
334 | ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); | ||
335 | if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) | ||
336 | return ret; | ||
337 | |||
338 | reservation_object_unlock(bo->resv); | 299 | reservation_object_unlock(bo->resv); |
339 | return ret; | 300 | return ret; |
340 | } | 301 | } |
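For context on what this hunk folds back together: before the revert, the fault path was split into the two exported helpers whose kernel-doc is deleted above, and the (also deleted) ttm_bo_vm_fault() wrapper tied them together. A minimal sketch of that pre-revert wrapper, reconstructed from the deleted lines in this file (nothing here is new code, it only restates the removed wrapper in one place, with the names as in the deleted code):

```c
/*
 * Sketch of the pre-revert fault path removed in this hunk: reserve the BO
 * in a retryable way, let the reserved helper insert the PTEs, and unlock
 * only when the helper has not already dropped the reservation.
 */
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        /* May return VM_FAULT_RETRY (mmap_sem dropped) or VM_FAULT_NOPAGE. */
        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;     /* The helper already unreserved the BO. */

        reservation_object_unlock(bo->resv);
        return ret;
}
```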
@@ -434,7 +395,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, | |||
434 | return ret; | 395 | return ret; |
435 | } | 396 | } |
436 | 397 | ||
437 | const struct vm_operations_struct ttm_bo_vm_ops = { | 398 | static const struct vm_operations_struct ttm_bo_vm_ops = { |
438 | .fault = ttm_bo_vm_fault, | 399 | .fault = ttm_bo_vm_fault, |
439 | .open = ttm_bo_vm_open, | 400 | .open = ttm_bo_vm_open, |
440 | .close = ttm_bo_vm_close, | 401 | .close = ttm_bo_vm_close, |
@@ -487,7 +448,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, | |||
487 | if (unlikely(ret != 0)) | 448 | if (unlikely(ret != 0)) |
488 | goto out_unref; | 449 | goto out_unref; |
489 | 450 | ||
490 | vma->vm_ops = bdev->vm_ops; | 451 | vma->vm_ops = &ttm_bo_vm_ops; |
491 | 452 | ||
492 | /* | 453 | /* |
493 | * Note: We're transferring the bo reference to | 454 | * Note: We're transferring the bo reference to |
@@ -519,7 +480,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) | |||
519 | 480 | ||
520 | ttm_bo_get(bo); | 481 | ttm_bo_get(bo); |
521 | 482 | ||
522 | vma->vm_ops = bo->bdev->vm_ops; | 483 | vma->vm_ops = &ttm_bo_vm_ops; |
523 | vma->vm_private_data = bo; | 484 | vma->vm_private_data = bo; |
524 | vma->vm_flags |= VM_MIXEDMAP; | 485 | vma->vm_flags |= VM_MIXEDMAP; |
525 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; | 486 | vma->vm_flags |= VM_IO | VM_DONTEXPAND; |
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index d5fd81a521f6..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,7 +8,6 @@ config DRM_VMWGFX | |||
8 | select FB_CFB_IMAGEBLIT | 8 | select FB_CFB_IMAGEBLIT |
9 | select DRM_TTM | 9 | select DRM_TTM |
10 | select FB | 10 | select FB |
11 | select AS_DIRTY_HELPERS | ||
12 | # Only needed for the transitional use of drm_crtc_init - can be removed | 11 | # Only needed for the transitional use of drm_crtc_init - can be removed |
13 | # again once vmwgfx sets up the primary plane itself. | 12 | # again once vmwgfx sets up the primary plane itself. |
14 | select DRM_KMS_HELPER | 13 | select DRM_KMS_HELPER |
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c877a21a0739..8841bd30e1e5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ | |||
8 | vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ | 8 | vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ |
9 | vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ | 9 | vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ |
10 | vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ | 10 | vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ |
11 | vmwgfx_validation.o vmwgfx_page_dirty.o \ | 11 | vmwgfx_validation.o \ |
12 | ttm_object.o ttm_lock.o | 12 | ttm_object.o ttm_lock.o |
13 | 13 | ||
14 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o | 14 | obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o |
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 61414f105c67..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1280,6 +1280,7 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, | |||
1280 | return offset; | 1280 | return offset; |
1281 | } | 1281 | } |
1282 | 1282 | ||
1283 | |||
1283 | static inline u32 | 1284 | static inline u32 |
1284 | svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, | 1285 | svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format, |
1285 | surf_size_struct baseLevelSize, | 1286 | surf_size_struct baseLevelSize, |
@@ -1374,236 +1375,4 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format) | |||
1374 | return svga3dsurface_is_dx_screen_target_format(format); | 1375 | return svga3dsurface_is_dx_screen_target_format(format); |
1375 | } | 1376 | } |
1376 | 1377 | ||
1377 | /** | ||
1378 | * struct svga3dsurface_mip - Mimpmap level information | ||
1379 | * @bytes: Bytes required in the backing store of this mipmap level. | ||
1380 | * @img_stride: Byte stride per image. | ||
1381 | * @row_stride: Byte stride per block row. | ||
1382 | * @size: The size of the mipmap. | ||
1383 | */ | ||
1384 | struct svga3dsurface_mip { | ||
1385 | size_t bytes; | ||
1386 | size_t img_stride; | ||
1387 | size_t row_stride; | ||
1388 | struct drm_vmw_size size; | ||
1389 | |||
1390 | }; | ||
1391 | |||
1392 | /** | ||
1393 | * struct svga3dsurface_cache - Cached surface information | ||
1394 | * @desc: Pointer to the surface descriptor | ||
1395 | * @mip: Array of mipmap level information. Valid size is @num_mip_levels. | ||
1396 | * @mip_chain_bytes: Bytes required in the backing store for the whole chain | ||
1397 | * of mip levels. | ||
1398 | * @sheet_bytes: Bytes required in the backing store for a sheet | ||
1399 | * representing a single sample. | ||
1400 | * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in | ||
1401 | * a chain. | ||
1402 | * @num_layers: Number of slices in an array texture or number of faces in | ||
1403 | * a cubemap texture. | ||
1404 | */ | ||
1405 | struct svga3dsurface_cache { | ||
1406 | const struct svga3d_surface_desc *desc; | ||
1407 | struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS]; | ||
1408 | size_t mip_chain_bytes; | ||
1409 | size_t sheet_bytes; | ||
1410 | u32 num_mip_levels; | ||
1411 | u32 num_layers; | ||
1412 | }; | ||
1413 | |||
1414 | /** | ||
1415 | * struct svga3dsurface_loc - Surface location | ||
1416 | * @sub_resource: Surface subresource. Defined as layer * num_mip_levels + | ||
1417 | * mip_level. | ||
1418 | * @x: X coordinate. | ||
1419 | * @y: Y coordinate. | ||
1420 | * @z: Z coordinate. | ||
1421 | */ | ||
1422 | struct svga3dsurface_loc { | ||
1423 | u32 sub_resource; | ||
1424 | u32 x, y, z; | ||
1425 | }; | ||
1426 | |||
1427 | /** | ||
1428 | * svga3dsurface_subres - Compute the subresource from layer and mipmap. | ||
1429 | * @cache: Surface layout data. | ||
1430 | * @mip_level: The mipmap level. | ||
1431 | * @layer: The surface layer (face or array slice). | ||
1432 | * | ||
1433 | * Return: The subresource. | ||
1434 | */ | ||
1435 | static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache, | ||
1436 | u32 mip_level, u32 layer) | ||
1437 | { | ||
1438 | return cache->num_mip_levels * layer + mip_level; | ||
1439 | } | ||
1440 | |||
1441 | /** | ||
1442 | * svga3dsurface_setup_cache - Build a surface cache entry | ||
1443 | * @size: The surface base level dimensions. | ||
1444 | * @format: The surface format. | ||
1445 | * @num_mip_levels: Number of mipmap levels. | ||
1446 | * @num_layers: Number of layers. | ||
1447 | * @cache: Pointer to a struct svga3dsurface_cach object to be filled in. | ||
1448 | * | ||
1449 | * Return: Zero on success, -EINVAL on invalid surface layout. | ||
1450 | */ | ||
1451 | static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size, | ||
1452 | SVGA3dSurfaceFormat format, | ||
1453 | u32 num_mip_levels, | ||
1454 | u32 num_layers, | ||
1455 | u32 num_samples, | ||
1456 | struct svga3dsurface_cache *cache) | ||
1457 | { | ||
1458 | const struct svga3d_surface_desc *desc; | ||
1459 | u32 i; | ||
1460 | |||
1461 | memset(cache, 0, sizeof(*cache)); | ||
1462 | cache->desc = desc = svga3dsurface_get_desc(format); | ||
1463 | cache->num_mip_levels = num_mip_levels; | ||
1464 | cache->num_layers = num_layers; | ||
1465 | for (i = 0; i < cache->num_mip_levels; i++) { | ||
1466 | struct svga3dsurface_mip *mip = &cache->mip[i]; | ||
1467 | |||
1468 | mip->size = svga3dsurface_get_mip_size(*size, i); | ||
1469 | mip->bytes = svga3dsurface_get_image_buffer_size | ||
1470 | (desc, &mip->size, 0); | ||
1471 | mip->row_stride = | ||
1472 | __KERNEL_DIV_ROUND_UP(mip->size.width, | ||
1473 | desc->block_size.width) * | ||
1474 | desc->bytes_per_block * num_samples; | ||
1475 | if (!mip->row_stride) | ||
1476 | goto invalid_dim; | ||
1477 | |||
1478 | mip->img_stride = | ||
1479 | __KERNEL_DIV_ROUND_UP(mip->size.height, | ||
1480 | desc->block_size.height) * | ||
1481 | mip->row_stride; | ||
1482 | if (!mip->img_stride) | ||
1483 | goto invalid_dim; | ||
1484 | |||
1485 | cache->mip_chain_bytes += mip->bytes; | ||
1486 | } | ||
1487 | cache->sheet_bytes = cache->mip_chain_bytes * num_layers; | ||
1488 | if (!cache->sheet_bytes) | ||
1489 | goto invalid_dim; | ||
1490 | |||
1491 | return 0; | ||
1492 | |||
1493 | invalid_dim: | ||
1494 | VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n"); | ||
1495 | return -EINVAL; | ||
1496 | } | ||
1497 | |||
1498 | /** | ||
1499 | * svga3dsurface_get_loc - Get a surface location from an offset into the | ||
1500 | * backing store | ||
1501 | * @cache: Surface layout data. | ||
1502 | * @loc: Pointer to a struct svga3dsurface_loc to be filled in. | ||
1503 | * @offset: Offset into the surface backing store. | ||
1504 | */ | ||
1505 | static inline void | ||
1506 | svga3dsurface_get_loc(const struct svga3dsurface_cache *cache, | ||
1507 | struct svga3dsurface_loc *loc, | ||
1508 | size_t offset) | ||
1509 | { | ||
1510 | const struct svga3dsurface_mip *mip = &cache->mip[0]; | ||
1511 | const struct svga3d_surface_desc *desc = cache->desc; | ||
1512 | u32 layer; | ||
1513 | int i; | ||
1514 | |||
1515 | if (offset >= cache->sheet_bytes) | ||
1516 | offset %= cache->sheet_bytes; | ||
1517 | |||
1518 | layer = offset / cache->mip_chain_bytes; | ||
1519 | offset -= layer * cache->mip_chain_bytes; | ||
1520 | for (i = 0; i < cache->num_mip_levels; ++i, ++mip) { | ||
1521 | if (mip->bytes > offset) | ||
1522 | break; | ||
1523 | offset -= mip->bytes; | ||
1524 | } | ||
1525 | |||
1526 | loc->sub_resource = svga3dsurface_subres(cache, i, layer); | ||
1527 | loc->z = offset / mip->img_stride; | ||
1528 | offset -= loc->z * mip->img_stride; | ||
1529 | loc->z *= desc->block_size.depth; | ||
1530 | loc->y = offset / mip->row_stride; | ||
1531 | offset -= loc->y * mip->row_stride; | ||
1532 | loc->y *= desc->block_size.height; | ||
1533 | loc->x = offset / desc->bytes_per_block; | ||
1534 | loc->x *= desc->block_size.width; | ||
1535 | } | ||
1536 | |||
1537 | /** | ||
1538 | * svga3dsurface_inc_loc - Clamp increment a surface location with one block | ||
1539 | * size | ||
1540 | * in each dimension. | ||
1541 | * @loc: Pointer to a struct svga3dsurface_loc to be incremented. | ||
1542 | * | ||
1543 | * When computing the size of a range as size = end - start, the range does not | ||
1544 | * include the end element. However a location representing the last byte | ||
1545 | * of a touched region in the backing store *is* included in the range. | ||
1546 | * This function modifies such a location to match the end definition | ||
1547 | * given as start + size which is the one used in a SVGA3dBox. | ||
1548 | */ | ||
1549 | static inline void | ||
1550 | svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache, | ||
1551 | struct svga3dsurface_loc *loc) | ||
1552 | { | ||
1553 | const struct svga3d_surface_desc *desc = cache->desc; | ||
1554 | u32 mip = loc->sub_resource % cache->num_mip_levels; | ||
1555 | const struct drm_vmw_size *size = &cache->mip[mip].size; | ||
1556 | |||
1557 | loc->sub_resource++; | ||
1558 | loc->x += desc->block_size.width; | ||
1559 | if (loc->x > size->width) | ||
1560 | loc->x = size->width; | ||
1561 | loc->y += desc->block_size.height; | ||
1562 | if (loc->y > size->height) | ||
1563 | loc->y = size->height; | ||
1564 | loc->z += desc->block_size.depth; | ||
1565 | if (loc->z > size->depth) | ||
1566 | loc->z = size->depth; | ||
1567 | } | ||
1568 | |||
1569 | /** | ||
1570 | * svga3dsurface_min_loc - The start location in a subresource | ||
1571 | * @cache: Surface layout data. | ||
1572 | * @sub_resource: The subresource. | ||
1573 | * @loc: Pointer to a struct svga3dsurface_loc to be filled in. | ||
1574 | */ | ||
1575 | static inline void | ||
1576 | svga3dsurface_min_loc(const struct svga3dsurface_cache *cache, | ||
1577 | u32 sub_resource, | ||
1578 | struct svga3dsurface_loc *loc) | ||
1579 | { | ||
1580 | loc->sub_resource = sub_resource; | ||
1581 | loc->x = loc->y = loc->z = 0; | ||
1582 | } | ||
1583 | |||
1584 | /** | ||
1585 | * svga3dsurface_min_loc - The end location in a subresource | ||
1586 | * @cache: Surface layout data. | ||
1587 | * @sub_resource: The subresource. | ||
1588 | * @loc: Pointer to a struct svga3dsurface_loc to be filled in. | ||
1589 | * | ||
1590 | * Following the end definition given in svga3dsurface_inc_loc(), | ||
1591 | * Compute the end location of a surface subresource. | ||
1592 | */ | ||
1593 | static inline void | ||
1594 | svga3dsurface_max_loc(const struct svga3dsurface_cache *cache, | ||
1595 | u32 sub_resource, | ||
1596 | struct svga3dsurface_loc *loc) | ||
1597 | { | ||
1598 | const struct drm_vmw_size *size; | ||
1599 | u32 mip; | ||
1600 | |||
1601 | loc->sub_resource = sub_resource + 1; | ||
1602 | mip = sub_resource % cache->num_mip_levels; | ||
1603 | size = &cache->mip[mip].size; | ||
1604 | loc->x = size->width; | ||
1605 | loc->y = size->height; | ||
1606 | loc->z = size->depth; | ||
1607 | } | ||
1608 | |||
1609 | #endif /* _SVGA3D_SURFACEDEFS_H_ */ | 1378 | #endif /* _SVGA3D_SURFACEDEFS_H_ */ |
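The deleted helpers above form a small surface-layout API used for dirty tracking. As a rough, hypothetical illustration of how they compose (the wrapper function and its parameters are invented for this sketch; only the helper signatures and the clamp-increment end convention come from the deleted code):

```c
/*
 * Hypothetical illustration only: turn a dirtied byte range [start, end)
 * of a surface backing store into a pair of clamped surface locations.
 */
static int example_range_to_locs(const struct drm_vmw_size *base_size,
                                 SVGA3dSurfaceFormat format,
                                 u32 num_mip_levels, u32 num_layers,
                                 u32 num_samples,
                                 size_t start, size_t end,
                                 struct svga3dsurface_loc *loc_start,
                                 struct svga3dsurface_loc *loc_end)
{
        struct svga3dsurface_cache cache;
        int ret;

        ret = svga3dsurface_setup_cache(base_size, format, num_mip_levels,
                                        num_layers, num_samples, &cache);
        if (ret)
                return ret;     /* -EINVAL on an invalid surface layout. */

        /* Locations of the first and last byte touched... */
        svga3dsurface_get_loc(&cache, loc_start, start);
        svga3dsurface_get_loc(&cache, loc_end, end - 1);
        /* ...then convert "last byte" into the SVGA3dBox end convention. */
        svga3dsurface_inc_loc(&cache, loc_end);

        return 0;
}
```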
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 5971c72e6d10..16b2083cb9d4 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,6 +29,7 @@ | |||
29 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | 29 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
30 | */ | 30 | */ |
31 | 31 | ||
32 | #include <drm/ttm/ttm_module.h> | ||
32 | #include <linux/atomic.h> | 33 | #include <linux/atomic.h> |
33 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
34 | #include <linux/wait.h> | 35 | #include <linux/wait.h> |
@@ -48,6 +49,8 @@ void ttm_lock_init(struct ttm_lock *lock) | |||
48 | init_waitqueue_head(&lock->queue); | 49 | init_waitqueue_head(&lock->queue); |
49 | lock->rw = 0; | 50 | lock->rw = 0; |
50 | lock->flags = 0; | 51 | lock->flags = 0; |
52 | lock->kill_takers = false; | ||
53 | lock->signal = SIGKILL; | ||
51 | } | 54 | } |
52 | 55 | ||
53 | void ttm_read_unlock(struct ttm_lock *lock) | 56 | void ttm_read_unlock(struct ttm_lock *lock) |
@@ -63,6 +66,11 @@ static bool __ttm_read_lock(struct ttm_lock *lock) | |||
63 | bool locked = false; | 66 | bool locked = false; |
64 | 67 | ||
65 | spin_lock(&lock->lock); | 68 | spin_lock(&lock->lock); |
69 | if (unlikely(lock->kill_takers)) { | ||
70 | send_sig(lock->signal, current, 0); | ||
71 | spin_unlock(&lock->lock); | ||
72 | return false; | ||
73 | } | ||
66 | if (lock->rw >= 0 && lock->flags == 0) { | 74 | if (lock->rw >= 0 && lock->flags == 0) { |
67 | ++lock->rw; | 75 | ++lock->rw; |
68 | locked = true; | 76 | locked = true; |
@@ -90,6 +98,11 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) | |||
90 | *locked = false; | 98 | *locked = false; |
91 | 99 | ||
92 | spin_lock(&lock->lock); | 100 | spin_lock(&lock->lock); |
101 | if (unlikely(lock->kill_takers)) { | ||
102 | send_sig(lock->signal, current, 0); | ||
103 | spin_unlock(&lock->lock); | ||
104 | return false; | ||
105 | } | ||
93 | if (lock->rw >= 0 && lock->flags == 0) { | 106 | if (lock->rw >= 0 && lock->flags == 0) { |
94 | ++lock->rw; | 107 | ++lock->rw; |
95 | block = false; | 108 | block = false; |
@@ -134,6 +147,11 @@ static bool __ttm_write_lock(struct ttm_lock *lock) | |||
134 | bool locked = false; | 147 | bool locked = false; |
135 | 148 | ||
136 | spin_lock(&lock->lock); | 149 | spin_lock(&lock->lock); |
150 | if (unlikely(lock->kill_takers)) { | ||
151 | send_sig(lock->signal, current, 0); | ||
152 | spin_unlock(&lock->lock); | ||
153 | return false; | ||
154 | } | ||
137 | if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { | 155 | if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { |
138 | lock->rw = -1; | 156 | lock->rw = -1; |
139 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | 157 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; |
@@ -164,6 +182,88 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | |||
164 | return ret; | 182 | return ret; |
165 | } | 183 | } |
166 | 184 | ||
185 | static int __ttm_vt_unlock(struct ttm_lock *lock) | ||
186 | { | ||
187 | int ret = 0; | ||
188 | |||
189 | spin_lock(&lock->lock); | ||
190 | if (unlikely(!(lock->flags & TTM_VT_LOCK))) | ||
191 | ret = -EINVAL; | ||
192 | lock->flags &= ~TTM_VT_LOCK; | ||
193 | wake_up_all(&lock->queue); | ||
194 | spin_unlock(&lock->lock); | ||
195 | |||
196 | return ret; | ||
197 | } | ||
198 | |||
199 | static void ttm_vt_lock_remove(struct ttm_base_object **p_base) | ||
200 | { | ||
201 | struct ttm_base_object *base = *p_base; | ||
202 | struct ttm_lock *lock = container_of(base, struct ttm_lock, base); | ||
203 | int ret; | ||
204 | |||
205 | *p_base = NULL; | ||
206 | ret = __ttm_vt_unlock(lock); | ||
207 | BUG_ON(ret != 0); | ||
208 | } | ||
209 | |||
210 | static bool __ttm_vt_lock(struct ttm_lock *lock) | ||
211 | { | ||
212 | bool locked = false; | ||
213 | |||
214 | spin_lock(&lock->lock); | ||
215 | if (lock->rw == 0) { | ||
216 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
217 | lock->flags |= TTM_VT_LOCK; | ||
218 | locked = true; | ||
219 | } else { | ||
220 | lock->flags |= TTM_VT_LOCK_PENDING; | ||
221 | } | ||
222 | spin_unlock(&lock->lock); | ||
223 | return locked; | ||
224 | } | ||
225 | |||
226 | int ttm_vt_lock(struct ttm_lock *lock, | ||
227 | bool interruptible, | ||
228 | struct ttm_object_file *tfile) | ||
229 | { | ||
230 | int ret = 0; | ||
231 | |||
232 | if (interruptible) { | ||
233 | ret = wait_event_interruptible(lock->queue, | ||
234 | __ttm_vt_lock(lock)); | ||
235 | if (unlikely(ret != 0)) { | ||
236 | spin_lock(&lock->lock); | ||
237 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
238 | wake_up_all(&lock->queue); | ||
239 | spin_unlock(&lock->lock); | ||
240 | return ret; | ||
241 | } | ||
242 | } else | ||
243 | wait_event(lock->queue, __ttm_vt_lock(lock)); | ||
244 | |||
245 | /* | ||
246 | * Add a base-object, the destructor of which will | ||
247 | * make sure the lock is released if the client dies | ||
248 | * while holding it. | ||
249 | */ | ||
250 | |||
251 | ret = ttm_base_object_init(tfile, &lock->base, false, | ||
252 | ttm_lock_type, &ttm_vt_lock_remove, NULL); | ||
253 | if (ret) | ||
254 | (void)__ttm_vt_unlock(lock); | ||
255 | else | ||
256 | lock->vt_holder = tfile; | ||
257 | |||
258 | return ret; | ||
259 | } | ||
260 | |||
261 | int ttm_vt_unlock(struct ttm_lock *lock) | ||
262 | { | ||
263 | return ttm_ref_object_base_unref(lock->vt_holder, | ||
264 | lock->base.handle, TTM_REF_USAGE); | ||
265 | } | ||
266 | |||
167 | void ttm_suspend_unlock(struct ttm_lock *lock) | 267 | void ttm_suspend_unlock(struct ttm_lock *lock) |
168 | { | 268 | { |
169 | spin_lock(&lock->lock); | 269 | spin_lock(&lock->lock); |
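A minimal sketch of how the restored VT lock is meant to be driven, assuming the struct vmw_master and struct vmw_fpriv definitions this patch also restores (the wrapper below is illustrative, modeled on the vmw_master_drop()/vmw_master_set() hunks later in the patch):

```c
/*
 * Illustrative wrapper (not part of the patch): the dropping master takes
 * the VT lock on behalf of its file, and the lock is released again when a
 * new master takes over.
 */
static int example_vt_switch(struct vmw_master *vmaster,
                             struct vmw_fpriv *vmw_fp, bool drop)
{
        if (drop)
                /*
                 * Also registers a base object so the lock is released if
                 * the client dies while holding it.
                 */
                return ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);

        return ttm_vt_unlock(&vmaster->lock);
}
```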
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
index 3d454e8b491f..0c3af9836863 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -63,6 +63,8 @@ | |||
63 | * @lock: Spinlock protecting some lock members. | 63 | * @lock: Spinlock protecting some lock members. |
64 | * @rw: Read-write lock counter. Protected by @lock. | 64 | * @rw: Read-write lock counter. Protected by @lock. |
65 | * @flags: Lock state. Protected by @lock. | 65 | * @flags: Lock state. Protected by @lock. |
66 | * @kill_takers: Boolean whether to kill takers of the lock. | ||
67 | * @signal: Signal to send when kill_takers is true. | ||
66 | */ | 68 | */ |
67 | 69 | ||
68 | struct ttm_lock { | 70 | struct ttm_lock { |
@@ -71,6 +73,9 @@ struct ttm_lock { | |||
71 | spinlock_t lock; | 73 | spinlock_t lock; |
72 | int32_t rw; | 74 | int32_t rw; |
73 | uint32_t flags; | 75 | uint32_t flags; |
76 | bool kill_takers; | ||
77 | int signal; | ||
78 | struct ttm_object_file *vt_holder; | ||
74 | }; | 79 | }; |
75 | 80 | ||
76 | 81 | ||
@@ -215,4 +220,29 @@ extern void ttm_write_unlock(struct ttm_lock *lock); | |||
215 | */ | 220 | */ |
216 | extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); | 221 | extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); |
217 | 222 | ||
223 | /** | ||
224 | * ttm_lock_set_kill | ||
225 | * | ||
226 | * @lock: Pointer to a struct ttm_lock | ||
227 | * @val: Boolean whether to kill processes taking the lock. | ||
228 | * @signal: Signal to send to the process taking the lock. | ||
229 | * | ||
230 | * The kill-when-taking-lock functionality is used to kill processes that keep | ||
231 | * on using the TTM functionality when its resources has been taken down, for | ||
232 | * example when the X server exits. A typical sequence would look like this: | ||
233 | * - X server takes lock in write mode. | ||
234 | * - ttm_lock_set_kill() is called with @val set to true. | ||
235 | * - As part of X server exit, TTM resources are taken down. | ||
236 | * - X server releases the lock on file release. | ||
237 | * - Another dri client wants to render, takes the lock and is killed. | ||
238 | * | ||
239 | */ | ||
240 | static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, | ||
241 | int signal) | ||
242 | { | ||
243 | lock->kill_takers = val; | ||
244 | if (val) | ||
245 | lock->signal = signal; | ||
246 | } | ||
247 | |||
218 | #endif | 248 | #endif |
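The sequence described in the ttm_lock_set_kill() comment maps directly onto the vmwgfx master hooks restored later in this patch; a minimal sketch of the two halves, with illustrative wrapper names:

```c
/*
 * Minimal sketch of the kill-takers lifecycle described above, mirroring
 * vmw_master_set() and vmw_postclose() as restored by this patch.
 */
static void example_master_becomes_active(struct vmw_master *vmaster)
{
        /* A live master: let clients take the lock normally again. */
        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
}

static void example_master_exits(struct vmw_master *vmaster)
{
        /*
         * The master's TTM resources are gone: any client still trying to
         * take the lock now gets SIGTERM, and the VT lock held for the
         * exiting master is dropped.
         */
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        ttm_vt_unlock(&vmaster->lock);
}
```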
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index e8bc7a7ac031..5d5c2bce01f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -463,8 +463,6 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) | |||
463 | { | 463 | { |
464 | struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); | 464 | struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo); |
465 | 465 | ||
466 | WARN_ON(vmw_bo->dirty); | ||
467 | WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree)); | ||
468 | vmw_bo_unmap(vmw_bo); | 466 | vmw_bo_unmap(vmw_bo); |
469 | kfree(vmw_bo); | 467 | kfree(vmw_bo); |
470 | } | 468 | } |
@@ -478,11 +476,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo) | |||
478 | static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) | 476 | static void vmw_user_bo_destroy(struct ttm_buffer_object *bo) |
479 | { | 477 | { |
480 | struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); | 478 | struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo); |
481 | struct vmw_buffer_object *vbo = &vmw_user_bo->vbo; | ||
482 | 479 | ||
483 | WARN_ON(vbo->dirty); | 480 | vmw_bo_unmap(&vmw_user_bo->vbo); |
484 | WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); | ||
485 | vmw_bo_unmap(vbo); | ||
486 | ttm_prime_object_kfree(vmw_user_bo, prime); | 481 | ttm_prime_object_kfree(vmw_user_bo, prime); |
487 | } | 482 | } |
488 | 483 | ||
@@ -515,9 +510,8 @@ int vmw_bo_init(struct vmw_private *dev_priv, | |||
515 | 510 | ||
516 | acc_size = vmw_bo_acc_size(dev_priv, size, user); | 511 | acc_size = vmw_bo_acc_size(dev_priv, size, user); |
517 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | 512 | memset(vmw_bo, 0, sizeof(*vmw_bo)); |
518 | BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3); | 513 | |
519 | vmw_bo->base.priority = 3; | 514 | INIT_LIST_HEAD(&vmw_bo->res_list); |
520 | vmw_bo->res_tree = RB_ROOT; | ||
521 | 515 | ||
522 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | 516 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, |
523 | ttm_bo_type_device, placement, | 517 | ttm_bo_type_device, placement, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a56c9d802382..63f111068a44 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -88,8 +88,6 @@ static const struct vmw_res_func vmw_gb_context_func = { | |||
88 | .res_type = vmw_res_context, | 88 | .res_type = vmw_res_context, |
89 | .needs_backup = true, | 89 | .needs_backup = true, |
90 | .may_evict = true, | 90 | .may_evict = true, |
91 | .prio = 3, | ||
92 | .dirty_prio = 3, | ||
93 | .type_name = "guest backed contexts", | 91 | .type_name = "guest backed contexts", |
94 | .backup_placement = &vmw_mob_placement, | 92 | .backup_placement = &vmw_mob_placement, |
95 | .create = vmw_gb_context_create, | 93 | .create = vmw_gb_context_create, |
@@ -102,8 +100,6 @@ static const struct vmw_res_func vmw_dx_context_func = { | |||
102 | .res_type = vmw_res_dx_context, | 100 | .res_type = vmw_res_dx_context, |
103 | .needs_backup = true, | 101 | .needs_backup = true, |
104 | .may_evict = true, | 102 | .may_evict = true, |
105 | .prio = 3, | ||
106 | .dirty_prio = 3, | ||
107 | .type_name = "dx contexts", | 103 | .type_name = "dx contexts", |
108 | .backup_placement = &vmw_mob_placement, | 104 | .backup_placement = &vmw_mob_placement, |
109 | .create = vmw_dx_context_create, | 105 | .create = vmw_dx_context_create, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 8c699cb2565b..b4f6e1217c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,8 +116,6 @@ static const struct vmw_res_func vmw_cotable_func = { | |||
116 | .res_type = vmw_res_cotable, | 116 | .res_type = vmw_res_cotable, |
117 | .needs_backup = true, | 117 | .needs_backup = true, |
118 | .may_evict = true, | 118 | .may_evict = true, |
119 | .prio = 3, | ||
120 | .dirty_prio = 3, | ||
121 | .type_name = "context guest backed object tables", | 119 | .type_name = "context guest backed object tables", |
122 | .backup_placement = &vmw_mob_placement, | 120 | .backup_placement = &vmw_mob_placement, |
123 | .create = vmw_cotable_create, | 121 | .create = vmw_cotable_create, |
@@ -309,7 +307,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, | |||
309 | struct ttm_buffer_object *bo = val_buf->bo; | 307 | struct ttm_buffer_object *bo = val_buf->bo; |
310 | struct vmw_fence_obj *fence; | 308 | struct vmw_fence_obj *fence; |
311 | 309 | ||
312 | if (!vmw_resource_mob_attached(res)) | 310 | if (list_empty(&res->mob_head)) |
313 | return 0; | 311 | return 0; |
314 | 312 | ||
315 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); | 313 | WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); |
@@ -455,7 +453,6 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) | |||
455 | goto out_wait; | 453 | goto out_wait; |
456 | } | 454 | } |
457 | 455 | ||
458 | vmw_resource_mob_detach(res); | ||
459 | res->backup = buf; | 456 | res->backup = buf; |
460 | res->backup_size = new_size; | 457 | res->backup_size = new_size; |
461 | vcotbl->size_read_back = cur_size_read_back; | 458 | vcotbl->size_read_back = cur_size_read_back; |
@@ -470,12 +467,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) | |||
470 | res->backup = old_buf; | 467 | res->backup = old_buf; |
471 | res->backup_size = old_size; | 468 | res->backup_size = old_size; |
472 | vcotbl->size_read_back = old_size_read_back; | 469 | vcotbl->size_read_back = old_size_read_back; |
473 | vmw_resource_mob_attach(res); | ||
474 | goto out_wait; | 470 | goto out_wait; |
475 | } | 471 | } |
476 | 472 | ||
477 | vmw_resource_mob_attach(res); | ||
478 | /* Let go of the old mob. */ | 473 | /* Let go of the old mob. */ |
474 | list_del(&res->mob_head); | ||
475 | list_add_tail(&res->mob_head, &buf->res_list); | ||
479 | vmw_bo_unreference(&old_buf); | 476 | vmw_bo_unreference(&old_buf); |
480 | res->id = vcotbl->type; | 477 | res->id = vcotbl->type; |
481 | 478 | ||
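With the rb-tree MOB helpers reverted, "attached to a backup buffer" goes back to meaning plain list membership on the buffer's res_list, tested with list_empty(). A small sketch of the open-coded equivalents used throughout the restored code (the inline wrappers below are illustrative, not added by this patch):

```c
/*
 * Illustrative inline wrappers for the list-based MOB attachment that this
 * hunk restores: attach/detach are plain list operations and list_empty()
 * is the attachment test.
 */
static inline void example_mob_attach(struct vmw_resource *res,
                                      struct vmw_buffer_object *backup)
{
        list_add_tail(&res->mob_head, &backup->res_list);
}

static inline void example_mob_detach(struct vmw_resource *res)
{
        /* list_del_init() keeps list_empty(&res->mob_head) meaningful. */
        list_del_init(&res->mob_head);
}

static inline bool example_mob_attached(const struct vmw_resource *res)
{
        return !list_empty(&res->mob_head);
}
```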
@@ -499,7 +496,7 @@ out_wait: | |||
499 | * is called before bind() in the validation sequence is instead used for two | 496 | * is called before bind() in the validation sequence is instead used for two |
500 | * things. | 497 | * things. |
501 | * 1) Unscrub the cotable if it is scrubbed and still attached to a backup | 498 | * 1) Unscrub the cotable if it is scrubbed and still attached to a backup |
502 | * buffer. | 499 | * buffer, that is, if @res->mob_head is non-empty. |
503 | * 2) Resize the cotable if needed. | 500 | * 2) Resize the cotable if needed. |
504 | */ | 501 | */ |
505 | static int vmw_cotable_create(struct vmw_resource *res) | 502 | static int vmw_cotable_create(struct vmw_resource *res) |
@@ -515,7 +512,7 @@ static int vmw_cotable_create(struct vmw_resource *res) | |||
515 | new_size *= 2; | 512 | new_size *= 2; |
516 | 513 | ||
517 | if (likely(new_size <= res->backup_size)) { | 514 | if (likely(new_size <= res->backup_size)) { |
518 | if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) { | 515 | if (vcotbl->scrubbed && !list_empty(&res->mob_head)) { |
519 | ret = vmw_cotable_unscrub(res); | 516 | ret = vmw_cotable_unscrub(res); |
520 | if (ret) | 517 | if (ret) |
521 | return ret; | 518 | return ret; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8349a6cc126f..4ff11a0077e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -254,6 +254,7 @@ static int vmw_restrict_dma_mask; | |||
254 | static int vmw_assume_16bpp; | 254 | static int vmw_assume_16bpp; |
255 | 255 | ||
256 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); | 256 | static int vmw_probe(struct pci_dev *, const struct pci_device_id *); |
257 | static void vmw_master_init(struct vmw_master *); | ||
257 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, | 258 | static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, |
258 | void *ptr); | 259 | void *ptr); |
259 | 260 | ||
@@ -761,6 +762,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
761 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", | 762 | DRM_INFO("MMIO at 0x%08x size is %u kiB\n", |
762 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); | 763 | dev_priv->mmio_start, dev_priv->mmio_size / 1024); |
763 | 764 | ||
765 | vmw_master_init(&dev_priv->fbdev_master); | ||
766 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | ||
767 | dev_priv->active_master = &dev_priv->fbdev_master; | ||
768 | |||
764 | dev_priv->mmio_virt = memremap(dev_priv->mmio_start, | 769 | dev_priv->mmio_virt = memremap(dev_priv->mmio_start, |
765 | dev_priv->mmio_size, MEMREMAP_WB); | 770 | dev_priv->mmio_size, MEMREMAP_WB); |
766 | 771 | ||
@@ -828,11 +833,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
828 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); | 833 | DRM_ERROR("Failed initializing TTM buffer object driver.\n"); |
829 | goto out_no_bdev; | 834 | goto out_no_bdev; |
830 | } | 835 | } |
831 | dev_priv->vm_ops = *dev_priv->bdev.vm_ops; | ||
832 | dev_priv->vm_ops.fault = vmw_bo_vm_fault; | ||
833 | dev_priv->vm_ops.pfn_mkwrite = vmw_bo_vm_mkwrite; | ||
834 | dev_priv->vm_ops.page_mkwrite = vmw_bo_vm_mkwrite; | ||
835 | dev_priv->bdev.vm_ops = &dev_priv->vm_ops; | ||
836 | 836 | ||
837 | /* | 837 | /* |
838 | * Enable VRAM, but initially don't use it until SVGA is enabled and | 838 | * Enable VRAM, but initially don't use it until SVGA is enabled and |
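For context, the five deleted lines above were the whole pre-revert hook mechanism: ttm_bo_mmap() picked up vma->vm_ops from bdev->vm_ops (see the ttm_bo_vm.c hunks earlier), so vmwgfx could interpose its dirty-tracking fault handlers by publishing a patched copy of TTM's table. A sketch taken almost verbatim from the removed lines (the wrapper function name is illustrative; dev_priv->vm_ops is the pre-revert struct member):

```c
/*
 * Pre-revert hook-up removed in this hunk, wrapped in an illustrative
 * helper: copy TTM's default vm_ops, override the fault/mkwrite callbacks
 * with the vmwgfx dirty-tracking variants, and let ttm_bo_mmap() hand the
 * patched table to every vmwgfx VMA via bdev->vm_ops.
 */
static void example_setup_dirty_tracking_vm_ops(struct vmw_private *dev_priv)
{
        dev_priv->vm_ops = *dev_priv->bdev.vm_ops;
        dev_priv->vm_ops.fault = vmw_bo_vm_fault;
        dev_priv->vm_ops.pfn_mkwrite = vmw_bo_vm_mkwrite;
        dev_priv->vm_ops.page_mkwrite = vmw_bo_vm_mkwrite;
        dev_priv->bdev.vm_ops = &dev_priv->vm_ops;
}
```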
@@ -1007,7 +1007,18 @@ static void vmw_driver_unload(struct drm_device *dev) | |||
1007 | static void vmw_postclose(struct drm_device *dev, | 1007 | static void vmw_postclose(struct drm_device *dev, |
1008 | struct drm_file *file_priv) | 1008 | struct drm_file *file_priv) |
1009 | { | 1009 | { |
1010 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | 1010 | struct vmw_fpriv *vmw_fp; |
1011 | |||
1012 | vmw_fp = vmw_fpriv(file_priv); | ||
1013 | |||
1014 | if (vmw_fp->locked_master) { | ||
1015 | struct vmw_master *vmaster = | ||
1016 | vmw_master(vmw_fp->locked_master); | ||
1017 | |||
1018 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | ||
1019 | ttm_vt_unlock(&vmaster->lock); | ||
1020 | drm_master_put(&vmw_fp->locked_master); | ||
1021 | } | ||
1011 | 1022 | ||
1012 | ttm_object_file_release(&vmw_fp->tfile); | 1023 | ttm_object_file_release(&vmw_fp->tfile); |
1013 | kfree(vmw_fp); | 1024 | kfree(vmw_fp); |
@@ -1036,6 +1047,55 @@ out_no_tfile: | |||
1036 | return ret; | 1047 | return ret; |
1037 | } | 1048 | } |
1038 | 1049 | ||
1050 | static struct vmw_master *vmw_master_check(struct drm_device *dev, | ||
1051 | struct drm_file *file_priv, | ||
1052 | unsigned int flags) | ||
1053 | { | ||
1054 | int ret; | ||
1055 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
1056 | struct vmw_master *vmaster; | ||
1057 | |||
1058 | if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) | ||
1059 | return NULL; | ||
1060 | |||
1061 | ret = mutex_lock_interruptible(&dev->master_mutex); | ||
1062 | if (unlikely(ret != 0)) | ||
1063 | return ERR_PTR(-ERESTARTSYS); | ||
1064 | |||
1065 | if (drm_is_current_master(file_priv)) { | ||
1066 | mutex_unlock(&dev->master_mutex); | ||
1067 | return NULL; | ||
1068 | } | ||
1069 | |||
1070 | /* | ||
1071 | * Check if we were previously master, but now dropped. In that | ||
1072 | * case, allow at least render node functionality. | ||
1073 | */ | ||
1074 | if (vmw_fp->locked_master) { | ||
1075 | mutex_unlock(&dev->master_mutex); | ||
1076 | |||
1077 | if (flags & DRM_RENDER_ALLOW) | ||
1078 | return NULL; | ||
1079 | |||
1080 | DRM_ERROR("Dropped master trying to access ioctl that " | ||
1081 | "requires authentication.\n"); | ||
1082 | return ERR_PTR(-EACCES); | ||
1083 | } | ||
1084 | mutex_unlock(&dev->master_mutex); | ||
1085 | |||
1086 | /* | ||
1087 | * Take the TTM lock. Possibly sleep waiting for the authenticating | ||
1088 | * master to become master again, or for a SIGTERM if the | ||
1089 | * authenticating master exits. | ||
1090 | */ | ||
1091 | vmaster = vmw_master(file_priv->master); | ||
1092 | ret = ttm_read_lock(&vmaster->lock, true); | ||
1093 | if (unlikely(ret != 0)) | ||
1094 | vmaster = ERR_PTR(ret); | ||
1095 | |||
1096 | return vmaster; | ||
1097 | } | ||
1098 | |||
1039 | static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | 1099 | static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, |
1040 | unsigned long arg, | 1100 | unsigned long arg, |
1041 | long (*ioctl_func)(struct file *, unsigned int, | 1101 | long (*ioctl_func)(struct file *, unsigned int, |
@@ -1044,6 +1104,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | |||
1044 | struct drm_file *file_priv = filp->private_data; | 1104 | struct drm_file *file_priv = filp->private_data; |
1045 | struct drm_device *dev = file_priv->minor->dev; | 1105 | struct drm_device *dev = file_priv->minor->dev; |
1046 | unsigned int nr = DRM_IOCTL_NR(cmd); | 1106 | unsigned int nr = DRM_IOCTL_NR(cmd); |
1107 | struct vmw_master *vmaster; | ||
1047 | unsigned int flags; | 1108 | unsigned int flags; |
1048 | long ret; | 1109 | long ret; |
1049 | 1110 | ||
@@ -1079,7 +1140,21 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd, | |||
1079 | } else if (!drm_ioctl_flags(nr, &flags)) | 1140 | } else if (!drm_ioctl_flags(nr, &flags)) |
1080 | return -EINVAL; | 1141 | return -EINVAL; |
1081 | 1142 | ||
1082 | return ioctl_func(filp, cmd, arg); | 1143 | vmaster = vmw_master_check(dev, file_priv, flags); |
1144 | if (IS_ERR(vmaster)) { | ||
1145 | ret = PTR_ERR(vmaster); | ||
1146 | |||
1147 | if (ret != -ERESTARTSYS) | ||
1148 | DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n", | ||
1149 | nr, ret); | ||
1150 | return ret; | ||
1151 | } | ||
1152 | |||
1153 | ret = ioctl_func(filp, cmd, arg); | ||
1154 | if (vmaster) | ||
1155 | ttm_read_unlock(&vmaster->lock); | ||
1156 | |||
1157 | return ret; | ||
1083 | 1158 | ||
1084 | out_io_encoding: | 1159 | out_io_encoding: |
1085 | DRM_ERROR("Invalid command format, ioctl %d\n", | 1160 | DRM_ERROR("Invalid command format, ioctl %d\n", |
@@ -1106,10 +1181,65 @@ static void vmw_lastclose(struct drm_device *dev) | |||
1106 | { | 1181 | { |
1107 | } | 1182 | } |
1108 | 1183 | ||
1184 | static void vmw_master_init(struct vmw_master *vmaster) | ||
1185 | { | ||
1186 | ttm_lock_init(&vmaster->lock); | ||
1187 | } | ||
1188 | |||
1189 | static int vmw_master_create(struct drm_device *dev, | ||
1190 | struct drm_master *master) | ||
1191 | { | ||
1192 | struct vmw_master *vmaster; | ||
1193 | |||
1194 | vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); | ||
1195 | if (unlikely(!vmaster)) | ||
1196 | return -ENOMEM; | ||
1197 | |||
1198 | vmw_master_init(vmaster); | ||
1199 | ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); | ||
1200 | master->driver_priv = vmaster; | ||
1201 | |||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1205 | static void vmw_master_destroy(struct drm_device *dev, | ||
1206 | struct drm_master *master) | ||
1207 | { | ||
1208 | struct vmw_master *vmaster = vmw_master(master); | ||
1209 | |||
1210 | master->driver_priv = NULL; | ||
1211 | kfree(vmaster); | ||
1212 | } | ||
1213 | |||
1109 | static int vmw_master_set(struct drm_device *dev, | 1214 | static int vmw_master_set(struct drm_device *dev, |
1110 | struct drm_file *file_priv, | 1215 | struct drm_file *file_priv, |
1111 | bool from_open) | 1216 | bool from_open) |
1112 | { | 1217 | { |
1218 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
1219 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
1220 | struct vmw_master *active = dev_priv->active_master; | ||
1221 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1222 | int ret = 0; | ||
1223 | |||
1224 | if (active) { | ||
1225 | BUG_ON(active != &dev_priv->fbdev_master); | ||
1226 | ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); | ||
1227 | if (unlikely(ret != 0)) | ||
1228 | return ret; | ||
1229 | |||
1230 | ttm_lock_set_kill(&active->lock, true, SIGTERM); | ||
1231 | dev_priv->active_master = NULL; | ||
1232 | } | ||
1233 | |||
1234 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); | ||
1235 | if (!from_open) { | ||
1236 | ttm_vt_unlock(&vmaster->lock); | ||
1237 | BUG_ON(vmw_fp->locked_master != file_priv->master); | ||
1238 | drm_master_put(&vmw_fp->locked_master); | ||
1239 | } | ||
1240 | |||
1241 | dev_priv->active_master = vmaster; | ||
1242 | |||
1113 | /* | 1243 | /* |
1114 | * Inform a new master that the layout may have changed while | 1244 | * Inform a new master that the layout may have changed while |
1115 | * it was gone. | 1245 | * it was gone. |
@@ -1124,10 +1254,31 @@ static void vmw_master_drop(struct drm_device *dev, | |||
1124 | struct drm_file *file_priv) | 1254 | struct drm_file *file_priv) |
1125 | { | 1255 | { |
1126 | struct vmw_private *dev_priv = vmw_priv(dev); | 1256 | struct vmw_private *dev_priv = vmw_priv(dev); |
1257 | struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); | ||
1258 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
1259 | int ret; | ||
1260 | |||
1261 | /** | ||
1262 | * Make sure the master doesn't disappear while we have | ||
1263 | * it locked. | ||
1264 | */ | ||
1127 | 1265 | ||
1266 | vmw_fp->locked_master = drm_master_get(file_priv->master); | ||
1267 | ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); | ||
1128 | vmw_kms_legacy_hotspot_clear(dev_priv); | 1268 | vmw_kms_legacy_hotspot_clear(dev_priv); |
1269 | if (unlikely((ret != 0))) { | ||
1270 | DRM_ERROR("Unable to lock TTM at VT switch.\n"); | ||
1271 | drm_master_put(&vmw_fp->locked_master); | ||
1272 | } | ||
1273 | |||
1274 | ttm_lock_set_kill(&vmaster->lock, false, SIGTERM); | ||
1275 | |||
1129 | if (!dev_priv->enable_fb) | 1276 | if (!dev_priv->enable_fb) |
1130 | vmw_svga_disable(dev_priv); | 1277 | vmw_svga_disable(dev_priv); |
1278 | |||
1279 | dev_priv->active_master = &dev_priv->fbdev_master; | ||
1280 | ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); | ||
1281 | ttm_vt_unlock(&dev_priv->fbdev_master.lock); | ||
1131 | } | 1282 | } |
1132 | 1283 | ||
1133 | /** | 1284 | /** |
@@ -1406,6 +1557,8 @@ static struct drm_driver driver = { | |||
1406 | .disable_vblank = vmw_disable_vblank, | 1557 | .disable_vblank = vmw_disable_vblank, |
1407 | .ioctls = vmw_ioctls, | 1558 | .ioctls = vmw_ioctls, |
1408 | .num_ioctls = ARRAY_SIZE(vmw_ioctls), | 1559 | .num_ioctls = ARRAY_SIZE(vmw_ioctls), |
1560 | .master_create = vmw_master_create, | ||
1561 | .master_destroy = vmw_master_destroy, | ||
1409 | .master_set = vmw_master_set, | 1562 | .master_set = vmw_master_set, |
1410 | .master_drop = vmw_master_drop, | 1563 | .master_drop = vmw_master_drop, |
1411 | .open = vmw_driver_open, | 1564 | .open = vmw_driver_open, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3a358a5495e4..366dcfc1f9bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -44,9 +44,9 @@ | |||
44 | #include <linux/sync_file.h> | 44 | #include <linux/sync_file.h> |
45 | 45 | ||
46 | #define VMWGFX_DRIVER_NAME "vmwgfx" | 46 | #define VMWGFX_DRIVER_NAME "vmwgfx" |
47 | #define VMWGFX_DRIVER_DATE "20190328" | 47 | #define VMWGFX_DRIVER_DATE "20180704" |
48 | #define VMWGFX_DRIVER_MAJOR 2 | 48 | #define VMWGFX_DRIVER_MAJOR 2 |
49 | #define VMWGFX_DRIVER_MINOR 16 | 49 | #define VMWGFX_DRIVER_MINOR 15 |
50 | #define VMWGFX_DRIVER_PATCHLEVEL 0 | 50 | #define VMWGFX_DRIVER_PATCHLEVEL 0 |
51 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) | 51 | #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) |
52 | #define VMWGFX_MAX_RELOCATIONS 2048 | 52 | #define VMWGFX_MAX_RELOCATIONS 2048 |
@@ -81,30 +81,19 @@ | |||
81 | #define VMW_RES_SHADER ttm_driver_type4 | 81 | #define VMW_RES_SHADER ttm_driver_type4 |
82 | 82 | ||
83 | struct vmw_fpriv { | 83 | struct vmw_fpriv { |
84 | struct drm_master *locked_master; | ||
84 | struct ttm_object_file *tfile; | 85 | struct ttm_object_file *tfile; |
85 | bool gb_aware; /* user-space is guest-backed aware */ | 86 | bool gb_aware; /* user-space is guest-backed aware */ |
86 | }; | 87 | }; |
87 | 88 | ||
88 | /** | ||
89 | * struct vmw_buffer_object - TTM buffer object with vmwgfx additions | ||
90 | * @base: The TTM buffer object | ||
91 | * @res_tree: RB tree of resources using this buffer object as a backing MOB | ||
92 | * @pin_count: pin depth | ||
93 | * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB | ||
94 | * @map: Kmap object for semi-persistent mappings | ||
95 | * @res_prios: Eviction priority counts for attached resources | ||
96 | * @dirty: structure for user-space dirty-tracking | ||
97 | */ | ||
98 | struct vmw_buffer_object { | 89 | struct vmw_buffer_object { |
99 | struct ttm_buffer_object base; | 90 | struct ttm_buffer_object base; |
100 | struct rb_root res_tree; | 91 | struct list_head res_list; |
101 | s32 pin_count; | 92 | s32 pin_count; |
102 | /* Not ref-counted. Protected by binding_mutex */ | 93 | /* Not ref-counted. Protected by binding_mutex */ |
103 | struct vmw_resource *dx_query_ctx; | 94 | struct vmw_resource *dx_query_ctx; |
104 | /* Protected by reservation */ | 95 | /* Protected by reservation */ |
105 | struct ttm_bo_kmap_obj map; | 96 | struct ttm_bo_kmap_obj map; |
106 | u32 res_prios[TTM_MAX_BO_PRIORITY]; | ||
107 | struct vmw_bo_dirty *dirty; | ||
108 | }; | 97 | }; |
109 | 98 | ||
110 | /** | 99 | /** |
@@ -135,8 +124,7 @@ struct vmw_res_func; | |||
135 | * @res_dirty: Resource contains data not yet in the backup buffer. Protected | 124 | * @res_dirty: Resource contains data not yet in the backup buffer. Protected |
136 | * by resource reserved. | 125 | * by resource reserved. |
137 | * @backup_dirty: Backup buffer contains data not yet in the HW resource. | 126 | * @backup_dirty: Backup buffer contains data not yet in the HW resource. |
138 | * Protected by resource reserved. | 127 | * Protected by resource reserved. |
139 | * @coherent: Emulate coherency by tracking vm accesses. | ||
140 | * @backup: The backup buffer if any. Protected by resource reserved. | 128 | * @backup: The backup buffer if any. Protected by resource reserved. |
141 | * @backup_offset: Offset into the backup buffer if any. Protected by resource | 129 | * @backup_offset: Offset into the backup buffer if any. Protected by resource |
142 | * reserved. Note that only a few resource types can have a @backup_offset | 130 | * reserved. Note that only a few resource types can have a @backup_offset |
@@ -145,32 +133,28 @@ struct vmw_res_func; | |||
145 | * pin-count greater than zero. It is not on the resource LRU lists and its | 133 | * pin-count greater than zero. It is not on the resource LRU lists and its |
146 | * backup buffer is pinned. Hence it can't be evicted. | 134 | * backup buffer is pinned. Hence it can't be evicted. |
147 | * @func: Method vtable for this resource. Immutable. | 135 | * @func: Method vtable for this resource. Immutable. |
148 | * @mob_node: Node for the MOB backup rbtree. Protected by @backup reserved. | ||
149 | * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. | 136 | * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. |
137 | * @mob_head: List head for the MOB backup list. Protected by @backup reserved. | ||
150 | * @binding_head: List head for the context binding list. Protected by | 138 | * @binding_head: List head for the context binding list. Protected by |
151 | * the @dev_priv::binding_mutex | 139 | * the @dev_priv::binding_mutex |
152 | * @res_free: The resource destructor. | 140 | * @res_free: The resource destructor. |
153 | * @hw_destroy: Callback to destroy the resource on the device, as part of | 141 | * @hw_destroy: Callback to destroy the resource on the device, as part of |
154 | * resource destruction. | 142 | * resource destruction. |
155 | */ | 143 | */ |
156 | struct vmw_resource_dirty; | ||
157 | struct vmw_resource { | 144 | struct vmw_resource { |
158 | struct kref kref; | 145 | struct kref kref; |
159 | struct vmw_private *dev_priv; | 146 | struct vmw_private *dev_priv; |
160 | int id; | 147 | int id; |
161 | u32 used_prio; | ||
162 | unsigned long backup_size; | 148 | unsigned long backup_size; |
163 | u32 res_dirty : 1; | 149 | bool res_dirty; |
164 | u32 backup_dirty : 1; | 150 | bool backup_dirty; |
165 | u32 coherent : 1; | ||
166 | struct vmw_buffer_object *backup; | 151 | struct vmw_buffer_object *backup; |
167 | unsigned long backup_offset; | 152 | unsigned long backup_offset; |
168 | unsigned long pin_count; | 153 | unsigned long pin_count; |
169 | const struct vmw_res_func *func; | 154 | const struct vmw_res_func *func; |
170 | struct rb_node mob_node; | ||
171 | struct list_head lru_head; | 155 | struct list_head lru_head; |
156 | struct list_head mob_head; | ||
172 | struct list_head binding_head; | 157 | struct list_head binding_head; |
173 | struct vmw_resource_dirty *dirty; | ||
174 | void (*res_free) (struct vmw_resource *res); | 158 | void (*res_free) (struct vmw_resource *res); |
175 | void (*hw_destroy) (struct vmw_resource *res); | 159 | void (*hw_destroy) (struct vmw_resource *res); |
176 | }; | 160 | }; |
@@ -392,6 +376,10 @@ struct vmw_sw_context{ | |||
392 | struct vmw_legacy_display; | 376 | struct vmw_legacy_display; |
393 | struct vmw_overlay; | 377 | struct vmw_overlay; |
394 | 378 | ||
379 | struct vmw_master { | ||
380 | struct ttm_lock lock; | ||
381 | }; | ||
382 | |||
395 | struct vmw_vga_topology_state { | 383 | struct vmw_vga_topology_state { |
396 | uint32_t width; | 384 | uint32_t width; |
397 | uint32_t height; | 385 | uint32_t height; |
@@ -554,8 +542,11 @@ struct vmw_private { | |||
554 | spinlock_t svga_lock; | 542 | spinlock_t svga_lock; |
555 | 543 | ||
556 | /** | 544 | /** |
557 | * PM management. | 545 | * Master management. |
558 | */ | 546 | */ |
547 | |||
548 | struct vmw_master *active_master; | ||
549 | struct vmw_master fbdev_master; | ||
559 | struct notifier_block pm_nb; | 550 | struct notifier_block pm_nb; |
560 | bool refuse_hibernation; | 551 | bool refuse_hibernation; |
561 | bool suspend_locked; | 552 | bool suspend_locked; |
@@ -604,9 +595,6 @@ struct vmw_private { | |||
604 | 595 | ||
605 | /* Validation memory reservation */ | 596 | /* Validation memory reservation */ |
606 | struct vmw_validation_mem vvm; | 597 | struct vmw_validation_mem vvm; |
607 | |||
608 | /* VM operations */ | ||
609 | struct vm_operations_struct vm_ops; | ||
610 | }; | 598 | }; |
611 | 599 | ||
612 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) | 600 | static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res) |
@@ -624,6 +612,11 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv) | |||
624 | return (struct vmw_fpriv *)file_priv->driver_priv; | 612 | return (struct vmw_fpriv *)file_priv->driver_priv; |
625 | } | 613 | } |
626 | 614 | ||
615 | static inline struct vmw_master *vmw_master(struct drm_master *master) | ||
616 | { | ||
617 | return (struct vmw_master *) master->driver_priv; | ||
618 | } | ||
619 | |||
627 | /* | 620 | /* |
628 | * The locking here is fine-grained, so that it is performed once | 621 | * The locking here is fine-grained, so that it is performed once |
629 | * for every read- and write operation. This is of course costly, but we | 622 | * for every read- and write operation. This is of course costly, but we |
@@ -676,8 +669,7 @@ extern void vmw_resource_unreference(struct vmw_resource **p_res); | |||
676 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); | 669 | extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); |
677 | extern struct vmw_resource * | 670 | extern struct vmw_resource * |
678 | vmw_resource_reference_unless_doomed(struct vmw_resource *res); | 671 | vmw_resource_reference_unless_doomed(struct vmw_resource *res); |
679 | extern int vmw_resource_validate(struct vmw_resource *res, bool intr, | 672 | extern int vmw_resource_validate(struct vmw_resource *res, bool intr); |
680 | bool dirtying); | ||
681 | extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, | 673 | extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, |
682 | bool no_backup); | 674 | bool no_backup); |
683 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); | 675 | extern bool vmw_resource_needs_backup(const struct vmw_resource *res); |
@@ -717,23 +709,6 @@ extern void vmw_query_move_notify(struct ttm_buffer_object *bo, | |||
717 | extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); | 709 | extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob); |
718 | extern void vmw_resource_evict_all(struct vmw_private *dev_priv); | 710 | extern void vmw_resource_evict_all(struct vmw_private *dev_priv); |
719 | extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); | 711 | extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo); |
720 | void vmw_resource_mob_attach(struct vmw_resource *res); | ||
721 | void vmw_resource_mob_detach(struct vmw_resource *res); | ||
722 | void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, | ||
723 | pgoff_t end); | ||
724 | int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, | ||
725 | pgoff_t end, pgoff_t *num_prefault); | ||
726 | |||
727 | /** | ||
728 | * vmw_resource_mob_attached - Whether a resource currently has a mob attached | ||
729 | * @res: The resource | ||
730 | * | ||
731 | * Return: true if the resource has a mob attached, false otherwise. | ||
732 | */ | ||
733 | static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) | ||
734 | { | ||
735 | return !RB_EMPTY_NODE(&res->mob_node); | ||
736 | } | ||
737 | 712 | ||
738 | /** | 713 | /** |
739 | * vmw_user_resource_noref_release - release a user resource pointer looked up | 714 | * vmw_user_resource_noref_release - release a user resource pointer looked up |
@@ -812,54 +787,6 @@ static inline void vmw_user_bo_noref_release(void) | |||
812 | ttm_base_object_noref_release(); | 787 | ttm_base_object_noref_release(); |
813 | } | 788 | } |
814 | 789 | ||
815 | /** | ||
816 | * vmw_bo_prio_adjust - Adjust the buffer object eviction priority | ||
817 | * according to attached resources | ||
818 | * @vbo: The struct vmw_buffer_object | ||
819 | */ | ||
820 | static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo) | ||
821 | { | ||
822 | int i = ARRAY_SIZE(vbo->res_prios); | ||
823 | |||
824 | while (i--) { | ||
825 | if (vbo->res_prios[i]) { | ||
826 | vbo->base.priority = i; | ||
827 | return; | ||
828 | } | ||
829 | } | ||
830 | |||
831 | vbo->base.priority = 3; | ||
832 | } | ||
833 | |||
834 | /** | ||
835 | * vmw_bo_prio_add - Notify a buffer object of a newly attached resource | ||
836 | * eviction priority | ||
837 | * @vbo: The struct vmw_buffer_object | ||
838 | * @prio: The resource priority | ||
839 | * | ||
840 | * After being notified, the code assigns the highest resource eviction priority | ||
841 | * to the backing buffer object (mob). | ||
842 | */ | ||
843 | static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio) | ||
844 | { | ||
845 | if (vbo->res_prios[prio]++ == 0) | ||
846 | vmw_bo_prio_adjust(vbo); | ||
847 | } | ||
848 | |||
849 | /** | ||
850 | * vmw_bo_prio_del - Notify a buffer object of a resource with a certain | ||
851 | * priority being removed | ||
852 | * @vbo: The struct vmw_buffer_object | ||
853 | * @prio: The resource priority | ||
854 | * | ||
855 | * After being notified, the code assigns the highest resource eviction priority | ||
856 | * to the backing buffer object (mob). | ||
857 | */ | ||
858 | static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio) | ||
859 | { | ||
860 | if (--vbo->res_prios[prio] == 0) | ||
861 | vmw_bo_prio_adjust(vbo); | ||
862 | } | ||
863 | 790 | ||
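The three inline helpers being removed here keep one reference count per eviction priority on the buffer object and always publish the highest priority level that still has attached users, falling back to 3 when none remain. A minimal, stand-alone user-space sketch of that bookkeeping (the demo_* names are hypothetical, and NUM_PRIOS stands in for TTM_MAX_BO_PRIORITY, assumed here to be 4):

#include <stdio.h>

#define NUM_PRIOS 4   /* stands in for TTM_MAX_BO_PRIORITY; assumed value */

struct demo_bo {
	unsigned int res_prios[NUM_PRIOS]; /* attached users per priority */
	unsigned int priority;             /* published eviction priority */
};

static void demo_adjust(struct demo_bo *bo)
{
	int i = NUM_PRIOS;

	while (i--) {
		if (bo->res_prios[i]) {
			bo->priority = i;
			return;
		}
	}
	bo->priority = 3; /* default when no resource is attached */
}

static void demo_add(struct demo_bo *bo, int prio)
{
	if (bo->res_prios[prio]++ == 0)
		demo_adjust(bo);
}

static void demo_del(struct demo_bo *bo, int prio)
{
	if (--bo->res_prios[prio] == 0)
		demo_adjust(bo);
}

int main(void)
{
	struct demo_bo bo = { .priority = 3 };

	demo_add(&bo, 1);                       /* one resource at priority 1 */
	demo_add(&bo, 3);                       /* one resource at priority 3 */
	printf("priority = %u\n", bo.priority); /* 3: highest non-empty slot  */
	demo_del(&bo, 3);
	printf("priority = %u\n", bo.priority); /* drops back to 1            */
	return 0;
}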
864 | /** | 791 | /** |
865 | * Misc Ioctl functionality - vmwgfx_ioctl.c | 792 | * Misc Ioctl functionality - vmwgfx_ioctl.c |
@@ -1089,6 +1016,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, | |||
1089 | int vmw_kms_write_svga(struct vmw_private *vmw_priv, | 1016 | int vmw_kms_write_svga(struct vmw_private *vmw_priv, |
1090 | unsigned width, unsigned height, unsigned pitch, | 1017 | unsigned width, unsigned height, unsigned pitch, |
1091 | unsigned bpp, unsigned depth); | 1018 | unsigned bpp, unsigned depth); |
1019 | void vmw_kms_idle_workqueues(struct vmw_master *vmaster); | ||
1092 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, | 1020 | bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv, |
1093 | uint32_t pitch, | 1021 | uint32_t pitch, |
1094 | uint32_t height); | 1022 | uint32_t height); |
@@ -1411,25 +1339,6 @@ int vmw_host_log(const char *log); | |||
1411 | DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) | 1339 | DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) |
1412 | 1340 | ||
1413 | /** | 1341 | /** |
1414 | * VMW_DEBUG_KMS - Debug output for kernel mode-setting | ||
1415 | * | ||
1416 | * This macro is for debugging vmwgfx mode-setting code. | ||
1417 | */ | ||
1418 | #define VMW_DEBUG_KMS(fmt, ...) \ | ||
1419 | DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) | ||
1420 | |||
1421 | /* Resource dirtying - vmwgfx_page_dirty.c */ | ||
1422 | void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo); | ||
1423 | int vmw_bo_dirty_add(struct vmw_buffer_object *vbo); | ||
1424 | void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res); | ||
1425 | void vmw_bo_dirty_clear_res(struct vmw_resource *res); | ||
1426 | void vmw_bo_dirty_release(struct vmw_buffer_object *vbo); | ||
1427 | void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, | ||
1428 | pgoff_t start, pgoff_t end); | ||
1429 | vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); | ||
1430 | vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); | ||
1431 | |||
1432 | /** | ||
1433 | * Inline helper functions | 1342 | * Inline helper functions |
1434 | */ | 1343 | */ |
1435 | 1344 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 319c1ca35663..33533d126277 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -2560,6 +2560,7 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, | |||
2560 | offsetof(typeof(*cmd), sid)); | 2560 | offsetof(typeof(*cmd), sid)); |
2561 | 2561 | ||
2562 | cmd = container_of(header, typeof(*cmd), header); | 2562 | cmd = container_of(header, typeof(*cmd), header); |
2563 | |||
2563 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2564 | return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2564 | VMW_RES_DIRTY_NONE, user_surface_converter, | 2565 | VMW_RES_DIRTY_NONE, user_surface_converter, |
2565 | &cmd->sid, NULL); | 2566 | &cmd->sid, NULL); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e7222fa2cfdf..b97bc8e5944b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1462,7 +1462,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, | |||
1462 | if (dev_priv->active_display_unit == vmw_du_screen_target && | 1462 | if (dev_priv->active_display_unit == vmw_du_screen_target && |
1463 | (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || | 1463 | (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width || |
1464 | drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { | 1464 | drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) { |
1465 | VMW_DEBUG_KMS("Screen size not supported.\n"); | 1465 | DRM_ERROR("Screen size not supported.\n"); |
1466 | return -EINVAL; | 1466 | return -EINVAL; |
1467 | } | 1467 | } |
1468 | 1468 | ||
@@ -1486,7 +1486,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, | |||
1486 | * limit on primary bounding box | 1486 | * limit on primary bounding box |
1487 | */ | 1487 | */ |
1488 | if (pixel_mem > dev_priv->prim_bb_mem) { | 1488 | if (pixel_mem > dev_priv->prim_bb_mem) { |
1489 | VMW_DEBUG_KMS("Combined output size too large.\n"); | 1489 | DRM_ERROR("Combined output size too large.\n"); |
1490 | return -EINVAL; | 1490 | return -EINVAL; |
1491 | } | 1491 | } |
1492 | 1492 | ||
@@ -1496,7 +1496,7 @@ static int vmw_kms_check_display_memory(struct drm_device *dev, | |||
1496 | bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; | 1496 | bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4; |
1497 | 1497 | ||
1498 | if (bb_mem > dev_priv->prim_bb_mem) { | 1498 | if (bb_mem > dev_priv->prim_bb_mem) { |
1499 | VMW_DEBUG_KMS("Topology is beyond supported limits.\n"); | 1499 | DRM_ERROR("Topology is beyond supported limits.\n"); |
1500 | return -EINVAL; | 1500 | return -EINVAL; |
1501 | } | 1501 | } |
1502 | } | 1502 | } |
@@ -1645,7 +1645,6 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
1645 | struct vmw_connector_state *vmw_conn_state; | 1645 | struct vmw_connector_state *vmw_conn_state; |
1646 | 1646 | ||
1647 | if (!du->pref_active && new_crtc_state->enable) { | 1647 | if (!du->pref_active && new_crtc_state->enable) { |
1648 | VMW_DEBUG_KMS("Enabling a disabled display unit\n"); | ||
1649 | ret = -EINVAL; | 1648 | ret = -EINVAL; |
1650 | goto clean; | 1649 | goto clean; |
1651 | } | 1650 | } |
@@ -1702,10 +1701,8 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev, | |||
1702 | return ret; | 1701 | return ret; |
1703 | 1702 | ||
1704 | ret = vmw_kms_check_implicit(dev, state); | 1703 | ret = vmw_kms_check_implicit(dev, state); |
1705 | if (ret) { | 1704 | if (ret) |
1706 | VMW_DEBUG_KMS("Invalid implicit state\n"); | ||
1707 | return ret; | 1705 | return ret; |
1708 | } | ||
1709 | 1706 | ||
1710 | if (!state->allow_modeset) | 1707 | if (!state->allow_modeset) |
1711 | return ret; | 1708 | return ret; |
@@ -2350,9 +2347,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2350 | 2347 | ||
2351 | if (!arg->num_outputs) { | 2348 | if (!arg->num_outputs) { |
2352 | struct drm_rect def_rect = {0, 0, 800, 600}; | 2349 | struct drm_rect def_rect = {0, 0, 800, 600}; |
2353 | VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n", | ||
2354 | def_rect.x1, def_rect.y1, | ||
2355 | def_rect.x2, def_rect.y2); | ||
2356 | vmw_du_update_layout(dev_priv, 1, &def_rect); | 2350 | vmw_du_update_layout(dev_priv, 1, &def_rect); |
2357 | return 0; | 2351 | return 0; |
2358 | } | 2352 | } |
@@ -2373,7 +2367,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2373 | 2367 | ||
2374 | drm_rects = (struct drm_rect *)rects; | 2368 | drm_rects = (struct drm_rect *)rects; |
2375 | 2369 | ||
2376 | VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs); | ||
2377 | for (i = 0; i < arg->num_outputs; i++) { | 2370 | for (i = 0; i < arg->num_outputs; i++) { |
2378 | struct drm_vmw_rect curr_rect; | 2371 | struct drm_vmw_rect curr_rect; |
2379 | 2372 | ||
@@ -2390,10 +2383,6 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2390 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; | 2383 | drm_rects[i].x2 = curr_rect.x + curr_rect.w; |
2391 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; | 2384 | drm_rects[i].y2 = curr_rect.y + curr_rect.h; |
2392 | 2385 | ||
2393 | VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n", | ||
2394 | drm_rects[i].x1, drm_rects[i].y1, | ||
2395 | drm_rects[i].x2, drm_rects[i].y2); | ||
2396 | |||
2397 | /* | 2386 | /* |
2398 | * Currently this check is limiting the topology within | 2387 | * Currently this check is limiting the topology within |
2399 | * mode_config->max (which actually is max texture size | 2388 | * mode_config->max (which actually is max texture size |
@@ -2404,9 +2393,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, | |||
2404 | if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || | 2393 | if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 || |
2405 | drm_rects[i].x2 > mode_config->max_width || | 2394 | drm_rects[i].x2 > mode_config->max_width || |
2406 | drm_rects[i].y2 > mode_config->max_height) { | 2395 | drm_rects[i].y2 > mode_config->max_height) { |
2407 | VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n", | 2396 | DRM_ERROR("Invalid GUI layout.\n"); |
2408 | drm_rects[i].x1, drm_rects[i].y1, | ||
2409 | drm_rects[i].x2, drm_rects[i].y2); | ||
2410 | ret = -EINVAL; | 2397 | ret = -EINVAL; |
2411 | goto out_free; | 2398 | goto out_free; |
2412 | } | 2399 | } |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c deleted file mode 100644 index 730c51e397dd..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ /dev/null | |||
@@ -1,472 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 OR MIT | ||
2 | /************************************************************************** | ||
3 | * | ||
4 | * Copyright 2019 VMware, Inc., Palo Alto, CA., USA | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | #include "vmwgfx_drv.h" | ||
28 | |||
29 | /* | ||
30 | * Different methods for tracking dirty: | ||
31 | * VMW_BO_DIRTY_PAGETABLE - Scan the pagetable for hardware dirty bits | ||
32 | * VMW_BO_DIRTY_MKWRITE - Write-protect page table entries and record write- | ||
33 | * accesses in the VM mkwrite() callback | ||
34 | */ | ||
35 | enum vmw_bo_dirty_method { | ||
36 | VMW_BO_DIRTY_PAGETABLE, | ||
37 | VMW_BO_DIRTY_MKWRITE, | ||
38 | }; | ||
39 | |||
40 | /* | ||
41 | * No dirtied pages at scan trigger a transition to the _MKWRITE method, | ||
42 | * similarly a certain percentage of dirty pages trigger a transition to | ||
43 | * the _PAGETABLE method. How many triggers should we wait for before | ||
44 | * changing method? | ||
45 | */ | ||
46 | #define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2 | ||
47 | |||
48 | /* Percentage to trigger a transition to the _PAGETABLE method */ | ||
49 | #define VMW_DIRTY_PERCENTAGE 10 | ||
50 | |||
51 | /** | ||
52 | * struct vmw_bo_dirty - Dirty information for buffer objects | ||
53 | * @start: First currently dirty bit | ||
54 | * @end: Last currently dirty bit + 1 | ||
55 | * @method: The currently used dirty method | ||
56 | * @change_count: Number of consecutive method change triggers | ||
57 | * @ref_count: Reference count for this structure | ||
58 | * @bitmap_size: The size of the bitmap in bits. Typically equal to the | ||
59 | * number of pages in the bo. | ||
60 | * @size: The accounting size for this struct. | ||
61 | * @bitmap: A bitmap where each bit represents a page. A set bit means a | ||
62 | * dirty page. | ||
63 | */ | ||
64 | struct vmw_bo_dirty { | ||
65 | unsigned long start; | ||
66 | unsigned long end; | ||
67 | enum vmw_bo_dirty_method method; | ||
68 | unsigned int change_count; | ||
69 | unsigned int ref_count; | ||
70 | unsigned long bitmap_size; | ||
71 | size_t size; | ||
72 | unsigned long bitmap[0]; | ||
73 | }; | ||
74 | |||
75 | /** | ||
76 | * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits | ||
77 | * @vbo: The buffer object to scan | ||
78 | * | ||
79 | * Scans the pagetable for dirty bits. Clear those bits and modify the | ||
80 | * dirty structure with the results. This function may change the | ||
81 | * dirty-tracking method. | ||
82 | */ | ||
83 | static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo) | ||
84 | { | ||
85 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
86 | pgoff_t offset = drm_vma_node_start(&vbo->base.vma_node); | ||
87 | struct address_space *mapping = vbo->base.bdev->dev_mapping; | ||
88 | pgoff_t num_marked; | ||
89 | |||
90 | num_marked = apply_as_clean(mapping, | ||
91 | offset, dirty->bitmap_size, | ||
92 | offset, &dirty->bitmap[0], | ||
93 | &dirty->start, &dirty->end); | ||
94 | if (num_marked == 0) | ||
95 | dirty->change_count++; | ||
96 | else | ||
97 | dirty->change_count = 0; | ||
98 | |||
99 | if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { | ||
100 | dirty->change_count = 0; | ||
101 | dirty->method = VMW_BO_DIRTY_MKWRITE; | ||
102 | apply_as_wrprotect(mapping, | ||
103 | offset, dirty->bitmap_size); | ||
104 | apply_as_clean(mapping, | ||
105 | offset, dirty->bitmap_size, | ||
106 | offset, &dirty->bitmap[0], | ||
107 | &dirty->start, &dirty->end); | ||
108 | } | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * vmw_bo_dirty_scan_mkwrite - Reset the mkwrite dirty-tracking method | ||
113 | * @vbo: The buffer object to scan | ||
114 | * | ||
115 | * Write-protect pages written to so that consecutive write accesses will | ||
116 | * trigger a call to mkwrite. | ||
117 | * | ||
118 | * This function may change the dirty-tracking method. | ||
119 | */ | ||
120 | static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo) | ||
121 | { | ||
122 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
123 | unsigned long offset = drm_vma_node_start(&vbo->base.vma_node); | ||
124 | struct address_space *mapping = vbo->base.bdev->dev_mapping; | ||
125 | pgoff_t num_marked; | ||
126 | |||
127 | if (dirty->end <= dirty->start) | ||
128 | return; | ||
129 | |||
130 | num_marked = apply_as_wrprotect(vbo->base.bdev->dev_mapping, | ||
131 | dirty->start + offset, | ||
132 | dirty->end - dirty->start); | ||
133 | |||
134 | if (100UL * num_marked / dirty->bitmap_size > | ||
135 | VMW_DIRTY_PERCENTAGE) { | ||
136 | dirty->change_count++; | ||
137 | } else { | ||
138 | dirty->change_count = 0; | ||
139 | } | ||
140 | |||
141 | if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) { | ||
142 | pgoff_t start = 0; | ||
143 | pgoff_t end = dirty->bitmap_size; | ||
144 | |||
145 | dirty->method = VMW_BO_DIRTY_PAGETABLE; | ||
146 | apply_as_clean(mapping, offset, end, offset, &dirty->bitmap[0], | ||
147 | &start, &end); | ||
148 | bitmap_clear(&dirty->bitmap[0], 0, dirty->bitmap_size); | ||
149 | if (dirty->start < dirty->end) | ||
150 | bitmap_set(&dirty->bitmap[0], dirty->start, | ||
151 | dirty->end - dirty->start); | ||
152 | dirty->change_count = 0; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * vmw_bo_dirty_scan - Scan for dirty pages and add them to the dirty | ||
158 | * tracking structure | ||
159 | * @vbo: The buffer object to scan | ||
160 | * | ||
161 | * This function may change the dirty tracking method. | ||
162 | */ | ||
163 | void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo) | ||
164 | { | ||
165 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
166 | |||
167 | if (dirty->method == VMW_BO_DIRTY_PAGETABLE) | ||
168 | vmw_bo_dirty_scan_pagetable(vbo); | ||
169 | else | ||
170 | vmw_bo_dirty_scan_mkwrite(vbo); | ||
171 | } | ||
172 | |||
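The transition rules sketched in the comment block at the top of this file work as a hysteresis: a page-table scan that finds no newly dirtied pages counts toward switching to mkwrite tracking, an mkwrite scan that finds more than VMW_DIRTY_PERCENTAGE percent of the pages dirtied counts toward switching back, and either switch needs strictly more than VMW_DIRTY_NUM_CHANGE_TRIGGERS consecutive triggering scans so that a single quiet or busy scan does not flip the method. A stand-alone user-space approximation of just that decision logic (no page tables involved, names hypothetical):

#include <stdio.h>

#define VMW_DIRTY_NUM_CHANGE_TRIGGERS 2
#define VMW_DIRTY_PERCENTAGE 10

enum method { PAGETABLE, MKWRITE };

struct tracker {
	enum method method;
	unsigned int change_count;
	unsigned long bitmap_size;   /* pages covered by the tracker */
};

/* One scan step: num_marked is what the real scan would have found. */
static void scan(struct tracker *t, unsigned long num_marked)
{
	int trigger;

	if (t->method == PAGETABLE)
		trigger = (num_marked == 0);
	else
		trigger = (100UL * num_marked / t->bitmap_size >
			   VMW_DIRTY_PERCENTAGE);

	t->change_count = trigger ? t->change_count + 1 : 0;

	if (t->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
		t->method = (t->method == PAGETABLE) ? MKWRITE : PAGETABLE;
		t->change_count = 0;
	}
}

int main(void)
{
	struct tracker t = { .method = PAGETABLE, .bitmap_size = 100 };
	/* Three idle scans flip PAGETABLE -> MKWRITE; three heavy (>10%)
	 * scans flip back again. */
	unsigned long marked[] = { 0, 0, 0, 20, 20, 20 };
	unsigned int i;

	for (i = 0; i < sizeof(marked) / sizeof(marked[0]); i++) {
		scan(&t, marked[i]);
		printf("scan %u: %lu marked -> %s (count %u)\n", i, marked[i],
		       t.method == PAGETABLE ? "PAGETABLE" : "MKWRITE",
		       t.change_count);
	}
	return 0;
}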
173 | /** | ||
174 | * vmw_bo_dirty_pre_unmap - write-protect and pick up dirty pages before | ||
175 | * an unmap_mapping_range operation. | ||
176 | * @vbo: The buffer object, | ||
177 | * @start: First page of the range within the buffer object. | ||
178 | * @end: Last page of the range within the buffer object + 1. | ||
179 | * | ||
180 | * If we're using the _PAGETABLE scan method, we may leak dirty pages | ||
181 | * when calling unmap_mapping_range(). This function makes sure we pick | ||
182 | * up all dirty pages. | ||
183 | */ | ||
184 | static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo, | ||
185 | pgoff_t start, pgoff_t end) | ||
186 | { | ||
187 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
188 | unsigned long offset = drm_vma_node_start(&vbo->base.vma_node); | ||
189 | struct address_space *mapping = vbo->base.bdev->dev_mapping; | ||
190 | |||
191 | if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end) | ||
192 | return; | ||
193 | |||
194 | apply_as_wrprotect(mapping, start + offset, end - start); | ||
195 | apply_as_clean(mapping, start + offset, end - start, offset, | ||
196 | &dirty->bitmap[0], &dirty->start, &dirty->end); | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * vmw_bo_dirty_unmap - Clear all ptes pointing to a range within a bo | ||
201 | * @vbo: The buffer object, | ||
202 | * @start: First page of the range within the buffer object. | ||
203 | * @end: Last page of the range within the buffer object + 1. | ||
204 | * | ||
205 | * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange. | ||
206 | */ | ||
207 | void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, | ||
208 | pgoff_t start, pgoff_t end) | ||
209 | { | ||
210 | unsigned long offset = drm_vma_node_start(&vbo->base.vma_node); | ||
211 | struct address_space *mapping = vbo->base.bdev->dev_mapping; | ||
212 | |||
213 | vmw_bo_dirty_pre_unmap(vbo, start, end); | ||
214 | unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT, | ||
215 | (loff_t) (end - start) << PAGE_SHIFT); | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * vmw_bo_dirty_add - Add a dirty-tracking user to a buffer object | ||
220 | * @vbo: The buffer object | ||
221 | * | ||
222 | * This function registers a dirty-tracking user to a buffer object. | ||
223 | * A user can be for example a resource or a vma in a special user-space | ||
224 | * mapping. | ||
225 | * | ||
226 | * Return: Zero on success, -ENOMEM on memory allocation failure. | ||
227 | */ | ||
228 | int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) | ||
229 | { | ||
230 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
231 | pgoff_t num_pages = vbo->base.num_pages; | ||
232 | size_t size, acc_size; | ||
233 | int ret; | ||
234 | static struct ttm_operation_ctx ctx = { | ||
235 | .interruptible = false, | ||
236 | .no_wait_gpu = false | ||
237 | }; | ||
238 | |||
239 | if (dirty) { | ||
240 | dirty->ref_count++; | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | size = sizeof(*dirty) + BITS_TO_LONGS(num_pages) * sizeof(long); | ||
245 | acc_size = ttm_round_pot(size); | ||
246 | ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx); | ||
247 | if (ret) { | ||
248 | VMW_DEBUG_USER("Out of graphics memory for buffer object " | ||
249 | "dirty tracker.\n"); | ||
250 | return ret; | ||
251 | } | ||
252 | dirty = kvzalloc(size, GFP_KERNEL); | ||
253 | if (!dirty) { | ||
254 | ret = -ENOMEM; | ||
255 | goto out_no_dirty; | ||
256 | } | ||
257 | |||
258 | dirty->size = acc_size; | ||
259 | dirty->bitmap_size = num_pages; | ||
260 | dirty->start = dirty->bitmap_size; | ||
261 | dirty->end = 0; | ||
262 | dirty->ref_count = 1; | ||
263 | if (num_pages < PAGE_SIZE / sizeof(pte_t)) { | ||
264 | dirty->method = VMW_BO_DIRTY_PAGETABLE; | ||
265 | } else { | ||
266 | struct address_space *mapping = vbo->base.bdev->dev_mapping; | ||
267 | pgoff_t offset = drm_vma_node_start(&vbo->base.vma_node); | ||
268 | |||
269 | dirty->method = VMW_BO_DIRTY_MKWRITE; | ||
270 | |||
271 | /* Write-protect and then pick up already dirty bits */ | ||
272 | apply_as_wrprotect(mapping, offset, num_pages); | ||
273 | apply_as_clean(mapping, offset, num_pages, offset, | ||
274 | &dirty->bitmap[0], &dirty->start, &dirty->end); | ||
275 | } | ||
276 | |||
277 | vbo->dirty = dirty; | ||
278 | |||
279 | return 0; | ||
280 | |||
281 | out_no_dirty: | ||
282 | ttm_mem_global_free(&ttm_mem_glob, acc_size); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | /** | ||
287 | * vmw_bo_dirty_release - Release a dirty-tracking user from a buffer object | ||
288 | * @vbo: The buffer object | ||
289 | * | ||
290 | * This function releases a dirty-tracking user from a buffer object. | ||
291 | * If the reference count reaches zero, then the dirty-tracking object is | ||
292 | * freed and the pointer to it cleared. | ||
293 | * | ||
294 | * | ||
295 | */ | ||
296 | void vmw_bo_dirty_release(struct vmw_buffer_object *vbo) | ||
297 | { | ||
298 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
299 | |||
300 | if (dirty && --dirty->ref_count == 0) { | ||
301 | size_t acc_size = dirty->size; | ||
302 | |||
303 | kvfree(dirty); | ||
304 | ttm_mem_global_free(&ttm_mem_glob, acc_size); | ||
305 | vbo->dirty = NULL; | ||
306 | } | ||
307 | } | ||
308 | |||
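vmw_bo_dirty_add() above sizes the tracker as the struct header plus one bit per page and picks the initial method from the object size: anything below PAGE_SIZE / sizeof(pte_t) pages (presumably because such an object is covered by a single last-level page table) starts in page-table scan mode, larger objects start in mkwrite mode. A small stand-alone sketch of that arithmetic, assuming 4 KiB pages and 8-byte PTEs; all names here are illustrative:

#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096UL     /* assumed PAGE_SIZE */
#define DEMO_PTE_SIZE 8UL         /* assumed sizeof(pte_t) on 64-bit */
#define BITS_PER_LONG_DEMO (8 * sizeof(long))
#define BITS_TO_LONGS_DEMO(n) \
	(((n) + BITS_PER_LONG_DEMO - 1) / BITS_PER_LONG_DEMO)

struct demo_dirty_header {        /* stands in for struct vmw_bo_dirty */
	unsigned long start, end;
	int method;
	unsigned int change_count, ref_count;
	unsigned long bitmap_size;
	size_t size;
	/* unsigned long bitmap[]; flexible tail, one bit per page */
};

int main(void)
{
	unsigned long num_pages[] = { 16, 511, 512, 4096 };
	unsigned int i;

	for (i = 0; i < sizeof(num_pages) / sizeof(num_pages[0]); i++) {
		unsigned long n = num_pages[i];
		size_t size = sizeof(struct demo_dirty_header) +
			      BITS_TO_LONGS_DEMO(n) * sizeof(long);
		int pagetable = n < DEMO_PAGE_SIZE / DEMO_PTE_SIZE;

		printf("%5lu pages: alloc ~%zu bytes, start in %s mode\n",
		       n, size, pagetable ? "PAGETABLE" : "MKWRITE");
	}
	return 0;
}

With those assumed sizes the cut-over sits at 512 pages, i.e. buffer objects of 2 MiB and larger begin life under mkwrite tracking.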
309 | /** | ||
310 | * vmw_bo_dirty_transfer_to_res - Pick up a resource's dirty region from | ||
311 | * its backing mob. | ||
312 | * @res: The resource | ||
313 | * | ||
314 | * This function will pick up all dirty ranges affecting the resource from | ||
315 | * its backup mob, and call vmw_resource_dirty_update() once for each | ||
316 | * range. The transferred ranges will be cleared from the backing mob's | ||
317 | * dirty tracking. | ||
318 | */ | ||
319 | void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) | ||
320 | { | ||
321 | struct vmw_buffer_object *vbo = res->backup; | ||
322 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
323 | pgoff_t start, cur, end; | ||
324 | unsigned long res_start = res->backup_offset; | ||
325 | unsigned long res_end = res->backup_offset + res->backup_size; | ||
326 | |||
327 | WARN_ON_ONCE(res_start & ~PAGE_MASK); | ||
328 | res_start >>= PAGE_SHIFT; | ||
329 | res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); | ||
330 | |||
331 | if (res_start >= dirty->end || res_end <= dirty->start) | ||
332 | return; | ||
333 | |||
334 | cur = max(res_start, dirty->start); | ||
335 | res_end = min(res_end, dirty->end); | ||
336 | while (cur < res_end) { | ||
337 | unsigned long num; | ||
338 | |||
339 | start = find_next_bit(&dirty->bitmap[0], res_end, cur); | ||
340 | if (start >= res_end) | ||
341 | break; | ||
342 | |||
343 | end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); | ||
344 | cur = end + 1; | ||
345 | num = end - start; | ||
346 | bitmap_clear(&dirty->bitmap[0], start, num); | ||
347 | vmw_resource_dirty_update(res, start, end); | ||
348 | } | ||
349 | |||
350 | if (res_start <= dirty->start && res_end > dirty->start) | ||
351 | dirty->start = res_end; | ||
352 | if (res_start < dirty->end && res_end >= dirty->end) | ||
353 | dirty->end = res_start; | ||
354 | } | ||
355 | |||
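As a worked example of the range transfer above, the sketch below runs the same loop over a one-word bitmap: the dirty window is intersected with the resource's page span, each contiguous run of set bits inside the intersection is reported and cleared, and bits outside the resource are left for whatever else shares the mob. The helpers are simplified stand-ins for find_next_bit()/find_next_zero_bit() and the numbers are made up:

#include <stdio.h>

static unsigned long bmp;   /* one bit per page of the demo buffer object */

static unsigned long next_bit(unsigned long limit, unsigned long from, int set)
{
	while (from < limit && (((bmp >> from) & 1UL) != (unsigned long)set))
		from++;
	return from;
}

int main(void)
{
	unsigned long res_start = 2, res_end = 6;  /* resource pages [2, 6) */
	unsigned long d_start = 0, d_end = 8;      /* current dirty window  */
	unsigned long cur, start, end;

	bmp = (1UL << 1) | (1UL << 3) | (1UL << 4) | (1UL << 7);

	cur = res_start > d_start ? res_start : d_start;   /* max() */
	res_end = res_end < d_end ? res_end : d_end;       /* min() */

	while (cur < res_end) {
		start = next_bit(res_end, cur, 1);
		if (start >= res_end)
			break;
		end = next_bit(res_end, start + 1, 0);
		cur = end + 1;
		bmp &= ~(((1UL << (end - start)) - 1) << start); /* clear run */
		printf("transfer dirty pages [%lu, %lu) to the resource\n",
		       start, end);
	}
	printf("bits left for other resources: 0x%lx\n", bmp); /* 0x82 */
	return 0;
}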
356 | /** | ||
357 | * vmw_bo_dirty_clear_res - Clear a resource's dirty region from | ||
358 | * its backing mob. | ||
359 | * @res: The resource | ||
360 | * | ||
361 | * This function will clear all dirty ranges affecting the resource from | ||
362 | * its backup mob's dirty tracking. | ||
363 | */ | ||
364 | void vmw_bo_dirty_clear_res(struct vmw_resource *res) | ||
365 | { | ||
366 | unsigned long res_start = res->backup_offset; | ||
367 | unsigned long res_end = res->backup_offset + res->backup_size; | ||
368 | struct vmw_buffer_object *vbo = res->backup; | ||
369 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
370 | |||
371 | res_start >>= PAGE_SHIFT; | ||
372 | res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); | ||
373 | |||
374 | if (res_start >= dirty->end || res_end <= dirty->start) | ||
375 | return; | ||
376 | |||
377 | res_start = max(res_start, dirty->start); | ||
378 | res_end = min(res_end, dirty->end); | ||
379 | bitmap_clear(&dirty->bitmap[0], res_start, res_end - res_start); | ||
380 | |||
381 | if (res_start <= dirty->start && res_end > dirty->start) | ||
382 | dirty->start = res_end; | ||
383 | if (res_start < dirty->end && res_end >= dirty->end) | ||
384 | dirty->end = res_start; | ||
385 | } | ||
386 | |||
387 | vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) | ||
388 | { | ||
389 | struct vm_area_struct *vma = vmf->vma; | ||
390 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) | ||
391 | vma->vm_private_data; | ||
392 | vm_fault_t ret; | ||
393 | unsigned long page_offset; | ||
394 | struct vmw_buffer_object *vbo = | ||
395 | container_of(bo, typeof(*vbo), base); | ||
396 | |||
397 | ret = ttm_bo_vm_reserve(bo, vmf); | ||
398 | if (ret) | ||
399 | return ret; | ||
400 | |||
401 | page_offset = vmf->pgoff - drm_vma_node_start(&bo->vma_node); | ||
402 | if (unlikely(page_offset >= bo->num_pages)) { | ||
403 | ret = VM_FAULT_SIGBUS; | ||
404 | goto out_unlock; | ||
405 | } | ||
406 | |||
407 | if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE && | ||
408 | !test_bit(page_offset, &vbo->dirty->bitmap[0])) { | ||
409 | struct vmw_bo_dirty *dirty = vbo->dirty; | ||
410 | |||
411 | __set_bit(page_offset, &dirty->bitmap[0]); | ||
412 | dirty->start = min(dirty->start, page_offset); | ||
413 | dirty->end = max(dirty->end, page_offset + 1); | ||
414 | } | ||
415 | |||
416 | out_unlock: | ||
417 | reservation_object_unlock(bo->resv); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) | ||
422 | { | ||
423 | struct vm_area_struct *vma = vmf->vma; | ||
424 | struct ttm_buffer_object *bo = (struct ttm_buffer_object *) | ||
425 | vma->vm_private_data; | ||
426 | struct vmw_buffer_object *vbo = | ||
427 | container_of(bo, struct vmw_buffer_object, base); | ||
428 | pgoff_t num_prefault; | ||
429 | pgprot_t prot; | ||
430 | vm_fault_t ret; | ||
431 | |||
432 | ret = ttm_bo_vm_reserve(bo, vmf); | ||
433 | if (ret) | ||
434 | return ret; | ||
435 | |||
436 | num_prefault = (vma->vm_flags & VM_RAND_READ) ? 1 : | ||
437 | TTM_BO_VM_NUM_PREFAULT; | ||
438 | |||
439 | if (vbo->dirty) { | ||
440 | pgoff_t allowed_prefault; | ||
441 | unsigned long page_offset; | ||
442 | |||
443 | page_offset = vmf->pgoff - drm_vma_node_start(&bo->vma_node); | ||
444 | if (page_offset >= bo->num_pages || | ||
445 | vmw_resources_clean(vbo, page_offset, | ||
446 | page_offset + PAGE_SIZE, | ||
447 | &allowed_prefault)) { | ||
448 | ret = VM_FAULT_SIGBUS; | ||
449 | goto out_unlock; | ||
450 | } | ||
451 | |||
452 | num_prefault = min(num_prefault, allowed_prefault); | ||
453 | } | ||
454 | |||
455 | /* | ||
456 | * If we don't track dirty using the MKWRITE method, make sure | ||
457 | * the page protection is write-enabled so we don't get | ||
458 | * a lot of unnecessary write faults. | ||
459 | */ | ||
460 | if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) | ||
461 | prot = vma->vm_page_prot; | ||
462 | else | ||
463 | prot = vm_get_page_prot(vma->vm_flags); | ||
464 | |||
465 | ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); | ||
466 | if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) | ||
467 | return ret; | ||
468 | |||
469 | out_unlock: | ||
470 | reservation_object_unlock(bo->resv); | ||
471 | return ret; | ||
472 | } | ||
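The two handlers above only take effect once they are installed as the fault and page_mkwrite callbacks of the vm_operations_struct used for vmwgfx mmaps (compare the vm_ops member removed from struct vmw_private earlier in this patch). A rough sketch of that wiring, not taken from the reverted series itself; the remaining callbacks and the exact hook-up point are assumptions:

/* Sketch only: the other callbacks would mirror TTM's default ttm_bo_vm_ops. */
static const struct vm_operations_struct vmw_vm_ops_sketch = {
	.fault = vmw_bo_vm_fault,          /* coherence-aware fault handler     */
	.page_mkwrite = vmw_bo_vm_mkwrite, /* records writes for dirty tracking */
};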
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index d70ee0df5c13..1d38a8b2f2ec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
@@ -34,51 +34,6 @@ | |||
34 | 34 | ||
35 | #define VMW_RES_EVICT_ERR_COUNT 10 | 35 | #define VMW_RES_EVICT_ERR_COUNT 10 |
36 | 36 | ||
37 | /** | ||
38 | * vmw_resource_mob_attach - Mark a resource as attached to its backing mob | ||
39 | * @res: The resource | ||
40 | */ | ||
41 | void vmw_resource_mob_attach(struct vmw_resource *res) | ||
42 | { | ||
43 | struct vmw_buffer_object *backup = res->backup; | ||
44 | struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL; | ||
45 | |||
46 | lockdep_assert_held(&backup->base.resv->lock.base); | ||
47 | res->used_prio = (res->res_dirty) ? res->func->dirty_prio : | ||
48 | res->func->prio; | ||
49 | |||
50 | while (*new) { | ||
51 | struct vmw_resource *this = | ||
52 | container_of(*new, struct vmw_resource, mob_node); | ||
53 | |||
54 | parent = *new; | ||
55 | new = (res->backup_offset < this->backup_offset) ? | ||
56 | &((*new)->rb_left) : &((*new)->rb_right); | ||
57 | } | ||
58 | |||
59 | rb_link_node(&res->mob_node, parent, new); | ||
60 | rb_insert_color(&res->mob_node, &backup->res_tree); | ||
61 | |||
62 | vmw_bo_prio_add(backup, res->used_prio); | ||
63 | } | ||
64 | |||
65 | /** | ||
66 | * vmw_resource_mob_detach - Mark a resource as detached from its backing mob | ||
67 | * @res: The resource | ||
68 | */ | ||
69 | void vmw_resource_mob_detach(struct vmw_resource *res) | ||
70 | { | ||
71 | struct vmw_buffer_object *backup = res->backup; | ||
72 | |||
73 | lockdep_assert_held(&backup->base.resv->lock.base); | ||
74 | if (vmw_resource_mob_attached(res)) { | ||
75 | rb_erase(&res->mob_node, &backup->res_tree); | ||
76 | RB_CLEAR_NODE(&res->mob_node); | ||
77 | vmw_bo_prio_del(backup, res->used_prio); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | |||
82 | struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | 37 | struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) |
83 | { | 38 | { |
84 | kref_get(&res->kref); | 39 | kref_get(&res->kref); |
@@ -125,7 +80,7 @@ static void vmw_resource_release(struct kref *kref) | |||
125 | struct ttm_buffer_object *bo = &res->backup->base; | 80 | struct ttm_buffer_object *bo = &res->backup->base; |
126 | 81 | ||
127 | ttm_bo_reserve(bo, false, false, NULL); | 82 | ttm_bo_reserve(bo, false, false, NULL); |
128 | if (vmw_resource_mob_attached(res) && | 83 | if (!list_empty(&res->mob_head) && |
129 | res->func->unbind != NULL) { | 84 | res->func->unbind != NULL) { |
130 | struct ttm_validate_buffer val_buf; | 85 | struct ttm_validate_buffer val_buf; |
131 | 86 | ||
@@ -134,11 +89,7 @@ static void vmw_resource_release(struct kref *kref) | |||
134 | res->func->unbind(res, false, &val_buf); | 89 | res->func->unbind(res, false, &val_buf); |
135 | } | 90 | } |
136 | res->backup_dirty = false; | 91 | res->backup_dirty = false; |
137 | vmw_resource_mob_detach(res); | 92 | list_del_init(&res->mob_head); |
138 | if (res->dirty) | ||
139 | res->func->dirty_free(res); | ||
140 | if (res->coherent) | ||
141 | vmw_bo_dirty_release(res->backup); | ||
142 | ttm_bo_unreserve(bo); | 93 | ttm_bo_unreserve(bo); |
143 | vmw_bo_unreference(&res->backup); | 94 | vmw_bo_unreference(&res->backup); |
144 | } | 95 | } |
@@ -220,17 +171,14 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, | |||
220 | res->res_free = res_free; | 171 | res->res_free = res_free; |
221 | res->dev_priv = dev_priv; | 172 | res->dev_priv = dev_priv; |
222 | res->func = func; | 173 | res->func = func; |
223 | RB_CLEAR_NODE(&res->mob_node); | ||
224 | INIT_LIST_HEAD(&res->lru_head); | 174 | INIT_LIST_HEAD(&res->lru_head); |
175 | INIT_LIST_HEAD(&res->mob_head); | ||
225 | INIT_LIST_HEAD(&res->binding_head); | 176 | INIT_LIST_HEAD(&res->binding_head); |
226 | res->id = -1; | 177 | res->id = -1; |
227 | res->backup = NULL; | 178 | res->backup = NULL; |
228 | res->backup_offset = 0; | 179 | res->backup_offset = 0; |
229 | res->backup_dirty = false; | 180 | res->backup_dirty = false; |
230 | res->res_dirty = false; | 181 | res->res_dirty = false; |
231 | res->coherent = false; | ||
232 | res->used_prio = 3; | ||
233 | res->dirty = NULL; | ||
234 | if (delay_id) | 182 | if (delay_id) |
235 | return 0; | 183 | return 0; |
236 | else | 184 | else |
@@ -395,8 +343,7 @@ out_no_bo: | |||
395 | * should be retried once resources have been freed up. | 343 | * should be retried once resources have been freed up. |
396 | */ | 344 | */ |
397 | static int vmw_resource_do_validate(struct vmw_resource *res, | 345 | static int vmw_resource_do_validate(struct vmw_resource *res, |
398 | struct ttm_validate_buffer *val_buf, | 346 | struct ttm_validate_buffer *val_buf) |
399 | bool dirtying) | ||
400 | { | 347 | { |
401 | int ret = 0; | 348 | int ret = 0; |
402 | const struct vmw_res_func *func = res->func; | 349 | const struct vmw_res_func *func = res->func; |
@@ -408,47 +355,14 @@ static int vmw_resource_do_validate(struct vmw_resource *res, | |||
408 | } | 355 | } |
409 | 356 | ||
410 | if (func->bind && | 357 | if (func->bind && |
411 | ((func->needs_backup && !vmw_resource_mob_attached(res) && | 358 | ((func->needs_backup && list_empty(&res->mob_head) && |
412 | val_buf->bo != NULL) || | 359 | val_buf->bo != NULL) || |
413 | (!func->needs_backup && val_buf->bo != NULL))) { | 360 | (!func->needs_backup && val_buf->bo != NULL))) { |
414 | ret = func->bind(res, val_buf); | 361 | ret = func->bind(res, val_buf); |
415 | if (unlikely(ret != 0)) | 362 | if (unlikely(ret != 0)) |
416 | goto out_bind_failed; | 363 | goto out_bind_failed; |
417 | if (func->needs_backup) | 364 | if (func->needs_backup) |
418 | vmw_resource_mob_attach(res); | 365 | list_add_tail(&res->mob_head, &res->backup->res_list); |
419 | } | ||
420 | |||
421 | /* | ||
422 | * Handle the case where the backup mob is marked coherent but | ||
423 | * the resource isn't. | ||
424 | */ | ||
425 | if (func->dirty_alloc && vmw_resource_mob_attached(res) && | ||
426 | !res->coherent) { | ||
427 | if (res->backup->dirty && !res->dirty) { | ||
428 | ret = func->dirty_alloc(res); | ||
429 | if (ret) | ||
430 | return ret; | ||
431 | } else if (!res->backup->dirty && res->dirty) { | ||
432 | func->dirty_free(res); | ||
433 | } | ||
434 | } | ||
435 | |||
436 | /* | ||
437 | * Transfer the dirty regions to the resource and update | ||
438 | * the resource. | ||
439 | */ | ||
440 | if (res->dirty) { | ||
441 | if (dirtying && !res->res_dirty) { | ||
442 | pgoff_t start = res->backup_offset >> PAGE_SHIFT; | ||
443 | pgoff_t end = __KERNEL_DIV_ROUND_UP | ||
444 | (res->backup_offset + res->backup_size, | ||
445 | PAGE_SIZE); | ||
446 | |||
447 | vmw_bo_dirty_unmap(res->backup, start, end); | ||
448 | } | ||
449 | |||
450 | vmw_bo_dirty_transfer_to_res(res); | ||
451 | return func->dirty_sync(res); | ||
452 | } | 366 | } |
453 | 367 | ||
454 | return 0; | 368 | return 0; |
@@ -488,29 +402,19 @@ void vmw_resource_unreserve(struct vmw_resource *res, | |||
488 | 402 | ||
489 | if (switch_backup && new_backup != res->backup) { | 403 | if (switch_backup && new_backup != res->backup) { |
490 | if (res->backup) { | 404 | if (res->backup) { |
491 | vmw_resource_mob_detach(res); | 405 | lockdep_assert_held(&res->backup->base.resv->lock.base); |
492 | if (res->coherent) | 406 | list_del_init(&res->mob_head); |
493 | vmw_bo_dirty_release(res->backup); | ||
494 | vmw_bo_unreference(&res->backup); | 407 | vmw_bo_unreference(&res->backup); |
495 | } | 408 | } |
496 | 409 | ||
497 | if (new_backup) { | 410 | if (new_backup) { |
498 | res->backup = vmw_bo_reference(new_backup); | 411 | res->backup = vmw_bo_reference(new_backup); |
499 | 412 | lockdep_assert_held(&new_backup->base.resv->lock.base); | |
500 | /* | 413 | list_add_tail(&res->mob_head, &new_backup->res_list); |
501 | * The validation code should already have added a | ||
502 | * dirty tracker here. | ||
503 | */ | ||
504 | WARN_ON(res->coherent && !new_backup->dirty); | ||
505 | |||
506 | vmw_resource_mob_attach(res); | ||
507 | } else { | 414 | } else { |
508 | res->backup = NULL; | 415 | res->backup = NULL; |
509 | } | 416 | } |
510 | } else if (switch_backup && res->coherent) { | ||
511 | vmw_bo_dirty_release(res->backup); | ||
512 | } | 417 | } |
513 | |||
514 | if (switch_backup) | 418 | if (switch_backup) |
515 | res->backup_offset = new_backup_offset; | 419 | res->backup_offset = new_backup_offset; |
516 | 420 | ||
@@ -565,7 +469,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, | |||
565 | if (unlikely(ret != 0)) | 469 | if (unlikely(ret != 0)) |
566 | goto out_no_reserve; | 470 | goto out_no_reserve; |
567 | 471 | ||
568 | if (res->func->needs_backup && !vmw_resource_mob_attached(res)) | 472 | if (res->func->needs_backup && list_empty(&res->mob_head)) |
569 | return 0; | 473 | return 0; |
570 | 474 | ||
571 | backup_dirty = res->backup_dirty; | 475 | backup_dirty = res->backup_dirty; |
@@ -670,11 +574,11 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket, | |||
670 | return ret; | 574 | return ret; |
671 | 575 | ||
672 | if (unlikely(func->unbind != NULL && | 576 | if (unlikely(func->unbind != NULL && |
673 | (!func->needs_backup || vmw_resource_mob_attached(res)))) { | 577 | (!func->needs_backup || !list_empty(&res->mob_head)))) { |
674 | ret = func->unbind(res, res->res_dirty, &val_buf); | 578 | ret = func->unbind(res, res->res_dirty, &val_buf); |
675 | if (unlikely(ret != 0)) | 579 | if (unlikely(ret != 0)) |
676 | goto out_no_unbind; | 580 | goto out_no_unbind; |
677 | vmw_resource_mob_detach(res); | 581 | list_del_init(&res->mob_head); |
678 | } | 582 | } |
679 | ret = func->destroy(res); | 583 | ret = func->destroy(res); |
680 | res->backup_dirty = true; | 584 | res->backup_dirty = true; |
@@ -691,7 +595,6 @@ out_no_unbind: | |||
691 | * to the device. | 595 | * to the device. |
692 | * @res: The resource to make visible to the device. | 596 | * @res: The resource to make visible to the device. |
693 | * @intr: Perform waits interruptible if possible. | 597 | * @intr: Perform waits interruptible if possible. |
694 | * @dirtying: Pending GPU operation will dirty the resource | ||
695 | * | 598 | * |
696 | * On successful return, any backup DMA buffer pointed to by @res->backup will | 599 | * On successful return, any backup DMA buffer pointed to by @res->backup will |
697 | * be reserved and validated. | 600 | * be reserved and validated. |
@@ -701,8 +604,7 @@ out_no_unbind: | |||
701 | * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code | 604 | * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code |
702 | * on failure. | 605 | * on failure. |
703 | */ | 606 | */ |
704 | int vmw_resource_validate(struct vmw_resource *res, bool intr, | 607 | int vmw_resource_validate(struct vmw_resource *res, bool intr) |
705 | bool dirtying) | ||
706 | { | 608 | { |
707 | int ret; | 609 | int ret; |
708 | struct vmw_resource *evict_res; | 610 | struct vmw_resource *evict_res; |
@@ -719,7 +621,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, | |||
719 | if (res->backup) | 621 | if (res->backup) |
720 | val_buf.bo = &res->backup->base; | 622 | val_buf.bo = &res->backup->base; |
721 | do { | 623 | do { |
722 | ret = vmw_resource_do_validate(res, &val_buf, dirtying); | 624 | ret = vmw_resource_do_validate(res, &val_buf); |
723 | if (likely(ret != -EBUSY)) | 625 | if (likely(ret != -EBUSY)) |
724 | break; | 626 | break; |
725 | 627 | ||
@@ -758,7 +660,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, | |||
758 | if (unlikely(ret != 0)) | 660 | if (unlikely(ret != 0)) |
759 | goto out_no_validate; | 661 | goto out_no_validate; |
760 | else if (!res->func->needs_backup && res->backup) { | 662 | else if (!res->func->needs_backup && res->backup) { |
761 | WARN_ON_ONCE(vmw_resource_mob_attached(res)); | 663 | list_del_init(&res->mob_head); |
762 | vmw_bo_unreference(&res->backup); | 664 | vmw_bo_unreference(&res->backup); |
763 | } | 665 | } |
764 | 666 | ||
@@ -782,23 +684,22 @@ out_no_validate: | |||
782 | */ | 684 | */ |
783 | void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) | 685 | void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) |
784 | { | 686 | { |
687 | |||
688 | struct vmw_resource *res, *next; | ||
785 | struct ttm_validate_buffer val_buf = { | 689 | struct ttm_validate_buffer val_buf = { |
786 | .bo = &vbo->base, | 690 | .bo = &vbo->base, |
787 | .num_shared = 0 | 691 | .num_shared = 0 |
788 | }; | 692 | }; |
789 | 693 | ||
790 | lockdep_assert_held(&vbo->base.resv->lock.base); | 694 | lockdep_assert_held(&vbo->base.resv->lock.base); |
791 | while (!RB_EMPTY_ROOT(&vbo->res_tree)) { | 695 | list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) { |
792 | struct rb_node *node = vbo->res_tree.rb_node; | 696 | if (!res->func->unbind) |
793 | struct vmw_resource *res = | 697 | continue; |
794 | container_of(node, struct vmw_resource, mob_node); | ||
795 | |||
796 | if (!WARN_ON_ONCE(!res->func->unbind)) | ||
797 | (void) res->func->unbind(res, res->res_dirty, &val_buf); | ||
798 | 698 | ||
699 | (void) res->func->unbind(res, res->res_dirty, &val_buf); | ||
799 | res->backup_dirty = true; | 700 | res->backup_dirty = true; |
800 | res->res_dirty = false; | 701 | res->res_dirty = false; |
801 | vmw_resource_mob_detach(res); | 702 | list_del_init(&res->mob_head); |
802 | } | 703 | } |
803 | 704 | ||
804 | (void) ttm_bo_wait(&vbo->base, false, false); | 705 | (void) ttm_bo_wait(&vbo->base, false, false); |
@@ -1019,7 +920,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible) | |||
1019 | /* Do we really need to pin the MOB as well? */ | 920 | /* Do we really need to pin the MOB as well? */ |
1020 | vmw_bo_pin_reserved(vbo, true); | 921 | vmw_bo_pin_reserved(vbo, true); |
1021 | } | 922 | } |
1022 | ret = vmw_resource_validate(res, interruptible, true); | 923 | ret = vmw_resource_validate(res, interruptible); |
1023 | if (vbo) | 924 | if (vbo) |
1024 | ttm_bo_unreserve(&vbo->base); | 925 | ttm_bo_unreserve(&vbo->base); |
1025 | if (ret) | 926 | if (ret) |
@@ -1079,101 +980,3 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res) | |||
1079 | { | 980 | { |
1080 | return res->func->res_type; | 981 | return res->func->res_type; |
1081 | } | 982 | } |
1082 | |||
1083 | /** | ||
1084 | * vmw_resource_dirty_update - Update a resource's dirty tracker with a | ||
1085 | * sequential range of touched backing store memory. | ||
1086 | * @res: The resource. | ||
1087 | * @start: The first page touched. | ||
1088 | * @end: The last page touched + 1. | ||
1089 | */ | ||
1090 | void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, | ||
1091 | pgoff_t end) | ||
1092 | { | ||
1093 | if (res->dirty) | ||
1094 | res->func->dirty_range_add(res, start << PAGE_SHIFT, | ||
1095 | end << PAGE_SHIFT); | ||
1096 | } | ||
1097 | |||
1098 | /** | ||
1099 | * vmw_resources_clean - Clean resources intersecting a mob range | ||
1100 | * @vbo: The mob buffer object | ||
1101 | * @start: The mob page offset starting the range | ||
1102 | * @end: The mob page offset ending the range | ||
1103 | * @num_prefault: Returns how many pages including the first have been | ||
1104 | * cleaned and are ok to prefault | ||
1105 | */ | ||
1106 | int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, | ||
1107 | pgoff_t end, pgoff_t *num_prefault) | ||
1108 | { | ||
1109 | struct rb_node *cur = vbo->res_tree.rb_node; | ||
1110 | struct vmw_resource *found = NULL; | ||
1111 | unsigned long res_start = start << PAGE_SHIFT; | ||
1112 | unsigned long res_end = end << PAGE_SHIFT; | ||
1113 | unsigned long last_cleaned = 0; | ||
1114 | |||
1115 | /* | ||
1116 | * Find the resource with lowest backup_offset that intersects the | ||
1117 | * range. | ||
1118 | */ | ||
1119 | while (cur) { | ||
1120 | struct vmw_resource *cur_res = | ||
1121 | container_of(cur, struct vmw_resource, mob_node); | ||
1122 | |||
1123 | if (cur_res->backup_offset >= res_end) { | ||
1124 | cur = cur->rb_left; | ||
1125 | } else if (cur_res->backup_offset + cur_res->backup_size <= | ||
1126 | res_start) { | ||
1127 | cur = cur->rb_right; | ||
1128 | } else { | ||
1129 | found = cur_res; | ||
1130 | cur = cur->rb_left; | ||
1131 | /* Continue to look for resources with lower offsets */ | ||
1132 | } | ||
1133 | } | ||
1134 | |||
1135 | /* | ||
1136 | * In order of increasing backup_offset, clean dirty resources | ||
1137 | * intersecting the range. | ||
1138 | */ | ||
1139 | while (found) { | ||
1140 | if (found->res_dirty) { | ||
1141 | int ret; | ||
1142 | |||
1143 | if (!found->func->clean) | ||
1144 | return -EINVAL; | ||
1145 | |||
1146 | ret = found->func->clean(found); | ||
1147 | if (ret) | ||
1148 | return ret; | ||
1149 | |||
1150 | found->res_dirty = false; | ||
1151 | } | ||
1152 | last_cleaned = found->backup_offset + found->backup_size; | ||
1153 | cur = rb_next(&found->mob_node); | ||
1154 | if (!cur) | ||
1155 | break; | ||
1156 | |||
1157 | found = container_of(cur, struct vmw_resource, mob_node); | ||
1158 | if (found->backup_offset >= res_end) | ||
1159 | break; | ||
1160 | } | ||
1161 | |||
1162 | /* | ||
1162 | * Set the number of pages allowed for prefaulting and fence the buffer object | ||
1164 | */ | ||
1165 | *num_prefault = 1; | ||
1166 | if (last_cleaned > res_start) { | ||
1167 | struct ttm_buffer_object *bo = &vbo->base; | ||
1168 | |||
1169 | *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start, | ||
1170 | PAGE_SIZE); | ||
1171 | vmw_bo_fence_single(bo, NULL); | ||
1172 | if (bo->moving) | ||
1173 | dma_fence_put(bo->moving); | ||
1174 | bo->moving = dma_fence_get | ||
1175 | (reservation_object_get_excl(bo->resv)); | ||
1176 | } | ||
1177 | |||
1178 | return 0; | ||
1179 | } | ||
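To make the final *num_prefault computation above concrete, here is a stand-alone example with made-up numbers (4 KiB pages assumed); it reproduces only the closing arithmetic, not the RB-tree walk or the fencing:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL                        /* assumed PAGE_SIZE */
#define DIV_ROUND_UP_DEMO(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* A fault lands on mob page 1 of a buffer object. */
	unsigned long fault_page = 1;
	unsigned long res_start = fault_page * DEMO_PAGE_SIZE;

	/* One dirty resource backs bytes [0, 3 pages + 100) of the mob and
	 * gets cleaned; last_cleaned is its backup_offset + backup_size. */
	unsigned long backup_offset = 0;
	unsigned long backup_size = 3 * DEMO_PAGE_SIZE + 100;
	unsigned long last_cleaned = backup_offset + backup_size;

	unsigned long num_prefault = 1;              /* the faulting page only */

	if (last_cleaned > res_start)
		num_prefault = DIV_ROUND_UP_DEMO(last_cleaned - res_start,
						 DEMO_PAGE_SIZE);

	printf("pages safe to prefault from the faulting page on: %lu\n",
	       num_prefault);                        /* 3 */
	return 0;
}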
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h index 3b7438b2d289..7e19eba0b0b8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | |||
@@ -71,13 +71,6 @@ struct vmw_user_resource_conv { | |||
71 | * @commit_notify: If the resource is a command buffer managed resource, | 71 | * @commit_notify: If the resource is a command buffer managed resource, |
72 | * callback to notify that a define or remove command | 72 | * callback to notify that a define or remove command |
73 | * has been committed to the device. | 73 | * has been committed to the device. |
74 | * @dirty_alloc: Allocate a dirty tracker. NULL if dirty-tracking is not | ||
75 | * supported. | ||
76 | * @dirty_free: Free the dirty tracker. | ||
77 | * @dirty_sync: Upload the dirty mob contents to the resource. | ||
78 | * @dirty_add_range: Add a sequential dirty range to the resource | ||
79 | * dirty tracker. | ||
80 | * @clean: Clean the resource. | ||
81 | */ | 74 | */ |
82 | struct vmw_res_func { | 75 | struct vmw_res_func { |
83 | enum vmw_res_type res_type; | 76 | enum vmw_res_type res_type; |
@@ -85,8 +78,6 @@ struct vmw_res_func { | |||
85 | const char *type_name; | 78 | const char *type_name; |
86 | struct ttm_placement *backup_placement; | 79 | struct ttm_placement *backup_placement; |
87 | bool may_evict; | 80 | bool may_evict; |
88 | u32 prio; | ||
89 | u32 dirty_prio; | ||
90 | 81 | ||
91 | int (*create) (struct vmw_resource *res); | 82 | int (*create) (struct vmw_resource *res); |
92 | int (*destroy) (struct vmw_resource *res); | 83 | int (*destroy) (struct vmw_resource *res); |
@@ -97,12 +88,6 @@ struct vmw_res_func { | |||
97 | struct ttm_validate_buffer *val_buf); | 88 | struct ttm_validate_buffer *val_buf); |
98 | void (*commit_notify)(struct vmw_resource *res, | 89 | void (*commit_notify)(struct vmw_resource *res, |
99 | enum vmw_cmdbuf_res_state state); | 90 | enum vmw_cmdbuf_res_state state); |
100 | int (*dirty_alloc)(struct vmw_resource *res); | ||
101 | void (*dirty_free)(struct vmw_resource *res); | ||
102 | int (*dirty_sync)(struct vmw_resource *res); | ||
103 | void (*dirty_range_add)(struct vmw_resource *res, size_t start, | ||
104 | size_t end); | ||
105 | int (*clean)(struct vmw_resource *res); | ||
106 | }; | 91 | }; |
107 | 92 | ||
108 | /** | 93 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index e139fdfd1635..d310d21f0d54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | |||
@@ -95,8 +95,6 @@ static const struct vmw_res_func vmw_gb_shader_func = { | |||
95 | .res_type = vmw_res_shader, | 95 | .res_type = vmw_res_shader, |
96 | .needs_backup = true, | 96 | .needs_backup = true, |
97 | .may_evict = true, | 97 | .may_evict = true, |
98 | .prio = 3, | ||
99 | .dirty_prio = 3, | ||
100 | .type_name = "guest backed shaders", | 98 | .type_name = "guest backed shaders", |
101 | .backup_placement = &vmw_mob_placement, | 99 | .backup_placement = &vmw_mob_placement, |
102 | .create = vmw_gb_shader_create, | 100 | .create = vmw_gb_shader_create, |
@@ -108,9 +106,7 @@ static const struct vmw_res_func vmw_gb_shader_func = { | |||
108 | static const struct vmw_res_func vmw_dx_shader_func = { | 106 | static const struct vmw_res_func vmw_dx_shader_func = { |
109 | .res_type = vmw_res_shader, | 107 | .res_type = vmw_res_shader, |
110 | .needs_backup = true, | 108 | .needs_backup = true, |
111 | .may_evict = true, | 109 | .may_evict = false, |
112 | .prio = 3, | ||
113 | .dirty_prio = 3, | ||
114 | .type_name = "dx shaders", | 110 | .type_name = "dx shaders", |
115 | .backup_placement = &vmw_mob_placement, | 111 | .backup_placement = &vmw_mob_placement, |
116 | .create = vmw_dx_shader_create, | 112 | .create = vmw_dx_shader_create, |
@@ -427,7 +423,7 @@ static int vmw_dx_shader_create(struct vmw_resource *res) | |||
427 | 423 | ||
428 | WARN_ON_ONCE(!shader->committed); | 424 | WARN_ON_ONCE(!shader->committed); |
429 | 425 | ||
430 | if (vmw_resource_mob_attached(res)) { | 426 | if (!list_empty(&res->mob_head)) { |
431 | mutex_lock(&dev_priv->binding_mutex); | 427 | mutex_lock(&dev_priv->binding_mutex); |
432 | ret = vmw_dx_shader_unscrub(res); | 428 | ret = vmw_dx_shader_unscrub(res); |
433 | mutex_unlock(&dev_priv->binding_mutex); | 429 | mutex_unlock(&dev_priv->binding_mutex); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 862ca44680ca..219471903bc1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | |||
@@ -68,20 +68,6 @@ struct vmw_surface_offset { | |||
68 | uint32_t bo_offset; | 68 | uint32_t bo_offset; |
69 | }; | 69 | }; |
70 | 70 | ||
71 | /** | ||
72 | * vmw_surface_dirty - Surface dirty-tracker | ||
73 | * @cache: Cached layout information of the surface. | ||
74 | * @size: Accounting size for the struct vmw_surface_dirty. | ||
75 | * @num_subres: Number of subresources. | ||
76 | * @boxes: Array of SVGA3dBoxes indicating dirty regions. One per subresource. | ||
77 | */ | ||
78 | struct vmw_surface_dirty { | ||
79 | struct svga3dsurface_cache cache; | ||
80 | size_t size; | ||
81 | u32 num_subres; | ||
82 | SVGA3dBox boxes[0]; | ||
83 | }; | ||
84 | |||
85 | static void vmw_user_surface_free(struct vmw_resource *res); | 71 | static void vmw_user_surface_free(struct vmw_resource *res); |
86 | static struct vmw_resource * | 72 | static struct vmw_resource * |
87 | vmw_user_surface_base_to_res(struct ttm_base_object *base); | 73 | vmw_user_surface_base_to_res(struct ttm_base_object *base); |
@@ -110,13 +96,6 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, | |||
110 | struct drm_vmw_gb_surface_ref_ext_rep *rep, | 96 | struct drm_vmw_gb_surface_ref_ext_rep *rep, |
111 | struct drm_file *file_priv); | 97 | struct drm_file *file_priv); |
112 | 98 | ||
113 | static void vmw_surface_dirty_free(struct vmw_resource *res); | ||
114 | static int vmw_surface_dirty_alloc(struct vmw_resource *res); | ||
115 | static int vmw_surface_dirty_sync(struct vmw_resource *res); | ||
116 | static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, | ||
117 | size_t end); | ||
118 | static int vmw_surface_clean(struct vmw_resource *res); | ||
119 | |||
120 | static const struct vmw_user_resource_conv user_surface_conv = { | 99 | static const struct vmw_user_resource_conv user_surface_conv = { |
121 | .object_type = VMW_RES_SURFACE, | 100 | .object_type = VMW_RES_SURFACE, |
122 | .base_obj_to_res = vmw_user_surface_base_to_res, | 101 | .base_obj_to_res = vmw_user_surface_base_to_res, |
@@ -133,8 +112,6 @@ static const struct vmw_res_func vmw_legacy_surface_func = { | |||
133 | .res_type = vmw_res_surface, | 112 | .res_type = vmw_res_surface, |
134 | .needs_backup = false, | 113 | .needs_backup = false, |
135 | .may_evict = true, | 114 | .may_evict = true, |
136 | .prio = 1, | ||
137 | .dirty_prio = 1, | ||
138 | .type_name = "legacy surfaces", | 115 | .type_name = "legacy surfaces", |
139 | .backup_placement = &vmw_srf_placement, | 116 | .backup_placement = &vmw_srf_placement, |
140 | .create = &vmw_legacy_srf_create, | 117 | .create = &vmw_legacy_srf_create, |
@@ -147,19 +124,12 @@ static const struct vmw_res_func vmw_gb_surface_func = { | |||
147 | .res_type = vmw_res_surface, | 124 | .res_type = vmw_res_surface, |
148 | .needs_backup = true, | 125 | .needs_backup = true, |
149 | .may_evict = true, | 126 | .may_evict = true, |
150 | .prio = 1, | ||
151 | .dirty_prio = 2, | ||
152 | .type_name = "guest backed surfaces", | 127 | .type_name = "guest backed surfaces", |
153 | .backup_placement = &vmw_mob_placement, | 128 | .backup_placement = &vmw_mob_placement, |
154 | .create = vmw_gb_surface_create, | 129 | .create = vmw_gb_surface_create, |
155 | .destroy = vmw_gb_surface_destroy, | 130 | .destroy = vmw_gb_surface_destroy, |
156 | .bind = vmw_gb_surface_bind, | 131 | .bind = vmw_gb_surface_bind, |
157 | .unbind = vmw_gb_surface_unbind, | 132 | .unbind = vmw_gb_surface_unbind |
158 | .dirty_alloc = vmw_surface_dirty_alloc, | ||
159 | .dirty_free = vmw_surface_dirty_free, | ||
160 | .dirty_sync = vmw_surface_dirty_sync, | ||
161 | .dirty_range_add = vmw_surface_dirty_range_add, | ||
162 | .clean = vmw_surface_clean, | ||
163 | }; | 133 | }; |
164 | 134 | ||
165 | /** | 135 | /** |
@@ -667,7 +637,6 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
667 | struct vmw_private *dev_priv = srf->res.dev_priv; | 637 | struct vmw_private *dev_priv = srf->res.dev_priv; |
668 | uint32_t size = user_srf->size; | 638 | uint32_t size = user_srf->size; |
669 | 639 | ||
670 | WARN_ON_ONCE(res->dirty); | ||
671 | if (user_srf->master) | 640 | if (user_srf->master) |
672 | drm_master_put(&user_srf->master); | 641 | drm_master_put(&user_srf->master); |
673 | kfree(srf->offsets); | 642 | kfree(srf->offsets); |
@@ -946,6 +915,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, | |||
946 | if (unlikely(drm_is_render_client(file_priv))) | 915 | if (unlikely(drm_is_render_client(file_priv))) |
947 | require_exist = true; | 916 | require_exist = true; |
948 | 917 | ||
918 | if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { | ||
919 | DRM_ERROR("Locked master refused legacy " | ||
920 | "surface reference.\n"); | ||
921 | return -EACCES; | ||
922 | } | ||
923 | |||
949 | handle = u_handle; | 924 | handle = u_handle; |
950 | } | 925 | } |
951 | 926 | ||
@@ -1195,16 +1170,10 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, | |||
1195 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; | 1170 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; |
1196 | cmd2->header.size = sizeof(cmd2->body); | 1171 | cmd2->header.size = sizeof(cmd2->body); |
1197 | cmd2->body.sid = res->id; | 1172 | cmd2->body.sid = res->id; |
1173 | res->backup_dirty = false; | ||
1198 | } | 1174 | } |
1199 | vmw_fifo_commit(dev_priv, submit_size); | 1175 | vmw_fifo_commit(dev_priv, submit_size); |
1200 | 1176 | ||
1201 | if (res->backup->dirty && res->backup_dirty) { | ||
1202 | /* We've just made a full upload. Clear dirty regions. */ | ||
1203 | vmw_bo_dirty_clear_res(res); | ||
1204 | } | ||
1205 | |||
1206 | res->backup_dirty = false; | ||
1207 | |||
1208 | return 0; | 1177 | return 0; |
1209 | } | 1178 | } |
1210 | 1179 | ||
@@ -1669,8 +1638,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, | |||
1669 | } | 1638 | } |
1670 | } | 1639 | } |
1671 | } else if (req->base.drm_surface_flags & | 1640 | } else if (req->base.drm_surface_flags & |
1672 | (drm_vmw_surface_flag_create_buffer | | 1641 | drm_vmw_surface_flag_create_buffer) |
1673 | drm_vmw_surface_flag_coherent)) | ||
1674 | ret = vmw_user_bo_alloc(dev_priv, tfile, | 1642 | ret = vmw_user_bo_alloc(dev_priv, tfile, |
1675 | res->backup_size, | 1643 | res->backup_size, |
1676 | req->base.drm_surface_flags & | 1644 | req->base.drm_surface_flags & |
@@ -1684,26 +1652,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, | |||
1684 | goto out_unlock; | 1652 | goto out_unlock; |
1685 | } | 1653 | } |
1686 | 1654 | ||
1687 | if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { | ||
1688 | struct vmw_buffer_object *backup = res->backup; | ||
1689 | |||
1690 | ttm_bo_reserve(&backup->base, false, false, NULL); | ||
1691 | if (!res->func->dirty_alloc) | ||
1692 | ret = -EINVAL; | ||
1693 | if (!ret) | ||
1694 | ret = vmw_bo_dirty_add(backup); | ||
1695 | if (!ret) { | ||
1696 | res->coherent = true; | ||
1697 | ret = res->func->dirty_alloc(res); | ||
1698 | } | ||
1699 | ttm_bo_unreserve(&backup->base); | ||
1700 | if (ret) { | ||
1701 | vmw_resource_unreference(&res); | ||
1702 | goto out_unlock; | ||
1703 | } | ||
1704 | |||
1705 | } | ||
1706 | |||
1707 | tmp = vmw_resource_reference(res); | 1655 | tmp = vmw_resource_reference(res); |
1708 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, | 1656 | ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, |
1709 | req->base.drm_surface_flags & | 1657 | req->base.drm_surface_flags & |
@@ -1812,338 +1760,3 @@ out_bad_resource: | |||
1812 | 1760 | ||
1813 | return ret; | 1761 | return ret; |
1814 | } | 1762 | } |
1815 | |||
1816 | /** | ||
1817 | * vmw_subres_dirty_add - Add a dirty region to a subresource | ||
1818 | * @dirty: The surface's dirty tracker. | ||
1819 | * @loc_start: The location corresponding to the start of the region. | ||
1820 | * @loc_end: The location corresponding to the end of the region. | ||
1821 | * | ||
1822 | * Since @loc_start and @loc_end are assumed to represent a sequential | ||
1823 | * range of backing-store memory, a region that spans multiple lines | ||
1824 | * dirties the full lines regardless of the x coordinate. | ||
1825 | * Correspondingly, a region that spans multiple z slices dirties full | ||
1826 | * rather than partial z slices. | ||
1827 | */ | ||
1828 | static void vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, | ||
1829 | const struct svga3dsurface_loc *loc_start, | ||
1830 | const struct svga3dsurface_loc *loc_end) | ||
1831 | { | ||
1832 | const struct svga3dsurface_cache *cache = &dirty->cache; | ||
1833 | SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; | ||
1834 | u32 mip = loc_start->sub_resource % cache->num_mip_levels; | ||
1835 | const struct drm_vmw_size *size = &cache->mip[mip].size; | ||
1836 | u32 box_c2 = box->z + box->d; | ||
1837 | |||
1838 | if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) | ||
1839 | return; | ||
1840 | |||
1841 | if (box->d == 0 || box->z > loc_start->z) | ||
1842 | box->z = loc_start->z; | ||
1843 | if (box_c2 < loc_end->z) | ||
1844 | box->d = loc_end->z - box->z; | ||
1845 | |||
1846 | if (loc_start->z + 1 == loc_end->z) { | ||
1847 | box_c2 = box->y + box->h; | ||
1848 | if (box->h == 0 || box->y > loc_start->y) | ||
1849 | box->y = loc_start->y; | ||
1850 | if (box_c2 < loc_end->y) | ||
1851 | box->h = loc_end->y - box->y; | ||
1852 | |||
1853 | if (loc_start->y + 1 == loc_end->y) { | ||
1854 | box_c2 = box->x + box->w; | ||
1855 | if (box->w == 0 || box->x > loc_start->x) | ||
1856 | box->x = loc_start->x; | ||
1857 | if (box_c2 < loc_end->x) | ||
1858 | box->w = loc_end->x - box->x; | ||
1859 | } else { | ||
1860 | box->x = 0; | ||
1861 | box->w = size->width; | ||
1862 | } | ||
1863 | } else { | ||
1864 | box->y = 0; | ||
1865 | box->h = size->height; | ||
1866 | box->x = 0; | ||
1867 | box->w = size->width; | ||
1868 | } | ||
1869 | } | ||
1870 | |||
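Per the comment above, vmw_subres_dirty_add() keeps one bounding box per subresource and, because the reported range is a linear span of backing store, widens the box to full lines or full slices whenever the span crosses a line or slice boundary. The per-axis step is essentially an interval union; a small standalone sketch of that idea (illustrative names, not the driver's types):

#include <stdio.h>

/*
 * Grow the 1-D interval [*pos, *pos + *len) so it also covers [lo, hi).
 * This is the per-axis idea the removed helper applies to z, y and x in
 * turn, written here as a plain union.
 */
static void grow_interval(unsigned int *pos, unsigned int *len,
                          unsigned int lo, unsigned int hi)
{
        unsigned int end = *pos + *len;

        if (*len == 0) {
                *pos = lo;
                end = hi;
        } else {
                if (lo < *pos)
                        *pos = lo;
                if (hi > end)
                        end = hi;
        }
        *len = end - *pos;
}

int main(void)
{
        unsigned int y = 0, h = 0;      /* empty box on the y axis */

        grow_interval(&y, &h, 8, 12);   /* dirty rows 8..11 */
        grow_interval(&y, &h, 2, 4);    /* dirty rows 2..3  */
        printf("box covers rows [%u, %u)\n", y, y + h); /* [2, 12) */
        return 0;
}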
1871 | /** | ||
1872 | * vmw_subres_dirty_full - Mark a full subresource as dirty | ||
1873 | * @dirty: The surface's dirty tracker. | ||
1874 | * @subres: The subresource | ||
1875 | */ | ||
1876 | static void vmw_subres_dirty_full(struct vmw_surface_dirty *dirty, u32 subres) | ||
1877 | { | ||
1878 | const struct svga3dsurface_cache *cache = &dirty->cache; | ||
1879 | u32 mip = subres % cache->num_mip_levels; | ||
1880 | const struct drm_vmw_size *size = &cache->mip[mip].size; | ||
1881 | SVGA3dBox *box = &dirty->boxes[subres]; | ||
1882 | |||
1883 | box->x = 0; | ||
1884 | box->y = 0; | ||
1885 | box->z = 0; | ||
1886 | box->w = size->width; | ||
1887 | box->h = size->height; | ||
1888 | box->d = size->depth; | ||
1889 | } | ||
1890 | |||
1891 | /* | ||
1892 | * vmw_surface_tex_dirty_range_add - The dirty_add_range callback for texture | ||
1893 | * surfaces. | ||
1894 | */ | ||
1895 | static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res, | ||
1896 | size_t start, size_t end) | ||
1897 | { | ||
1898 | struct vmw_surface_dirty *dirty = | ||
1899 | (struct vmw_surface_dirty *) res->dirty; | ||
1900 | size_t backup_end = res->backup_offset + res->backup_size; | ||
1901 | struct svga3dsurface_loc loc1, loc2; | ||
1902 | const struct svga3dsurface_cache *cache; | ||
1903 | |||
1904 | start = max_t(size_t, start, res->backup_offset) - res->backup_offset; | ||
1905 | end = min(end, backup_end) - res->backup_offset; | ||
1906 | cache = &dirty->cache; | ||
1907 | svga3dsurface_get_loc(cache, &loc1, start); | ||
1908 | svga3dsurface_get_loc(cache, &loc2, end - 1); | ||
1909 | svga3dsurface_inc_loc(cache, &loc2); | ||
1910 | |||
1911 | if (loc1.sub_resource + 1 == loc2.sub_resource) { | ||
1912 | /* Dirty range covers a single sub-resource */ | ||
1913 | vmw_subres_dirty_add(dirty, &loc1, &loc2); | ||
1914 | } else { | ||
1915 | /* Dirty range covers multiple sub-resources */ | ||
1916 | struct svga3dsurface_loc loc_min, loc_max; | ||
1917 | u32 sub_res = loc1.sub_resource; | ||
1918 | |||
1919 | svga3dsurface_max_loc(cache, loc1.sub_resource, &loc_max); | ||
1920 | vmw_subres_dirty_add(dirty, &loc1, &loc_max); | ||
1921 | svga3dsurface_min_loc(cache, loc2.sub_resource - 1, &loc_min); | ||
1922 | vmw_subres_dirty_add(dirty, &loc_min, &loc2); | ||
1923 | for (sub_res = loc1.sub_resource + 1; | ||
1924 | sub_res < loc2.sub_resource - 1; ++sub_res) | ||
1925 | vmw_subres_dirty_full(dirty, sub_res); | ||
1926 | } | ||
1927 | } | ||
1928 | |||
1929 | /* | ||
1930 | * vmw_surface_buf_dirty_range_add - The dirty_add_range callback for buffer | ||
1931 | * surfaces. | ||
1932 | */ | ||
1933 | static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res, | ||
1934 | size_t start, size_t end) | ||
1935 | { | ||
1936 | struct vmw_surface_dirty *dirty = | ||
1937 | (struct vmw_surface_dirty *) res->dirty; | ||
1938 | const struct svga3dsurface_cache *cache = &dirty->cache; | ||
1939 | size_t backup_end = res->backup_offset + cache->mip_chain_bytes; | ||
1940 | SVGA3dBox *box = &dirty->boxes[0]; | ||
1941 | u32 box_c2; | ||
1942 | |||
1943 | box->h = box->d = 1; | ||
1944 | start = max_t(size_t, start, res->backup_offset) - res->backup_offset; | ||
1945 | end = min(end, backup_end) - res->backup_offset; | ||
1946 | box_c2 = box->x + box->w; | ||
1947 | if (box->w == 0 || box->x > start) | ||
1948 | box->x = start; | ||
1949 | if (box_c2 < end) | ||
1950 | box->w = end - box->x; | ||
1951 | } | ||
1952 | |||
1953 | /* | ||
1954 | * vmw_surface_dirty_range_add - The dirty_add_range callback for surfaces | ||
1955 | */ | ||
1956 | static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start, | ||
1957 | size_t end) | ||
1958 | { | ||
1959 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
1960 | |||
1961 | if (WARN_ON(end <= res->backup_offset || | ||
1962 | start >= res->backup_offset + res->backup_size)) | ||
1963 | return; | ||
1964 | |||
1965 | if (srf->format == SVGA3D_BUFFER) | ||
1966 | vmw_surface_buf_dirty_range_add(res, start, end); | ||
1967 | else | ||
1968 | vmw_surface_tex_dirty_range_add(res, start, end); | ||
1969 | } | ||
1970 | |||
1971 | /* | ||
1972 | * vmw_surface_dirty_sync - The surface's dirty_sync callback. | ||
1973 | */ | ||
1974 | static int vmw_surface_dirty_sync(struct vmw_resource *res) | ||
1975 | { | ||
1976 | struct vmw_private *dev_priv = res->dev_priv; | ||
1977 | bool has_dx = false; | ||
1978 | u32 i, num_dirty; | ||
1979 | struct vmw_surface_dirty *dirty = | ||
1980 | (struct vmw_surface_dirty *) res->dirty; | ||
1981 | size_t alloc_size; | ||
1982 | const struct svga3dsurface_cache *cache = &dirty->cache; | ||
1983 | struct { | ||
1984 | SVGA3dCmdHeader header; | ||
1985 | SVGA3dCmdDXUpdateSubResource body; | ||
1986 | } *cmd1; | ||
1987 | struct { | ||
1988 | SVGA3dCmdHeader header; | ||
1989 | SVGA3dCmdUpdateGBImage body; | ||
1990 | } *cmd2; | ||
1991 | void *cmd; | ||
1992 | |||
1993 | num_dirty = 0; | ||
1994 | for (i = 0; i < dirty->num_subres; ++i) { | ||
1995 | const SVGA3dBox *box = &dirty->boxes[i]; | ||
1996 | |||
1997 | if (box->d) | ||
1998 | num_dirty++; | ||
1999 | } | ||
2000 | |||
2001 | if (!num_dirty) | ||
2002 | goto out; | ||
2003 | |||
2004 | alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2)); | ||
2005 | cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); | ||
2006 | if (!cmd) | ||
2007 | return -ENOMEM; | ||
2008 | |||
2009 | cmd1 = cmd; | ||
2010 | cmd2 = cmd; | ||
2011 | |||
2012 | for (i = 0; i < dirty->num_subres; ++i) { | ||
2013 | const SVGA3dBox *box = &dirty->boxes[i]; | ||
2014 | |||
2015 | if (!box->d) | ||
2016 | continue; | ||
2017 | |||
2018 | /* | ||
2019 | * DX_UPDATE_SUBRESOURCE is aware of array surfaces. | ||
2020 | * UPDATE_GB_IMAGE is not. | ||
2021 | */ | ||
2022 | if (has_dx) { | ||
2023 | cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; | ||
2024 | cmd1->header.size = sizeof(cmd1->body); | ||
2025 | cmd1->body.sid = res->id; | ||
2026 | cmd1->body.subResource = i; | ||
2027 | cmd1->body.box = *box; | ||
2028 | cmd1++; | ||
2029 | } else { | ||
2030 | cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; | ||
2031 | cmd2->header.size = sizeof(cmd2->body); | ||
2032 | cmd2->body.image.sid = res->id; | ||
2033 | cmd2->body.image.face = i / cache->num_mip_levels; | ||
2034 | cmd2->body.image.mipmap = i - | ||
2035 | (cache->num_mip_levels * cmd2->body.image.face); | ||
2036 | cmd2->body.box = *box; | ||
2037 | cmd2++; | ||
2038 | } | ||
2039 | |||
2040 | } | ||
2041 | vmw_fifo_commit(dev_priv, alloc_size); | ||
2042 | out: | ||
2043 | memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * | ||
2044 | dirty->num_subres); | ||
2045 | |||
2046 | return 0; | ||
2047 | } | ||
2048 | |||
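In the non-DX branch of vmw_surface_dirty_sync() above, the flat subresource index is split into a face and a mip level before an UPDATE_GB_IMAGE command is built (face = i / num_mip_levels, mipmap = the remainder). A quick standalone illustration of that index math with made-up counts:

#include <stdio.h>

int main(void)
{
        unsigned int num_mip_levels = 3;        /* mips per face */
        unsigned int i;

        /* Subresources are laid out face-major: face * num_mip + mip. */
        for (i = 0; i < 6 * num_mip_levels; i++) {
                unsigned int face = i / num_mip_levels;
                unsigned int mip = i - num_mip_levels * face;   /* == i % num_mip_levels */

                printf("subres %2u -> face %u, mip %u\n", i, face, mip);
        }
        return 0;
}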
2049 | /* | ||
2050 | * vmw_surface_dirty_alloc - The surface's dirty_alloc callback. | ||
2051 | */ | ||
2052 | static int vmw_surface_dirty_alloc(struct vmw_resource *res) | ||
2053 | { | ||
2054 | struct vmw_surface *srf = vmw_res_to_srf(res); | ||
2055 | struct vmw_surface_dirty *dirty; | ||
2056 | u32 num_layers = 1; | ||
2057 | u32 num_mip; | ||
2058 | u32 num_subres; | ||
2059 | u32 num_samples; | ||
2060 | size_t dirty_size, acc_size; | ||
2061 | static struct ttm_operation_ctx ctx = { | ||
2062 | .interruptible = false, | ||
2063 | .no_wait_gpu = false | ||
2064 | }; | ||
2065 | int ret; | ||
2066 | |||
2067 | if (srf->array_size) | ||
2068 | num_layers = srf->array_size; | ||
2069 | else if (srf->flags & SVGA3D_SURFACE_CUBEMAP) | ||
2070 | num_layers *= SVGA3D_MAX_SURFACE_FACES; | ||
2071 | |||
2072 | num_mip = srf->mip_levels[0]; | ||
2073 | if (!num_mip) | ||
2074 | num_mip = 1; | ||
2075 | |||
2076 | num_subres = num_layers * num_mip; | ||
2077 | dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]); | ||
2078 | acc_size = ttm_round_pot(dirty_size); | ||
2079 | ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), | ||
2080 | acc_size, &ctx); | ||
2081 | if (ret) { | ||
2082 | VMW_DEBUG_USER("Out of graphics memory for surface " | ||
2083 | "dirty tracker.\n"); | ||
2084 | return ret; | ||
2085 | } | ||
2086 | |||
2087 | dirty = kvzalloc(dirty_size, GFP_KERNEL); | ||
2088 | if (!dirty) { | ||
2089 | ret = -ENOMEM; | ||
2090 | goto out_no_dirty; | ||
2091 | } | ||
2092 | |||
2093 | num_samples = max_t(u32, 1, srf->multisample_count); | ||
2094 | ret = svga3dsurface_setup_cache(&srf->base_size, srf->format, num_mip, | ||
2095 | num_layers, num_samples, &dirty->cache); | ||
2096 | if (ret) | ||
2097 | goto out_no_cache; | ||
2098 | |||
2099 | dirty->num_subres = num_subres; | ||
2100 | dirty->size = acc_size; | ||
2101 | res->dirty = (struct vmw_resource_dirty *) dirty; | ||
2102 | |||
2103 | return 0; | ||
2104 | |||
2105 | out_no_cache: | ||
2106 | kvfree(dirty); | ||
2107 | out_no_dirty: | ||
2108 | ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); | ||
2109 | return ret; | ||
2110 | } | ||
2111 | |||
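vmw_surface_dirty_alloc() above sizes a single allocation for the tracker header plus one box per subresource (layers or cube faces times mip levels), relying on the boxes[0] array at the end of struct vmw_surface_dirty. A userspace sketch of the same sizing pattern, using a C99 flexible array member and an illustrative struct:

#include <stdio.h>
#include <stdlib.h>

struct box {
        unsigned int x, y, z, w, h, d;
};

struct dirty_tracker {
        unsigned int num_subres;
        struct box boxes[];     /* one box per subresource */
};

static struct dirty_tracker *tracker_alloc(unsigned int num_layers,
                                           unsigned int num_mips)
{
        unsigned int num_subres = num_layers * num_mips;
        size_t size = sizeof(struct dirty_tracker) +
                      num_subres * sizeof(struct box);
        struct dirty_tracker *dirty = calloc(1, size);

        if (!dirty)
                return NULL;
        dirty->num_subres = num_subres;
        return dirty;
}

int main(void)
{
        struct dirty_tracker *dirty = tracker_alloc(6, 3); /* cubemap, 3 mips */

        if (!dirty)
                return 1;
        printf("%u subresources tracked\n", dirty->num_subres);
        free(dirty);
        return 0;
}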
2112 | /* | ||
2113 | * vmw_surface_dirty_free - The surface's dirty_free callback | ||
2114 | */ | ||
2115 | static void vmw_surface_dirty_free(struct vmw_resource *res) | ||
2116 | { | ||
2117 | struct vmw_surface_dirty *dirty = | ||
2118 | (struct vmw_surface_dirty *) res->dirty; | ||
2119 | size_t acc_size = dirty->size; | ||
2120 | |||
2121 | kvfree(dirty); | ||
2122 | ttm_mem_global_free(vmw_mem_glob(res->dev_priv), acc_size); | ||
2123 | res->dirty = NULL; | ||
2124 | } | ||
2125 | |||
2126 | /* | ||
2127 | * vmw_surface_clean - The surface's clean callback | ||
2128 | */ | ||
2129 | static int vmw_surface_clean(struct vmw_resource *res) | ||
2130 | { | ||
2131 | struct vmw_private *dev_priv = res->dev_priv; | ||
2132 | size_t alloc_size; | ||
2133 | struct { | ||
2134 | SVGA3dCmdHeader header; | ||
2135 | SVGA3dCmdReadbackGBSurface body; | ||
2136 | } *cmd; | ||
2137 | |||
2138 | alloc_size = sizeof(*cmd); | ||
2139 | cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); | ||
2140 | if (!cmd) | ||
2141 | return -ENOMEM; | ||
2142 | |||
2143 | cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; | ||
2144 | cmd->header.size = sizeof(cmd->body); | ||
2145 | cmd->body.sid = res->id; | ||
2146 | vmw_fifo_commit(dev_priv, alloc_size); | ||
2147 | |||
2148 | return 0; | ||
2149 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index 9aaf807ed73c..f611b2290a1b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | |||
@@ -33,8 +33,6 @@ | |||
33 | * struct vmw_validation_bo_node - Buffer object validation metadata. | 33 | * struct vmw_validation_bo_node - Buffer object validation metadata. |
34 | * @base: Metadata used for TTM reservation- and validation. | 34 | * @base: Metadata used for TTM reservation- and validation. |
35 | * @hash: A hash entry used for the duplicate detection hash table. | 35 | * @hash: A hash entry used for the duplicate detection hash table. |
36 | * @coherent_count: If switching backup buffers, number of new coherent | ||
37 | * resources that will have this buffer as a backup buffer. | ||
38 | * @as_mob: Validate as mob. | 36 | * @as_mob: Validate as mob. |
39 | * @cpu_blit: Validate for cpu blit access. | 37 | * @cpu_blit: Validate for cpu blit access. |
40 | * | 38 | * |
@@ -44,7 +42,6 @@ | |||
44 | struct vmw_validation_bo_node { | 42 | struct vmw_validation_bo_node { |
45 | struct ttm_validate_buffer base; | 43 | struct ttm_validate_buffer base; |
46 | struct drm_hash_item hash; | 44 | struct drm_hash_item hash; |
47 | unsigned int coherent_count; | ||
48 | u32 as_mob : 1; | 45 | u32 as_mob : 1; |
49 | u32 cpu_blit : 1; | 46 | u32 cpu_blit : 1; |
50 | }; | 47 | }; |
@@ -462,19 +459,6 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx, | |||
462 | if (ret) | 459 | if (ret) |
463 | goto out_unreserve; | 460 | goto out_unreserve; |
464 | } | 461 | } |
465 | |||
466 | if (val->switching_backup && val->new_backup && | ||
467 | res->coherent) { | ||
468 | struct vmw_validation_bo_node *bo_node = | ||
469 | vmw_validation_find_bo_dup(ctx, | ||
470 | val->new_backup); | ||
471 | |||
472 | if (WARN_ON(!bo_node)) { | ||
473 | ret = -EINVAL; | ||
474 | goto out_unreserve; | ||
475 | } | ||
476 | bo_node->coherent_count++; | ||
477 | } | ||
478 | } | 462 | } |
479 | 463 | ||
480 | return 0; | 464 | return 0; |
@@ -578,9 +562,6 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) | |||
578 | int ret; | 562 | int ret; |
579 | 563 | ||
580 | list_for_each_entry(entry, &ctx->bo_list, base.head) { | 564 | list_for_each_entry(entry, &ctx->bo_list, base.head) { |
581 | struct vmw_buffer_object *vbo = | ||
582 | container_of(entry->base.bo, typeof(*vbo), base); | ||
583 | |||
584 | if (entry->cpu_blit) { | 565 | if (entry->cpu_blit) { |
585 | struct ttm_operation_ctx ctx = { | 566 | struct ttm_operation_ctx ctx = { |
586 | .interruptible = intr, | 567 | .interruptible = intr, |
@@ -595,27 +576,6 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr) | |||
595 | } | 576 | } |
596 | if (ret) | 577 | if (ret) |
597 | return ret; | 578 | return ret; |
598 | |||
599 | /* | ||
600 | * Rather than having the resource code allocate the bo | ||
601 | * dirty tracker in resource_unreserve(), where we can't fail, | ||
602 | * do it here when validating the buffer object. | ||
603 | */ | ||
604 | if (entry->coherent_count) { | ||
605 | unsigned int coherent_count = entry->coherent_count; | ||
606 | |||
607 | while (coherent_count) { | ||
608 | ret = vmw_bo_dirty_add(vbo); | ||
609 | if (ret) | ||
610 | return ret; | ||
611 | |||
612 | coherent_count--; | ||
613 | } | ||
614 | entry->coherent_count -= coherent_count; | ||
615 | } | ||
616 | |||
617 | if (vbo->dirty) | ||
618 | vmw_bo_dirty_scan(vbo); | ||
619 | } | 579 | } |
620 | return 0; | 580 | return 0; |
621 | } | 581 | } |
@@ -641,8 +601,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr) | |||
641 | struct vmw_resource *res = val->res; | 601 | struct vmw_resource *res = val->res; |
642 | struct vmw_buffer_object *backup = res->backup; | 602 | struct vmw_buffer_object *backup = res->backup; |
643 | 603 | ||
644 | ret = vmw_resource_validate(res, intr, val->dirty_set && | 604 | ret = vmw_resource_validate(res, intr); |
645 | val->dirty); | ||
646 | if (ret) { | 605 | if (ret) { |
647 | if (ret != -ERESTARTSYS) | 606 | if (ret != -ERESTARTSYS) |
648 | DRM_ERROR("Failed to validate resource.\n"); | 607 | DRM_ERROR("Failed to validate resource.\n"); |
@@ -869,34 +828,3 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx, | |||
869 | ctx->mem_size_left += size; | 828 | ctx->mem_size_left += size; |
870 | return 0; | 829 | return 0; |
871 | } | 830 | } |
872 | |||
873 | /** | ||
874 | * vmw_validation_bo_backoff - Unreserve buffer objects registered with a | ||
875 | * validation context | ||
876 | * @ctx: The validation context | ||
877 | * | ||
878 | * This function unreserves the buffer objects previously reserved using | ||
879 | * vmw_validation_bo_reserve. It's typically used as part of an error path. | ||
880 | */ | ||
881 | void vmw_validation_bo_backoff(struct vmw_validation_context *ctx) | ||
882 | { | ||
883 | struct vmw_validation_bo_node *entry; | ||
884 | |||
885 | /* | ||
886 | * Switching coherent resource backup buffers failed. | ||
887 | * Release corresponding buffer object dirty trackers. | ||
888 | */ | ||
889 | list_for_each_entry(entry, &ctx->bo_list, base.head) { | ||
890 | if (entry->coherent_count) { | ||
891 | unsigned int coherent_count = entry->coherent_count; | ||
892 | struct vmw_buffer_object *vbo = | ||
893 | container_of(entry->base.bo, typeof(*vbo), | ||
894 | base); | ||
895 | |||
896 | while (coherent_count--) | ||
897 | vmw_bo_dirty_release(vbo); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list); | ||
902 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index fd83e017c2a5..1d2322ad6fd5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | |||
@@ -173,6 +173,20 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx, | |||
173 | } | 173 | } |
174 | 174 | ||
175 | /** | 175 | /** |
176 | * vmw_validation_bo_backoff - Unreserve buffer objects registered with a | ||
177 | * validation context | ||
178 | * @ctx: The validation context | ||
179 | * | ||
180 | * This function unreserves the buffer objects previously reserved using | ||
182 | * vmw_validation_bo_reserve. It's typically used as part of an error path. | ||
182 | */ | ||
183 | static inline void | ||
184 | vmw_validation_bo_backoff(struct vmw_validation_context *ctx) | ||
185 | { | ||
186 | ttm_eu_backoff_reservation(&ctx->ticket, &ctx->bo_list); | ||
187 | } | ||
188 | |||
189 | /** | ||
176 | * vmw_validation_bo_fence - Unreserve and fence buffer objects registered | 190 | * vmw_validation_bo_fence - Unreserve and fence buffer objects registered |
177 | * with a validation context | 191 | * with a validation context |
178 | * @ctx: The validation context | 192 | * @ctx: The validation context |
@@ -254,6 +268,4 @@ int vmw_validation_preload_res(struct vmw_validation_context *ctx, | |||
254 | unsigned int size); | 268 | unsigned int size); |
255 | void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, | 269 | void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx, |
256 | void *val_private, u32 dirty); | 270 | void *val_private, u32 dirty); |
257 | void vmw_validation_bo_backoff(struct vmw_validation_context *ctx); | ||
258 | |||
259 | #endif | 271 | #endif |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 435d02f719a8..49d9cdfc58f2 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -768,14 +768,4 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, | |||
768 | struct ttm_operation_ctx *ctx); | 768 | struct ttm_operation_ctx *ctx); |
769 | void ttm_bo_swapout_all(struct ttm_bo_device *bdev); | 769 | void ttm_bo_swapout_all(struct ttm_bo_device *bdev); |
770 | int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); | 770 | int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo); |
771 | |||
772 | /* Default number of pre-faulted pages in the TTM fault handler */ | ||
773 | #define TTM_BO_VM_NUM_PREFAULT 16 | ||
774 | |||
775 | vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, | ||
776 | struct vm_fault *vmf); | ||
777 | |||
778 | vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, | ||
779 | pgprot_t prot, | ||
780 | pgoff_t num_prefault); | ||
781 | #endif | 771 | #endif |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a2d810a2504d..c9b8ba492f24 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
@@ -442,9 +442,6 @@ extern struct ttm_bo_global { | |||
442 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. | 442 | * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver. |
443 | * @man: An array of mem_type_managers. | 443 | * @man: An array of mem_type_managers. |
444 | * @vma_manager: Address space manager | 444 | * @vma_manager: Address space manager |
445 | * @vm_ops: Pointer to the struct vm_operations_struct used for this | ||
446 | * device's VM operations. The driver may override this before the first | ||
447 | * mmap() call. | ||
448 | * lru_lock: Spinlock that protects the buffer+device lru lists and | 445 | * lru_lock: Spinlock that protects the buffer+device lru lists and |
449 | * ddestroy lists. | 446 | * ddestroy lists. |
450 | * @dev_mapping: A pointer to the struct address_space representing the | 447 | * @dev_mapping: A pointer to the struct address_space representing the |
@@ -463,7 +460,6 @@ struct ttm_bo_device { | |||
463 | struct ttm_bo_global *glob; | 460 | struct ttm_bo_global *glob; |
464 | struct ttm_bo_driver *driver; | 461 | struct ttm_bo_driver *driver; |
465 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; | 462 | struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES]; |
466 | const struct vm_operations_struct *vm_ops; | ||
467 | 463 | ||
468 | /* | 464 | /* |
469 | * Protected by internal locks. | 465 | * Protected by internal locks. |
@@ -492,8 +488,6 @@ struct ttm_bo_device { | |||
492 | bool no_retry; | 488 | bool no_retry; |
493 | }; | 489 | }; |
494 | 490 | ||
495 | extern const struct vm_operations_struct ttm_bo_vm_ops; | ||
496 | |||
497 | /** | 491 | /** |
498 | * struct ttm_lru_bulk_move_pos | 492 | * struct ttm_lru_bulk_move_pos |
499 | * | 493 | * |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 798cdda9560e..dd0b5f4e1e45 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -2686,24 +2686,7 @@ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | |||
2686 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, | 2686 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, |
2687 | unsigned long size, pte_fn_t fn, void *data); | 2687 | unsigned long size, pte_fn_t fn, void *data); |
2688 | 2688 | ||
2689 | struct pfn_range_apply; | 2689 | |
2690 | typedef int (*pter_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | ||
2691 | struct pfn_range_apply *closure); | ||
2692 | struct pfn_range_apply { | ||
2693 | struct mm_struct *mm; | ||
2694 | pter_fn_t ptefn; | ||
2695 | unsigned int alloc; | ||
2696 | }; | ||
2697 | extern int apply_to_pfn_range(struct pfn_range_apply *closure, | ||
2698 | unsigned long address, unsigned long size); | ||
2699 | unsigned long apply_as_wrprotect(struct address_space *mapping, | ||
2700 | pgoff_t first_index, pgoff_t nr); | ||
2701 | unsigned long apply_as_clean(struct address_space *mapping, | ||
2702 | pgoff_t first_index, pgoff_t nr, | ||
2703 | pgoff_t bitmap_pgoff, | ||
2704 | unsigned long *bitmap, | ||
2705 | pgoff_t *start, | ||
2706 | pgoff_t *end); | ||
2707 | #ifdef CONFIG_PAGE_POISONING | 2690 | #ifdef CONFIG_PAGE_POISONING |
2708 | extern bool page_poisoning_enabled(void); | 2691 | extern bool page_poisoning_enabled(void); |
2709 | extern void kernel_poison_pages(struct page *page, int numpages, int enable); | 2692 | extern void kernel_poison_pages(struct page *page, int numpages, int enable); |
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h index 02cab33f2f25..399f58317cff 100644 --- a/include/uapi/drm/vmwgfx_drm.h +++ b/include/uapi/drm/vmwgfx_drm.h | |||
@@ -891,13 +891,11 @@ struct drm_vmw_shader_arg { | |||
891 | * surface. | 891 | * surface. |
892 | * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is | 892 | * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is |
893 | * given. | 893 | * given. |
894 | * @drm_vmw_surface_flag_coherent: Back surface with coherent memory. | ||
895 | */ | 894 | */ |
896 | enum drm_vmw_surface_flags { | 895 | enum drm_vmw_surface_flags { |
897 | drm_vmw_surface_flag_shareable = (1 << 0), | 896 | drm_vmw_surface_flag_shareable = (1 << 0), |
898 | drm_vmw_surface_flag_scanout = (1 << 1), | 897 | drm_vmw_surface_flag_scanout = (1 << 1), |
899 | drm_vmw_surface_flag_create_buffer = (1 << 2), | 898 | drm_vmw_surface_flag_create_buffer = (1 << 2) |
900 | drm_vmw_surface_flag_coherent = (1 << 3), | ||
901 | }; | 899 | }; |
902 | 900 | ||
903 | /** | 901 | /** |
diff --git a/mm/Kconfig b/mm/Kconfig index 5006d0e6a5c7..f0c76ba47695 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -765,7 +765,4 @@ config GUP_BENCHMARK | |||
765 | config ARCH_HAS_PTE_SPECIAL | 765 | config ARCH_HAS_PTE_SPECIAL |
766 | bool | 766 | bool |
767 | 767 | ||
768 | config AS_DIRTY_HELPERS | ||
769 | bool | ||
770 | |||
771 | endmenu | 768 | endmenu |
diff --git a/mm/Makefile b/mm/Makefile index f5d412bbc2f7..ac5e5ba78874 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -104,4 +104,3 @@ obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o | |||
104 | obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o | 104 | obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o |
105 | obj-$(CONFIG_HMM) += hmm.o | 105 | obj-$(CONFIG_HMM) += hmm.o |
106 | obj-$(CONFIG_MEMFD_CREATE) += memfd.o | 106 | obj-$(CONFIG_MEMFD_CREATE) += memfd.o |
107 | obj-$(CONFIG_AS_DIRTY_HELPERS) += as_dirty_helpers.o | ||
diff --git a/mm/as_dirty_helpers.c b/mm/as_dirty_helpers.c deleted file mode 100644 index f600e31534fb..000000000000 --- a/mm/as_dirty_helpers.c +++ /dev/null | |||
@@ -1,300 +0,0 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/mm.h> | ||
3 | #include <linux/mm_types.h> | ||
4 | #include <linux/hugetlb.h> | ||
5 | #include <linux/bitops.h> | ||
6 | #include <linux/mmu_notifier.h> | ||
7 | #include <asm/cacheflush.h> | ||
8 | #include <asm/tlbflush.h> | ||
9 | |||
10 | /** | ||
11 | * struct apply_as - Closure structure for apply_as_range | ||
12 | * @base: struct pfn_range_apply we derive from | ||
13 | * @start: Address of first modified pte | ||
14 | * @end: Address of last modified pte + 1 | ||
15 | * @total: Total number of modified ptes | ||
16 | * @vma: Pointer to the struct vm_area_struct we're currently operating on | ||
17 | */ | ||
18 | struct apply_as { | ||
19 | struct pfn_range_apply base; | ||
20 | unsigned long start; | ||
21 | unsigned long end; | ||
22 | unsigned long total; | ||
23 | struct vm_area_struct *vma; | ||
24 | }; | ||
25 | |||
26 | /** | ||
27 | * apply_pt_wrprotect - Leaf pte callback to write-protect a pte | ||
28 | * @pte: Pointer to the pte | ||
29 | * @token: Page table token, see apply_to_pfn_range() | ||
30 | * @addr: The virtual page address | ||
31 | * @closure: Pointer to a struct pfn_range_apply embedded in a | ||
32 | * struct apply_as | ||
33 | * | ||
34 | * The function write-protects a pte and records the range in | ||
35 | * virtual address space of touched ptes for efficient range TLB flushes. | ||
36 | * | ||
37 | * Return: Always zero. | ||
38 | */ | ||
39 | static int apply_pt_wrprotect(pte_t *pte, pgtable_t token, | ||
40 | unsigned long addr, | ||
41 | struct pfn_range_apply *closure) | ||
42 | { | ||
43 | struct apply_as *aas = container_of(closure, typeof(*aas), base); | ||
44 | pte_t ptent = *pte; | ||
45 | |||
46 | if (pte_write(ptent)) { | ||
47 | pte_t old_pte = ptep_modify_prot_start(aas->vma, addr, pte); | ||
48 | |||
49 | ptent = pte_wrprotect(old_pte); | ||
50 | ptep_modify_prot_commit(aas->vma, addr, pte, old_pte, ptent); | ||
51 | aas->total++; | ||
52 | aas->start = min(aas->start, addr); | ||
53 | aas->end = max(aas->end, addr + PAGE_SIZE); | ||
54 | } | ||
55 | |||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * struct apply_as_clean - Closure structure for apply_as_clean | ||
61 | * @base: struct apply_as we derive from | ||
62 | * @bitmap_pgoff: Address_space Page offset of the first bit in @bitmap | ||
63 | * @bitmap: Bitmap with one bit for each page offset in the address_space range | ||
64 | * covered. | ||
65 | * @start: Address_space page offset of first modified pte relative | ||
66 | * to @bitmap_pgoff | ||
67 | * @end: Address_space page offset of last modified pte relative | ||
68 | * to @bitmap_pgoff | ||
69 | */ | ||
70 | struct apply_as_clean { | ||
71 | struct apply_as base; | ||
72 | pgoff_t bitmap_pgoff; | ||
73 | unsigned long *bitmap; | ||
74 | pgoff_t start; | ||
75 | pgoff_t end; | ||
76 | }; | ||
77 | |||
78 | /** | ||
79 | * apply_pt_clean - Leaf pte callback to clean a pte | ||
80 | * @pte: Pointer to the pte | ||
81 | * @token: Page table token, see apply_to_pfn_range() | ||
82 | * @addr: The virtual page address | ||
83 | * @closure: Pointer to a struct pfn_range_apply embedded in a | ||
84 | * struct apply_as_clean | ||
85 | * | ||
86 | * The function cleans a pte and records the range in | ||
87 | * virtual address space of touched ptes for efficient TLB flushes. | ||
88 | * It also records dirty ptes in a bitmap representing page offsets | ||
89 | * in the address_space, as well as the first and last of the bits | ||
90 | * touched. | ||
91 | * | ||
92 | * Return: Always zero. | ||
93 | */ | ||
94 | static int apply_pt_clean(pte_t *pte, pgtable_t token, | ||
95 | unsigned long addr, | ||
96 | struct pfn_range_apply *closure) | ||
97 | { | ||
98 | struct apply_as *aas = container_of(closure, typeof(*aas), base); | ||
99 | struct apply_as_clean *clean = container_of(aas, typeof(*clean), base); | ||
100 | pte_t ptent = *pte; | ||
101 | |||
102 | if (pte_dirty(ptent)) { | ||
103 | pgoff_t pgoff = ((addr - aas->vma->vm_start) >> PAGE_SHIFT) + | ||
104 | aas->vma->vm_pgoff - clean->bitmap_pgoff; | ||
105 | pte_t old_pte = ptep_modify_prot_start(aas->vma, addr, pte); | ||
106 | |||
107 | ptent = pte_mkclean(old_pte); | ||
108 | ptep_modify_prot_commit(aas->vma, addr, pte, old_pte, ptent); | ||
109 | |||
110 | aas->total++; | ||
111 | aas->start = min(aas->start, addr); | ||
112 | aas->end = max(aas->end, addr + PAGE_SIZE); | ||
113 | |||
114 | __set_bit(pgoff, clean->bitmap); | ||
115 | clean->start = min(clean->start, pgoff); | ||
116 | clean->end = max(clean->end, pgoff + 1); | ||
117 | } | ||
118 | |||
119 | return 0; | ||
120 | } | ||
121 | |||
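For each dirty pte, apply_pt_clean() above clears the dirty bit, records the page's offset in the caller-supplied bitmap, and keeps a running [start, end) window of touched offsets so the caller only has to scan the interesting part of the bitmap. A userspace sketch of that bookkeeping over a plain array of per-page dirty flags (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 16UL

static unsigned long bitmap[1];         /* one bit per page offset */

static unsigned long clean_pages(bool *dirty, unsigned long npages,
                                 unsigned long *start, unsigned long *end)
{
        unsigned long pgoff, total = 0;

        for (pgoff = 0; pgoff < npages; pgoff++) {
                if (!dirty[pgoff])
                        continue;
                dirty[pgoff] = false;           /* pte_mkclean() analogue */
                bitmap[0] |= 1UL << pgoff;      /* __set_bit() analogue */
                if (pgoff < *start)
                        *start = pgoff;
                if (pgoff + 1 > *end)
                        *end = pgoff + 1;
                total++;
        }
        return total;
}

int main(void)
{
        bool dirty[NPAGES] = { [3] = true, [4] = true, [9] = true };
        /* "none set" convention: start >= end */
        unsigned long start = NPAGES, end = 0;
        unsigned long n = clean_pages(dirty, NPAGES, &start, &end);

        printf("cleaned %lu pages, window [%lu, %lu)\n", n, start, end);
        return 0;
}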
122 | /** | ||
123 | * apply_as_range - Apply a pte callback to all PTEs pointing into a range | ||
124 | * of an address_space. | ||
125 | * @mapping: Pointer to the struct address_space | ||
126 | * @aas: Closure structure | ||
127 | * @first_index: First page offset in the address_space | ||
128 | * @nr: Number of incremental page offsets to cover | ||
129 | * | ||
130 | * Return: Number of ptes touched. Note that this number might be larger | ||
131 | * than @nr if there are overlapping vmas. | ||
132 | */ | ||
133 | static unsigned long apply_as_range(struct address_space *mapping, | ||
134 | struct apply_as *aas, | ||
135 | pgoff_t first_index, pgoff_t nr) | ||
136 | { | ||
137 | struct vm_area_struct *vma; | ||
138 | pgoff_t vba, vea, cba, cea; | ||
139 | unsigned long start_addr, end_addr; | ||
140 | struct mmu_notifier_range range; | ||
141 | |||
142 | i_mmap_lock_read(mapping); | ||
143 | vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index, | ||
144 | first_index + nr - 1) { | ||
145 | unsigned long vm_flags = READ_ONCE(vma->vm_flags); | ||
146 | |||
147 | /* | ||
148 | * We can only do advisory flag tests below, since we can't | ||
149 | * require the vm's mmap_sem to be held to protect the flags. | ||
150 | * Therefore, callers that strictly depend on specific mmap | ||
151 | * flags remaining constant throughout the operation must either | ||
152 | * ensure those flags are immutable for all relevant vmas, or | ||
153 | * refrain from using this function. Fixing this properly would | ||
154 | * require vma::vm_flags to be protected by a separate lock | ||
155 | * taken after the i_mmap_lock. | ||
156 | */ | ||
157 | |||
158 | /* Skip non-applicable VMAs */ | ||
159 | if ((vm_flags & (VM_SHARED | VM_WRITE)) != | ||
160 | (VM_SHARED | VM_WRITE)) | ||
161 | continue; | ||
162 | |||
163 | /* Warn on and skip VMAs whose flags indicate illegal usage */ | ||
164 | if (WARN_ON((vm_flags & (VM_HUGETLB | VM_IO)) != VM_IO)) | ||
165 | continue; | ||
166 | |||
167 | /* Clip to the vma */ | ||
168 | vba = vma->vm_pgoff; | ||
169 | vea = vba + vma_pages(vma); | ||
170 | cba = first_index; | ||
171 | cba = max(cba, vba); | ||
172 | cea = first_index + nr; | ||
173 | cea = min(cea, vea); | ||
174 | |||
175 | /* Translate to virtual address */ | ||
176 | start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start; | ||
177 | end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start; | ||
178 | if (start_addr >= end_addr) | ||
179 | continue; | ||
180 | |||
181 | aas->base.mm = vma->vm_mm; | ||
182 | aas->vma = vma; | ||
183 | aas->start = end_addr; | ||
184 | aas->end = start_addr; | ||
185 | |||
186 | mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0, | ||
187 | vma, vma->vm_mm, start_addr, end_addr); | ||
188 | mmu_notifier_invalidate_range_start(&range); | ||
189 | |||
190 | /* Needed when we only change protection? */ | ||
191 | flush_cache_range(vma, start_addr, end_addr); | ||
192 | |||
193 | /* | ||
194 | * We're not using tlb_gather_mmu() since typically | ||
195 | * only a small subrange of PTEs are affected. | ||
196 | */ | ||
197 | inc_tlb_flush_pending(vma->vm_mm); | ||
198 | |||
199 | /* Should not error since aas->base.alloc == 0 */ | ||
200 | WARN_ON(apply_to_pfn_range(&aas->base, start_addr, | ||
201 | end_addr - start_addr)); | ||
202 | if (aas->end > aas->start) | ||
203 | flush_tlb_range(vma, aas->start, aas->end); | ||
204 | |||
205 | mmu_notifier_invalidate_range_end(&range); | ||
206 | dec_tlb_flush_pending(vma->vm_mm); | ||
207 | } | ||
208 | i_mmap_unlock_read(mapping); | ||
209 | |||
210 | return aas->total; | ||
211 | } | ||
212 | |||
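The clipping step in apply_as_range() above intersects the requested page-offset range with each vma's [vm_pgoff, vm_pgoff + pages) window and then translates the surviving offsets back into virtual addresses for the pte walk. A small userspace sketch of that arithmetic with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT 12

struct demo_vma {
        unsigned long vm_start; /* first mapped virtual address */
        unsigned long vm_pgoff; /* file page offset of vm_start */
        unsigned long pages;    /* vma_pages() analogue */
};

int main(void)
{
        struct demo_vma vma = { .vm_start = 0x700000000000UL,
                                .vm_pgoff = 10, .pages = 8 };
        unsigned long first_index = 12, nr = 20;        /* requested range */

        unsigned long vba = vma.vm_pgoff;
        unsigned long vea = vba + vma.pages;
        unsigned long cba = first_index > vba ? first_index : vba;
        unsigned long cea = first_index + nr < vea ? first_index + nr : vea;

        if (cba >= cea) {
                printf("range does not touch this vma\n");
                return 0;
        }

        unsigned long start_addr = ((cba - vba) << PAGE_SHIFT) + vma.vm_start;
        unsigned long end_addr = ((cea - vba) << PAGE_SHIFT) + vma.vm_start;

        printf("walk ptes in [%#lx, %#lx)\n", start_addr, end_addr);
        return 0;
}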
213 | /** | ||
214 | * apply_as_wrprotect - Write-protect all ptes in an address_space range | ||
215 | * @mapping: The address_space we want to write protect | ||
216 | * @first_index: The first page offset in the range | ||
217 | * @nr: Number of incremental page offsets to cover | ||
218 | * | ||
219 | * WARNING: This function should only be used for address spaces whose | ||
220 | * vmas are marked VM_IO and that do not contain huge pages. | ||
221 | * To avoid interference with COW'd pages, vmas not marked VM_SHARED are | ||
222 | * simply skipped. | ||
223 | * | ||
224 | * Return: The number of ptes actually write-protected. Note that | ||
225 | * already write-protected ptes are not counted. | ||
226 | */ | ||
227 | unsigned long apply_as_wrprotect(struct address_space *mapping, | ||
228 | pgoff_t first_index, pgoff_t nr) | ||
229 | { | ||
230 | struct apply_as aas = { | ||
231 | .base = { | ||
232 | .alloc = 0, | ||
233 | .ptefn = apply_pt_wrprotect, | ||
234 | }, | ||
235 | .total = 0, | ||
236 | }; | ||
237 | |||
238 | return apply_as_range(mapping, &aas, first_index, nr); | ||
239 | } | ||
240 | EXPORT_SYMBOL_GPL(apply_as_wrprotect); | ||
241 | |||
242 | /** | ||
243 | * apply_as_clean - Clean all ptes in an address_space range | ||
244 | * @mapping: The address_space we want to clean | ||
245 | * @first_index: The first page offset in the range | ||
246 | * @nr: Number of incremental page offsets to cover | ||
247 | * @bitmap_pgoff: The page offset of the first bit in @bitmap | ||
248 | * @bitmap: Pointer to a bitmap of at least @nr bits. The bitmap needs to | ||
249 | * cover the whole range @first_index..@first_index + @nr. | ||
250 | * @start: Pointer to the offset of the first set bit in @bitmap; the | ||
251 | * value is modified as new bits are set by the function. | ||
252 | * @end: Pointer to one past the offset of the last set bit in @bitmap | ||
253 | * (*start >= *end means none set); modified as new bits are set. | ||
254 | * | ||
255 | * Note: When this function returns there is no guarantee that a CPU has | ||
256 | * not already dirtied new ptes. However it will not clean any ptes not | ||
257 | * reported in the bitmap. | ||
258 | * | ||
259 | * If a caller needs to make sure all dirty ptes are picked up and none | ||
260 | * additional are added, it first needs to write-protect the address-space | ||
261 | * range and make sure new writers are blocked in page_mkwrite() or | ||
262 | * pfn_mkwrite(). And then after a TLB flush following the write-protection | ||
263 | * pick up all dirty bits. | ||
264 | * | ||
265 | * WARNING: This function should only be used for address spaces whose | ||
266 | * vmas are marked VM_IO and that do not contain huge pages. | ||
267 | * To avoid interference with COW'd pages, vmas not marked VM_SHARED are | ||
268 | * simply skipped. | ||
269 | * | ||
270 | * Return: The number of dirty ptes actually cleaned. | ||
271 | */ | ||
272 | unsigned long apply_as_clean(struct address_space *mapping, | ||
273 | pgoff_t first_index, pgoff_t nr, | ||
274 | pgoff_t bitmap_pgoff, | ||
275 | unsigned long *bitmap, | ||
276 | pgoff_t *start, | ||
277 | pgoff_t *end) | ||
278 | { | ||
279 | bool none_set = (*start >= *end); | ||
280 | struct apply_as_clean clean = { | ||
281 | .base = { | ||
282 | .base = { | ||
283 | .alloc = 0, | ||
284 | .ptefn = apply_pt_clean, | ||
285 | }, | ||
286 | .total = 0, | ||
287 | }, | ||
288 | .bitmap_pgoff = bitmap_pgoff, | ||
289 | .bitmap = bitmap, | ||
290 | .start = none_set ? nr : *start, | ||
291 | .end = none_set ? 0 : *end, | ||
292 | }; | ||
293 | unsigned long ret = apply_as_range(mapping, &clean.base, first_index, | ||
294 | nr); | ||
295 | |||
296 | *start = clean.start; | ||
297 | *end = clean.end; | ||
298 | return ret; | ||
299 | } | ||
300 | EXPORT_SYMBOL_GPL(apply_as_clean); | ||
diff --git a/mm/memory.c b/mm/memory.c index 462aa47f8878..ddf20bd0c317 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2032,17 +2032,18 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long | |||
2032 | } | 2032 | } |
2033 | EXPORT_SYMBOL(vm_iomap_memory); | 2033 | EXPORT_SYMBOL(vm_iomap_memory); |
2034 | 2034 | ||
2035 | static int apply_to_pte_range(struct pfn_range_apply *closure, pmd_t *pmd, | 2035 | static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, |
2036 | unsigned long addr, unsigned long end) | 2036 | unsigned long addr, unsigned long end, |
2037 | pte_fn_t fn, void *data) | ||
2037 | { | 2038 | { |
2038 | pte_t *pte; | 2039 | pte_t *pte; |
2039 | int err; | 2040 | int err; |
2040 | pgtable_t token; | 2041 | pgtable_t token; |
2041 | spinlock_t *uninitialized_var(ptl); | 2042 | spinlock_t *uninitialized_var(ptl); |
2042 | 2043 | ||
2043 | pte = (closure->mm == &init_mm) ? | 2044 | pte = (mm == &init_mm) ? |
2044 | pte_alloc_kernel(pmd, addr) : | 2045 | pte_alloc_kernel(pmd, addr) : |
2045 | pte_alloc_map_lock(closure->mm, pmd, addr, &ptl); | 2046 | pte_alloc_map_lock(mm, pmd, addr, &ptl); |
2046 | if (!pte) | 2047 | if (!pte) |
2047 | return -ENOMEM; | 2048 | return -ENOMEM; |
2048 | 2049 | ||
@@ -2053,109 +2054,86 @@ static int apply_to_pte_range(struct pfn_range_apply *closure, pmd_t *pmd, | |||
2053 | token = pmd_pgtable(*pmd); | 2054 | token = pmd_pgtable(*pmd); |
2054 | 2055 | ||
2055 | do { | 2056 | do { |
2056 | err = closure->ptefn(pte++, token, addr, closure); | 2057 | err = fn(pte++, token, addr, data); |
2057 | if (err) | 2058 | if (err) |
2058 | break; | 2059 | break; |
2059 | } while (addr += PAGE_SIZE, addr != end); | 2060 | } while (addr += PAGE_SIZE, addr != end); |
2060 | 2061 | ||
2061 | arch_leave_lazy_mmu_mode(); | 2062 | arch_leave_lazy_mmu_mode(); |
2062 | 2063 | ||
2063 | if (closure->mm != &init_mm) | 2064 | if (mm != &init_mm) |
2064 | pte_unmap_unlock(pte-1, ptl); | 2065 | pte_unmap_unlock(pte-1, ptl); |
2065 | return err; | 2066 | return err; |
2066 | } | 2067 | } |
2067 | 2068 | ||
2068 | static int apply_to_pmd_range(struct pfn_range_apply *closure, pud_t *pud, | 2069 | static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, |
2069 | unsigned long addr, unsigned long end) | 2070 | unsigned long addr, unsigned long end, |
2071 | pte_fn_t fn, void *data) | ||
2070 | { | 2072 | { |
2071 | pmd_t *pmd; | 2073 | pmd_t *pmd; |
2072 | unsigned long next; | 2074 | unsigned long next; |
2073 | int err = 0; | 2075 | int err; |
2074 | 2076 | ||
2075 | BUG_ON(pud_huge(*pud)); | 2077 | BUG_ON(pud_huge(*pud)); |
2076 | 2078 | ||
2077 | pmd = pmd_alloc(closure->mm, pud, addr); | 2079 | pmd = pmd_alloc(mm, pud, addr); |
2078 | if (!pmd) | 2080 | if (!pmd) |
2079 | return -ENOMEM; | 2081 | return -ENOMEM; |
2080 | |||
2081 | do { | 2082 | do { |
2082 | next = pmd_addr_end(addr, end); | 2083 | next = pmd_addr_end(addr, end); |
2083 | if (!closure->alloc && pmd_none_or_clear_bad(pmd)) | 2084 | err = apply_to_pte_range(mm, pmd, addr, next, fn, data); |
2084 | continue; | ||
2085 | err = apply_to_pte_range(closure, pmd, addr, next); | ||
2086 | if (err) | 2085 | if (err) |
2087 | break; | 2086 | break; |
2088 | } while (pmd++, addr = next, addr != end); | 2087 | } while (pmd++, addr = next, addr != end); |
2089 | return err; | 2088 | return err; |
2090 | } | 2089 | } |
2091 | 2090 | ||
2092 | static int apply_to_pud_range(struct pfn_range_apply *closure, p4d_t *p4d, | 2091 | static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d, |
2093 | unsigned long addr, unsigned long end) | 2092 | unsigned long addr, unsigned long end, |
2093 | pte_fn_t fn, void *data) | ||
2094 | { | 2094 | { |
2095 | pud_t *pud; | 2095 | pud_t *pud; |
2096 | unsigned long next; | 2096 | unsigned long next; |
2097 | int err = 0; | 2097 | int err; |
2098 | 2098 | ||
2099 | pud = pud_alloc(closure->mm, p4d, addr); | 2099 | pud = pud_alloc(mm, p4d, addr); |
2100 | if (!pud) | 2100 | if (!pud) |
2101 | return -ENOMEM; | 2101 | return -ENOMEM; |
2102 | |||
2103 | do { | 2102 | do { |
2104 | next = pud_addr_end(addr, end); | 2103 | next = pud_addr_end(addr, end); |
2105 | if (!closure->alloc && pud_none_or_clear_bad(pud)) | 2104 | err = apply_to_pmd_range(mm, pud, addr, next, fn, data); |
2106 | continue; | ||
2107 | err = apply_to_pmd_range(closure, pud, addr, next); | ||
2108 | if (err) | 2105 | if (err) |
2109 | break; | 2106 | break; |
2110 | } while (pud++, addr = next, addr != end); | 2107 | } while (pud++, addr = next, addr != end); |
2111 | return err; | 2108 | return err; |
2112 | } | 2109 | } |
2113 | 2110 | ||
2114 | static int apply_to_p4d_range(struct pfn_range_apply *closure, pgd_t *pgd, | 2111 | static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd, |
2115 | unsigned long addr, unsigned long end) | 2112 | unsigned long addr, unsigned long end, |
2113 | pte_fn_t fn, void *data) | ||
2116 | { | 2114 | { |
2117 | p4d_t *p4d; | 2115 | p4d_t *p4d; |
2118 | unsigned long next; | 2116 | unsigned long next; |
2119 | int err = 0; | 2117 | int err; |
2120 | 2118 | ||
2121 | p4d = p4d_alloc(closure->mm, pgd, addr); | 2119 | p4d = p4d_alloc(mm, pgd, addr); |
2122 | if (!p4d) | 2120 | if (!p4d) |
2123 | return -ENOMEM; | 2121 | return -ENOMEM; |
2124 | |||
2125 | do { | 2122 | do { |
2126 | next = p4d_addr_end(addr, end); | 2123 | next = p4d_addr_end(addr, end); |
2127 | if (!closure->alloc && p4d_none_or_clear_bad(p4d)) | 2124 | err = apply_to_pud_range(mm, p4d, addr, next, fn, data); |
2128 | continue; | ||
2129 | err = apply_to_pud_range(closure, p4d, addr, next); | ||
2130 | if (err) | 2125 | if (err) |
2131 | break; | 2126 | break; |
2132 | } while (p4d++, addr = next, addr != end); | 2127 | } while (p4d++, addr = next, addr != end); |
2133 | return err; | 2128 | return err; |
2134 | } | 2129 | } |
2135 | 2130 | ||
2136 | /** | 2131 | /* |
2137 | * apply_to_pfn_range - Scan a region of virtual memory, calling a provided | 2132 | * Scan a region of virtual memory, filling in page tables as necessary |
2138 | * function on each leaf page table entry | 2133 | * and calling a provided function on each leaf page table. |
2139 | * @closure: Details about how to scan and what function to apply | ||
2140 | * @addr: Start virtual address | ||
2141 | * @size: Size of the region | ||
2142 | * | ||
2143 | * If @closure->alloc is set to 1, the function will fill in the page table | ||
2144 | * as necessary. Otherwise it will skip non-present parts. | ||
2145 | * Note: The caller must ensure that the range does not contain huge pages. | ||
2147 | * The caller must also ensure that the proper mmu_notifier functions are | ||
2147 | * called before and after the call to apply_to_pfn_range. | ||
2148 | * | ||
2149 | * WARNING: Do not use this function unless you know exactly what you are | ||
2150 | * doing. It is lacking support for huge pages and transparent huge pages. | ||
2151 | * | ||
2152 | * Return: Zero on success. If the provided function returns a non-zero status, | ||
2153 | * the page table walk will terminate and that status will be returned. | ||
2154 | * If @closure->alloc is set to 1, then this function may also return memory | ||
2155 | * allocation errors arising from allocating page table memory. | ||
2156 | */ | 2134 | */ |
2157 | int apply_to_pfn_range(struct pfn_range_apply *closure, | 2135 | int apply_to_page_range(struct mm_struct *mm, unsigned long addr, |
2158 | unsigned long addr, unsigned long size) | 2136 | unsigned long size, pte_fn_t fn, void *data) |
2159 | { | 2137 | { |
2160 | pgd_t *pgd; | 2138 | pgd_t *pgd; |
2161 | unsigned long next; | 2139 | unsigned long next; |
@@ -2165,65 +2143,16 @@ int apply_to_pfn_range(struct pfn_range_apply *closure, | |||
2165 | if (WARN_ON(addr >= end)) | 2143 | if (WARN_ON(addr >= end)) |
2166 | return -EINVAL; | 2144 | return -EINVAL; |
2167 | 2145 | ||
2168 | pgd = pgd_offset(closure->mm, addr); | 2146 | pgd = pgd_offset(mm, addr); |
2169 | do { | 2147 | do { |
2170 | next = pgd_addr_end(addr, end); | 2148 | next = pgd_addr_end(addr, end); |
2171 | if (!closure->alloc && pgd_none_or_clear_bad(pgd)) | 2149 | err = apply_to_p4d_range(mm, pgd, addr, next, fn, data); |
2172 | continue; | ||
2173 | err = apply_to_p4d_range(closure, pgd, addr, next); | ||
2174 | if (err) | 2150 | if (err) |
2175 | break; | 2151 | break; |
2176 | } while (pgd++, addr = next, addr != end); | 2152 | } while (pgd++, addr = next, addr != end); |
2177 | 2153 | ||
2178 | return err; | 2154 | return err; |
2179 | } | 2155 | } |
2180 | |||
2181 | /** | ||
2182 | * struct page_range_apply - Closure structure for apply_to_page_range() | ||
2183 | * @pter: The base closure structure we derive from | ||
2184 | * @fn: The leaf pte function to call | ||
2185 | * @data: The leaf pte function closure | ||
2186 | */ | ||
2187 | struct page_range_apply { | ||
2188 | struct pfn_range_apply pter; | ||
2189 | pte_fn_t fn; | ||
2190 | void *data; | ||
2191 | }; | ||
2192 | |||
2193 | /* | ||
2194 | * Callback wrapper to enable use of apply_to_pfn_range for | ||
2195 | * the apply_to_page_range interface | ||
2196 | */ | ||
2197 | static int apply_to_page_range_wrapper(pte_t *pte, pgtable_t token, | ||
2198 | unsigned long addr, | ||
2199 | struct pfn_range_apply *pter) | ||
2200 | { | ||
2201 | struct page_range_apply *pra = | ||
2202 | container_of(pter, typeof(*pra), pter); | ||
2203 | |||
2204 | return pra->fn(pte, token, addr, pra->data); | ||
2205 | } | ||
2206 | |||
2207 | /* | ||
2208 | * Scan a region of virtual memory, filling in page tables as necessary | ||
2209 | * and calling a provided function on each leaf page table. | ||
2210 | * | ||
2211 | * WARNING: Do not use this function unless you know exactly what you are | ||
2212 | * doing. It is lacking support for huge pages and transparent huge pages. | ||
2213 | */ | ||
2214 | int apply_to_page_range(struct mm_struct *mm, unsigned long addr, | ||
2215 | unsigned long size, pte_fn_t fn, void *data) | ||
2216 | { | ||
2217 | struct page_range_apply pra = { | ||
2218 | .pter = {.mm = mm, | ||
2219 | .alloc = 1, | ||
2220 | .ptefn = apply_to_page_range_wrapper }, | ||
2221 | .fn = fn, | ||
2222 | .data = data | ||
2223 | }; | ||
2224 | |||
2225 | return apply_to_pfn_range(&pra.pter, addr, size); | ||
2226 | } | ||
2227 | EXPORT_SYMBOL_GPL(apply_to_page_range); | 2156 | EXPORT_SYMBOL_GPL(apply_to_page_range); |
2228 | 2157 | ||
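For context, a minimal sketch of how a caller drives the restored apply_to_page_range() interface: the mm, callback and opaque data pointer are passed directly, instead of being packed into the pfn_range_apply closure (.mm, .alloc, .ptefn) that this revert removes. The callback signature, including the pgtable_t token argument, is taken from the removed apply_to_page_range_wrapper(); the count_pte()/count_range() names and the counting logic are hypothetical, purely for illustration.

	/* Hypothetical pte_fn_t callback: count the leaf entries handed to it. */
	static int count_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			     void *data)
	{
		unsigned long *count = data;

		(*count)++;
		return 0;	/* non-zero would terminate the walk with that status */
	}

	/* Walk [addr, addr + size), allocating page tables as needed. */
	static int count_range(struct mm_struct *mm, unsigned long addr,
			       unsigned long size, unsigned long *count)
	{
		*count = 0;
		return apply_to_page_range(mm, addr, size, count_pte, count);
	}

Note that the restored interface always fills in missing page tables; the ability to skip non-present ranges (closure->alloc == 0) goes away with apply_to_pfn_range().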
2229 | /* | 2158 | /* |
@@ -2309,7 +2238,7 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf) | |||
2309 | ret = vmf->vma->vm_ops->page_mkwrite(vmf); | 2238 | ret = vmf->vma->vm_ops->page_mkwrite(vmf); |
2310 | /* Restore original flags so that caller is not surprised */ | 2239 | /* Restore original flags so that caller is not surprised */ |
2311 | vmf->flags = old_flags; | 2240 | vmf->flags = old_flags; |
2312 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) | 2241 | if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) |
2313 | return ret; | 2242 | return ret; |
2314 | if (unlikely(!(ret & VM_FAULT_LOCKED))) { | 2243 | if (unlikely(!(ret & VM_FAULT_LOCKED))) { |
2315 | lock_page(page); | 2244 | lock_page(page); |
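The remaining hunks restore the pre-series behaviour in which only VM_FAULT_ERROR and VM_FAULT_NOPAGE short-circuit the ->page_mkwrite/->pfn_mkwrite paths; VM_FAULT_RETRY is no longer treated as an early return, and an unlocked page is simply locked by the core again. A minimal sketch of a ->page_mkwrite handler that fits the restored contract follows; it is not the vmwgfx/TTM code, and my_page_mkwrite() is a hypothetical name.

	/*
	 * Hypothetical ->page_mkwrite handler: lock the page, check it is still
	 * attached to the mapping, and return VM_FAULT_LOCKED so that
	 * do_page_mkwrite() does not lock it a second time.
	 */
	static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
	{
		struct page *page = vmf->page;
		struct inode *inode = file_inode(vmf->vma->vm_file);

		lock_page(page);
		if (page->mapping != inode->i_mapping) {
			unlock_page(page);
			return VM_FAULT_NOPAGE;	/* truncated under us: refault */
		}
		/* ... driver/fs specific work to make the page writable ... */
		return VM_FAULT_LOCKED;
	}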
@@ -2586,7 +2515,7 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) | |||
2586 | pte_unmap_unlock(vmf->pte, vmf->ptl); | 2515 | pte_unmap_unlock(vmf->pte, vmf->ptl); |
2587 | vmf->flags |= FAULT_FLAG_MKWRITE; | 2516 | vmf->flags |= FAULT_FLAG_MKWRITE; |
2588 | ret = vma->vm_ops->pfn_mkwrite(vmf); | 2517 | ret = vma->vm_ops->pfn_mkwrite(vmf); |
2589 | if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)) | 2518 | if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) |
2590 | return ret; | 2519 | return ret; |
2591 | return finish_mkwrite_fault(vmf); | 2520 | return finish_mkwrite_fault(vmf); |
2592 | } | 2521 | } |
@@ -2607,8 +2536,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf) | |||
2607 | pte_unmap_unlock(vmf->pte, vmf->ptl); | 2536 | pte_unmap_unlock(vmf->pte, vmf->ptl); |
2608 | tmp = do_page_mkwrite(vmf); | 2537 | tmp = do_page_mkwrite(vmf); |
2609 | if (unlikely(!tmp || (tmp & | 2538 | if (unlikely(!tmp || (tmp & |
2610 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE | | 2539 | (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { |
2611 | VM_FAULT_RETRY)))) { | ||
2612 | put_page(vmf->page); | 2540 | put_page(vmf->page); |
2613 | return tmp; | 2541 | return tmp; |
2614 | } | 2542 | } |
@@ -3673,8 +3601,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf) | |||
3673 | unlock_page(vmf->page); | 3601 | unlock_page(vmf->page); |
3674 | tmp = do_page_mkwrite(vmf); | 3602 | tmp = do_page_mkwrite(vmf); |
3675 | if (unlikely(!tmp || | 3603 | if (unlikely(!tmp || |
3676 | (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | | 3604 | (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { |
3677 | VM_FAULT_RETRY)))) { | ||
3678 | put_page(vmf->page); | 3605 | put_page(vmf->page); |
3679 | return tmp; | 3606 | return tmp; |
3680 | } | 3607 | } |