aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-10-14 08:47:43 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2010-10-19 04:19:37 -0400
commitb5e4feb6615fe07150f05bb0e0ccc0ff9138b9ec (patch)
tree920be74c3946e1b882258a7d8c0af5266417fa66 /drivers
parent202f2fef7a1aa6b2e4fa6e1de3ef582342fd41f0 (diff)
drm/i915: Attempt to prefault user pages for pread/pwrite
... in the hope that it makes the atomic fast paths more likely. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c33
1 file changed, 19 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 942e4b351cdd..b44c09ab8928 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -265,19 +265,14 @@ fast_shmem_read(struct page **pages,
265 char __user *data, 265 char __user *data,
266 int length) 266 int length)
267{ 267{
268 char __iomem *vaddr;
269 int unwritten; 268 int unwritten;
269 char *vaddr;
270 270
271 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); 271 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
272 if (vaddr == NULL)
273 return -ENOMEM;
274 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); 272 unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
275 kunmap_atomic(vaddr, KM_USER0); 273 kunmap_atomic(vaddr, KM_USER0);
276 274
277 if (unwritten) 275 return unwritten ? -EFAULT : 0;
278 return -EFAULT;
279
280 return 0;
281} 276}
282 277
283static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) 278static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
@@ -602,6 +597,13 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
602 goto out; 597 goto out;
603 } 598 }
604 599
600 ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
601 args->size);
602 if (ret) {
603 ret = -EFAULT;
604 goto out;
605 }
606
605 if (i915_gem_object_needs_bit17_swizzle(obj)) { 607 if (i915_gem_object_needs_bit17_swizzle(obj)) {
606 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 608 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
607 } else { 609 } else {
@@ -668,18 +670,14 @@ fast_shmem_write(struct page **pages,
668 char __user *data, 670 char __user *data,
669 int length) 671 int length)
670{ 672{
671 char __iomem *vaddr; 673 int unwritten;
672 unsigned long unwritten; 674 char *vaddr;
673 675
674 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); 676 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
675 if (vaddr == NULL)
676 return -ENOMEM;
677 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length); 677 unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
678 kunmap_atomic(vaddr, KM_USER0); 678 kunmap_atomic(vaddr, KM_USER0);
679 679
680 if (unwritten) 680 return unwritten ? -EFAULT : 0;
681 return -EFAULT;
682 return 0;
683} 681}
684 682
685/** 683/**
@@ -1078,6 +1076,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1078 goto out; 1076 goto out;
1079 } 1077 }
1080 1078
1079 ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
1080 args->size);
1081 if (ret) {
1082 ret = -EFAULT;
1083 goto out;
1084 }
1085
1081 /* We can only do the GTT pwrite on untiled buffers, as otherwise 1086 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1082 * it would end up going through the fenced access, and we'll get 1087 * it would end up going through the fenced access, and we'll get
1083 * different detiling behavior between reading and writing. 1088 * different detiling behavior between reading and writing.