about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2010-05-27 09:15:35 -0400
committerEric Anholt <eric@anholt.net>2010-05-28 14:03:29 -0400
commitab34c226812588de8f341ce48eb32c3fef5155a9 (patch)
treee508b67a2d7ea0ea6450b0d8d9eedfb1f5cb2772
parent99a03df57c82ec20848d2634f652c07ac3504b98 (diff)
drm/i915: Fix up address spaces in slow_kernel_write()
Since we now get_user_pages() outside of the mutex prior to performing the copy, we kmap() the page inside the copy routine and so need to perform an ordinary memcpy() and not copy_from_user().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 42 +++++++++++-------------------
1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b8e351274493..9ded3dae6c87 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -509,25 +509,24 @@ fast_user_write(struct io_mapping *mapping,
  * page faults
  */
 
-static inline int
+static inline void
 slow_kernel_write(struct io_mapping *mapping,
 		  loff_t gtt_base, int gtt_offset,
 		  struct page *user_page, int user_offset,
 		  int length)
 {
-	char *src_vaddr, *dst_vaddr;
-	unsigned long unwritten;
+	char __iomem *dst_vaddr;
+	char *src_vaddr;
 
-	dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
-	src_vaddr = kmap_atomic(user_page, KM_USER1);
-	unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
-						      src_vaddr + user_offset,
-						      length);
-	kunmap_atomic(src_vaddr, KM_USER1);
-	io_mapping_unmap_atomic(dst_vaddr);
-	if (unwritten)
-		return -EFAULT;
-	return 0;
+	dst_vaddr = io_mapping_map_wc(mapping, gtt_base);
+	src_vaddr = kmap(user_page);
+
+	memcpy_toio(dst_vaddr + gtt_offset,
+		    src_vaddr + user_offset,
+		    length);
+
+	kunmap(user_page);
+	io_mapping_unmap(dst_vaddr);
 }
 
 static inline int
@@ -700,18 +699,11 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
-		ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
-					gtt_page_base, gtt_page_offset,
-					user_pages[data_page_index],
-					data_page_offset,
-					page_length);
-
-		/* If we get a fault while copying data, then (presumably) our
-		 * source page isn't available.  Return the error and we'll
-		 * retry in the slow path.
-		 */
-		if (ret)
-			goto out_unpin_object;
+		slow_kernel_write(dev_priv->mm.gtt_mapping,
+				  gtt_page_base, gtt_page_offset,
+				  user_pages[data_page_index],
+				  data_page_offset,
+				  page_length);
 
 		remain -= page_length;
 		offset += page_length;