path: root/drivers/gpu/drm/i915/i915_gem.c
author    Chris Wilson <chris@chris-wilson.co.uk>    2010-10-28 08:45:36 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>    2010-10-28 15:55:02 -0400
commit    e5281ccd2e0049e2b9e8ce82449630d25082372d (patch)
tree      56ff79f83ce4d0e957d68643a37af4cbc08eba11 /drivers/gpu/drm/i915/i915_gem.c
parent    39a01d1fb63cf8ebc1a8cf436f5c0ba9657b55c6 (diff)
drm/i915: Eliminate nested get/put pages
By using read_cache_page() for individual pages during pwrite/pread we can
eliminate an unnecessary large allocation (and immediate free) of obj->pages.
This also eliminates any potential nesting of get/put pages, simplifying the
code and preparing the path for greater things.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
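For reference, a minimal sketch of the per-page pattern the patch switches to for pread (the helper name and exact error handling below are illustrative only, not part of the patch): look the page up in the object's shmem mapping with read_cache_page_gfp(), copy through a short-lived atomic kmap, then drop the page reference immediately instead of holding a pinned obj->pages array.

/* Illustrative sketch only (hypothetical helper, not in the patch):
 * read one shmem-backed page and copy part of it to userspace.
 */
static int example_pread_one_page(struct address_space *mapping, loff_t offset,
                                  char __user *user_data, int length)
{
        struct page *page;
        char *vaddr;
        int ret;

        /* Look up (and populate, if needed) the page in the shmem mapping. */
        page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
                                   GFP_HIGHUSER | __GFP_RECLAIMABLE);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* Copy out through a temporary atomic mapping. */
        vaddr = kmap_atomic(page);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + offset_in_page(offset), length);
        kunmap_atomic(vaddr);

        /* Drop the reference right away; no long-lived page array is kept. */
        mark_page_accessed(page);
        page_cache_release(page);

        return ret ? -EFAULT : 0;
}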
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  358
1 file changed, 163 insertions(+), 195 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ade4e243379..abe6d901f95b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -58,13 +58,6 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
                                 struct drm_file *file_priv);
 static void i915_gem_free_object_tail(struct drm_gem_object *obj);
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-                          gfp_t gfpmask);
-
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                     int nr_to_scan,
                                     gfp_t gfp_mask);
@@ -326,22 +319,6 @@ i915_gem_object_cpu_accessible(struct drm_i915_gem_object *obj)
                obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 }
 
-static inline int
-fast_shmem_read(struct page **pages,
-                loff_t page_base, int page_offset,
-                char __user *data,
-                int length)
-{
-        char *vaddr;
-        int ret;
-
-        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-        ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-        kunmap_atomic(vaddr);
-
-        return ret;
-}
-
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
         drm_i915_private_t *dev_priv = obj->dev->dev_private;
@@ -429,8 +406,9 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_file *file_priv)
 {
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
         ssize_t remain;
-        loff_t offset, page_base;
+        loff_t offset;
         char __user *user_data;
         int page_offset, page_length;
 
@@ -441,21 +419,34 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
         offset = args->offset;
 
         while (remain > 0) {
+                struct page *page;
+                char *vaddr;
+                int ret;
+
                 /* Operation in this page
                  *
-                 * page_base = page offset within aperture
                  * page_offset = offset within page
                  * page_length = bytes to copy for this page
                  */
-                page_base = (offset & ~(PAGE_SIZE-1));
                 page_offset = offset & (PAGE_SIZE-1);
                 page_length = remain;
                 if ((page_offset + remain) > PAGE_SIZE)
                         page_length = PAGE_SIZE - page_offset;
 
-                if (fast_shmem_read(obj_priv->pages,
-                                    page_base, page_offset,
-                                    user_data, page_length))
+                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (IS_ERR(page))
+                        return PTR_ERR(page);
+
+                vaddr = kmap_atomic(page);
+                ret = __copy_to_user_inatomic(user_data,
+                                              vaddr + page_offset,
+                                              page_length);
+                kunmap_atomic(vaddr);
+
+                mark_page_accessed(page);
+                page_cache_release(page);
+                if (ret)
                         return -EFAULT;
 
                 remain -= page_length;
@@ -466,31 +457,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
         return 0;
 }
 
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
-        int ret;
-
-        ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
-        /* If we've insufficient memory to map in the pages, attempt
-         * to make some space by throwing out some old buffers.
-         */
-        if (ret == -ENOMEM) {
-                struct drm_device *dev = obj->dev;
-
-                ret = i915_gem_evict_something(dev, obj->size,
-                                               i915_gem_get_gtt_alignment(obj),
-                                               false);
-                if (ret)
-                        return ret;
-
-                ret = i915_gem_object_get_pages(obj, 0);
-        }
-
-        return ret;
-}
-
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -502,14 +468,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                           struct drm_i915_gem_pread *args,
                           struct drm_file *file_priv)
 {
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         struct mm_struct *mm = current->mm;
         struct page **user_pages;
         ssize_t remain;
         loff_t offset, pinned_pages, i;
         loff_t first_data_page, last_data_page, num_pages;
-        int shmem_page_index, shmem_page_offset;
+        int shmem_page_offset;
         int data_page_index, data_page_offset;
         int page_length;
         int ret;
         uint64_t data_ptr = args->data_ptr;
@@ -552,15 +519,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
         offset = args->offset;
 
         while (remain > 0) {
+                struct page *page;
+
                 /* Operation in this page
                  *
-                 * shmem_page_index = page number within shmem file
                  * shmem_page_offset = offset within page in shmem file
                  * data_page_index = page number in get_user_pages return
                  * data_page_offset = offset with data_page_index page.
                  * page_length = bytes to copy for this page
                  */
-                shmem_page_index = offset / PAGE_SIZE;
                 shmem_page_offset = offset & ~PAGE_MASK;
                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -571,8 +538,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
+                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (IS_ERR(page))
+                        return PTR_ERR(page);
+
                 if (do_bit17_swizzling) {
-                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                        slow_shmem_bit17_copy(page,
                                               shmem_page_offset,
                                               user_pages[data_page_index],
                                               data_page_offset,
@@ -581,11 +553,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 } else {
                         slow_shmem_copy(user_pages[data_page_index],
                                         data_page_offset,
-                                        obj_priv->pages[shmem_page_index],
+                                        page,
                                         shmem_page_offset,
                                         page_length);
                 }
 
+                mark_page_accessed(page);
+                page_cache_release(page);
+
                 remain -= page_length;
                 data_ptr += page_length;
                 offset += page_length;
@@ -594,6 +569,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 out:
         for (i = 0; i < pinned_pages; i++) {
                 SetPageDirty(user_pages[i]);
+                mark_page_accessed(user_pages[i]);
                 page_cache_release(user_pages[i]);
         }
         drm_free_large(user_pages);
@@ -649,15 +625,11 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                 goto out;
         }
 
-        ret = i915_gem_object_get_pages_or_evict(obj);
-        if (ret)
-                goto out;
-
         ret = i915_gem_object_set_cpu_read_domain_range(obj,
                                                         args->offset,
                                                         args->size);
         if (ret)
-                goto out_put;
+                goto out;
 
         ret = -EFAULT;
         if (!i915_gem_object_needs_bit17_swizzle(obj))
@@ -665,8 +637,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
         if (ret == -EFAULT)
                 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
 
-out_put:
-        i915_gem_object_put_pages(obj);
 out:
         drm_gem_object_unreference(obj);
 unlock:
@@ -718,22 +688,6 @@ slow_kernel_write(struct io_mapping *mapping,
         io_mapping_unmap(dst_vaddr);
 }
 
-static inline int
-fast_shmem_write(struct page **pages,
-                 loff_t page_base, int page_offset,
-                 char __user *data,
-                 int length)
-{
-        char *vaddr;
-        int ret;
-
-        vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-        ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-        kunmap_atomic(vaddr);
-
-        return ret;
-}
-
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
@@ -890,9 +844,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
                            struct drm_i915_gem_pwrite *args,
                            struct drm_file *file_priv)
 {
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         ssize_t remain;
-        loff_t offset, page_base;
+        loff_t offset;
         char __user *user_data;
         int page_offset, page_length;
 
@@ -904,21 +859,40 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
         obj_priv->dirty = 1;
 
         while (remain > 0) {
+                struct page *page;
+                char *vaddr;
+                int ret;
+
                 /* Operation in this page
                  *
-                 * page_base = page offset within aperture
                  * page_offset = offset within page
                  * page_length = bytes to copy for this page
                  */
-                page_base = (offset & ~(PAGE_SIZE-1));
                 page_offset = offset & (PAGE_SIZE-1);
                 page_length = remain;
                 if ((page_offset + remain) > PAGE_SIZE)
                         page_length = PAGE_SIZE - page_offset;
 
-                if (fast_shmem_write(obj_priv->pages,
-                                     page_base, page_offset,
-                                     user_data, page_length))
+                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (IS_ERR(page))
+                        return PTR_ERR(page);
+
+                vaddr = kmap_atomic(page, KM_USER0);
+                ret = __copy_from_user_inatomic(vaddr + page_offset,
+                                                user_data,
+                                                page_length);
+                kunmap_atomic(vaddr, KM_USER0);
+
+                set_page_dirty(page);
+                mark_page_accessed(page);
+                page_cache_release(page);
+
+                /* If we get a fault while copying data, then (presumably) our
+                 * source page isn't available.  Return the error and we'll
+                 * retry in the slow path.
+                 */
+                if (ret)
                         return -EFAULT;
 
                 remain -= page_length;
@@ -941,13 +915,14 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                            struct drm_i915_gem_pwrite *args,
                            struct drm_file *file_priv)
 {
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         struct mm_struct *mm = current->mm;
         struct page **user_pages;
         ssize_t remain;
         loff_t offset, pinned_pages, i;
         loff_t first_data_page, last_data_page, num_pages;
-        int shmem_page_index, shmem_page_offset;
+        int shmem_page_offset;
         int data_page_index, data_page_offset;
         int page_length;
         int ret;
@@ -990,15 +965,15 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
         obj_priv->dirty = 1;
 
         while (remain > 0) {
+                struct page *page;
+
                 /* Operation in this page
                  *
-                 * shmem_page_index = page number within shmem file
                  * shmem_page_offset = offset within page in shmem file
                  * data_page_index = page number in get_user_pages return
                  * data_page_offset = offset with data_page_index page.
                  * page_length = bytes to copy for this page
                  */
-                shmem_page_index = offset / PAGE_SIZE;
                 shmem_page_offset = offset & ~PAGE_MASK;
                 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
                 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -1009,21 +984,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
                 if ((data_page_offset + page_length) > PAGE_SIZE)
                         page_length = PAGE_SIZE - data_page_offset;
 
+                page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (IS_ERR(page)) {
+                        ret = PTR_ERR(page);
+                        goto out;
+                }
+
                 if (do_bit17_swizzling) {
-                        slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+                        slow_shmem_bit17_copy(page,
                                               shmem_page_offset,
                                               user_pages[data_page_index],
                                               data_page_offset,
                                               page_length,
                                               0);
                 } else {
-                        slow_shmem_copy(obj_priv->pages[shmem_page_index],
+                        slow_shmem_copy(page,
                                         shmem_page_offset,
                                         user_pages[data_page_index],
                                         data_page_offset,
                                         page_length);
                 }
 
+                set_page_dirty(page);
+                mark_page_accessed(page);
+                page_cache_release(page);
+
                 remain -= page_length;
                 data_ptr += page_length;
                 offset += page_length;
@@ -1112,22 +1098,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 out_unpin:
                 i915_gem_object_unpin(obj);
         } else {
-                ret = i915_gem_object_get_pages_or_evict(obj);
-                if (ret)
-                        goto out;
-
                 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
                 if (ret)
-                        goto out_put;
+                        goto out;
 
                 ret = -EFAULT;
                 if (!i915_gem_object_needs_bit17_swizzle(obj))
                         ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
                 if (ret == -EFAULT)
                         ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
-                i915_gem_object_put_pages(obj);
         }
 
 out:
@@ -1587,19 +1566,62 @@ unlock:
         return ret;
 }
 
+static int
+i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
+                              gfp_t gfpmask)
+{
+        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+        int page_count, i;
+        struct address_space *mapping;
+        struct inode *inode;
+        struct page *page;
+
+        /* Get the list of pages out of our struct file.  They'll be pinned
+         * at this point until we release them.
+         */
+        page_count = obj->size / PAGE_SIZE;
+        BUG_ON(obj_priv->pages != NULL);
+        obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+        if (obj_priv->pages == NULL)
+                return -ENOMEM;
+
+        inode = obj->filp->f_path.dentry->d_inode;
+        mapping = inode->i_mapping;
+        for (i = 0; i < page_count; i++) {
+                page = read_cache_page_gfp(mapping, i,
+                                           GFP_HIGHUSER |
+                                           __GFP_COLD |
+                                           __GFP_RECLAIMABLE |
+                                           gfpmask);
+                if (IS_ERR(page))
+                        goto err_pages;
+
+                obj_priv->pages[i] = page;
+        }
+
+        if (obj_priv->tiling_mode != I915_TILING_NONE)
+                i915_gem_object_do_bit_17_swizzle(obj);
+
+        return 0;
+
+err_pages:
+        while (i--)
+                page_cache_release(obj_priv->pages[i]);
+
+        drm_free_large(obj_priv->pages);
+        obj_priv->pages = NULL;
+        return PTR_ERR(page);
+}
+
 static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
 {
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
         int page_count = obj->size / PAGE_SIZE;
         int i;
 
-        BUG_ON(obj_priv->pages_refcount == 0);
         BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
 
-        if (--obj_priv->pages_refcount != 0)
-                return;
-
         if (obj_priv->tiling_mode != I915_TILING_NONE)
                 i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -2229,8 +2251,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         drm_unbind_agp(obj_priv->agp_mem);
         drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
 
-        i915_gem_object_put_pages(obj);
-        BUG_ON(obj_priv->pages_refcount);
+        i915_gem_object_put_pages_gtt(obj);
 
         i915_gem_info_remove_gtt(dev_priv, obj);
         list_del_init(&obj_priv->mm_list);
@@ -2290,62 +2311,6 @@ i915_gpu_idle(struct drm_device *dev)
         return 0;
 }
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-                          gfp_t gfpmask)
-{
-        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-        int page_count, i;
-        struct address_space *mapping;
-        struct inode *inode;
-        struct page *page;
-
-        BUG_ON(obj_priv->pages_refcount
-               == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
-        if (obj_priv->pages_refcount++ != 0)
-                return 0;
-
-        /* Get the list of pages out of our struct file.  They'll be pinned
-         * at this point until we release them.
-         */
-        page_count = obj->size / PAGE_SIZE;
-        BUG_ON(obj_priv->pages != NULL);
-        obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
-        if (obj_priv->pages == NULL) {
-                obj_priv->pages_refcount--;
-                return -ENOMEM;
-        }
-
-        inode = obj->filp->f_path.dentry->d_inode;
-        mapping = inode->i_mapping;
-        for (i = 0; i < page_count; i++) {
-                page = read_cache_page_gfp(mapping, i,
-                                           GFP_HIGHUSER |
-                                           __GFP_COLD |
-                                           __GFP_RECLAIMABLE |
-                                           gfpmask);
-                if (IS_ERR(page))
-                        goto err_pages;
-
-                obj_priv->pages[i] = page;
-        }
-
-        if (obj_priv->tiling_mode != I915_TILING_NONE)
-                i915_gem_object_do_bit_17_swizzle(obj);
-
-        return 0;
-
-err_pages:
-        while (i--)
-                page_cache_release(obj_priv->pages[i]);
-
-        drm_free_large(obj_priv->pages);
-        obj_priv->pages = NULL;
-        obj_priv->pages_refcount--;
-        return PTR_ERR(page);
-}
-
 static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
 {
         struct drm_gem_object *obj = reg->obj;
@@ -2772,7 +2737,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                 goto search_free;
         }
 
-        ret = i915_gem_object_get_pages(obj, gfpmask);
+        ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
         if (ret) {
                 drm_mm_put_block(obj_priv->gtt_space);
                 obj_priv->gtt_space = NULL;
@@ -2806,7 +2771,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                                obj_priv->gtt_space->start,
                                                obj_priv->agp_type);
         if (obj_priv->agp_mem == NULL) {
-                i915_gem_object_put_pages(obj);
+                i915_gem_object_put_pages_gtt(obj);
                 drm_mm_put_block(obj_priv->gtt_space);
                 obj_priv->gtt_space = NULL;
 
@@ -4860,33 +4825,35 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                  struct drm_gem_object *obj)
 {
-        struct drm_i915_gem_object *obj_priv;
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
+        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+        char *vaddr;
         int i;
-        int ret;
         int page_count;
 
-        obj_priv = to_intel_bo(obj);
         if (!obj_priv->phys_obj)
                 return;
+        vaddr = obj_priv->phys_obj->handle->vaddr;
 
-        ret = i915_gem_object_get_pages(obj, 0);
-        if (ret)
-                goto out;
-
         page_count = obj->size / PAGE_SIZE;
 
         for (i = 0; i < page_count; i++) {
-                char *dst = kmap_atomic(obj_priv->pages[i]);
-                char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
-                memcpy(dst, src, PAGE_SIZE);
-                kunmap_atomic(dst);
+                struct page *page = read_cache_page_gfp(mapping, i,
+                                                        GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (!IS_ERR(page)) {
+                        char *dst = kmap_atomic(page);
+                        memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+                        kunmap_atomic(dst);
+
+                        drm_clflush_pages(&page, 1);
+
+                        set_page_dirty(page);
+                        mark_page_accessed(page);
+                        page_cache_release(page);
+                }
         }
-        drm_clflush_pages(obj_priv->pages, page_count);
         drm_agp_chipset_flush(dev);
 
-        i915_gem_object_put_pages(obj);
-out:
         obj_priv->phys_obj->cur_obj = NULL;
         obj_priv->phys_obj = NULL;
 }
@@ -4897,6 +4864,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                             int id,
                             int align)
 {
+        struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
         drm_i915_private_t *dev_priv = dev->dev_private;
         struct drm_i915_gem_object *obj_priv;
         int ret = 0;
@@ -4920,7 +4888,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                                                 obj->size, align);
                 if (ret) {
                         DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
-                        goto out;
+                        return ret;
                 }
         }
 
@@ -4928,27 +4896,27 @@ i915_gem_attach_phys_object(struct drm_device *dev,
         obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
         obj_priv->phys_obj->cur_obj = obj;
 
-        ret = i915_gem_object_get_pages(obj, 0);
-        if (ret) {
-                DRM_ERROR("failed to get page list\n");
-                goto out;
-        }
-
         page_count = obj->size / PAGE_SIZE;
 
         for (i = 0; i < page_count; i++) {
-                char *src = kmap_atomic(obj_priv->pages[i]);
-                char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+                struct page *page;
+                char *dst, *src;
+
+                page = read_cache_page_gfp(mapping, i,
+                                           GFP_HIGHUSER | __GFP_RECLAIMABLE);
+                if (IS_ERR(page))
+                        return PTR_ERR(page);
 
+                src = kmap_atomic(obj_priv->pages[i]);
+                dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
                 memcpy(dst, src, PAGE_SIZE);
                 kunmap_atomic(src);
-        }
 
-        i915_gem_object_put_pages(obj);
+                mark_page_accessed(page);
+                page_cache_release(page);
+        }
 
         return 0;
-out:
-        return ret;
 }
 
 static int