 drivers/char/agp/intel-gtt.c               |  51
 drivers/gpu/drm/drm_cache.c                |  25
 drivers/gpu/drm/i915/i915_drv.h            |  18
 drivers/gpu/drm/i915/i915_gem.c            |  79
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     |  99
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   3
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 123
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  16
 drivers/gpu/drm/i915/i915_irq.c            |  25
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   9
 include/drm/drmP.h                         |   1
 include/drm/intel-gtt.h                    |  10
 12 files changed, 239 insertions(+), 220 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 58e32f7c3229..7fa655ac24d8 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -84,40 +84,33 @@ static struct _intel_private {
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
 #define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg)
+static int intel_gtt_map_memory(struct page **pages,
+				unsigned int num_entries,
+				struct sg_table *st)
 {
-	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (*sg_list)
-		return 0; /* already mapped (for e.g. resume */
-
 	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+	if (sg_alloc_table(st, num_entries, GFP_KERNEL))
 		goto err;
 
-	*sg_list = sg = st.sgl;
-
-	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+	for_each_sg(st->sgl, sg, num_entries, i)
 		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
-			     num_entries, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!*num_sg))
+	if (!pci_map_sg(intel_private.pcidev,
+			st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
 		goto err;
 
 	return 0;
 
 err:
-	sg_free_table(&st);
+	sg_free_table(st);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(intel_gtt_map_memory);
 
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
+static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
 	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
@@ -130,7 +123,6 @@ void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 
 	sg_free_table(&st);
 }
-EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -879,8 +871,7 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
 {
@@ -892,12 +883,11 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 
 	/* sg may merge pages, but we have to separate
 	 * per-page addr for GTT */
-	for_each_sg(sg_list, sg, sg_len, i) {
+	for_each_sg(st->sgl, sg, st->nents, i) {
 		len = sg_dma_len(sg) >> PAGE_SHIFT;
 		for (m = 0; m < len; m++) {
 			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-			intel_private.driver->write_entry(addr,
-							  j, flags);
+			intel_private.driver->write_entry(addr, j, flags);
 			j++;
 		}
 	}
@@ -905,8 +895,10 @@ void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
 }
 EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags)
+static void intel_gtt_insert_pages(unsigned int first_entry,
+				   unsigned int num_entries,
+				   struct page **pages,
+				   unsigned int flags)
 {
 	int i, j;
 
@@ -917,7 +909,6 @@ void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
 	}
 	readl(intel_private.gtt+j-1);
 }
-EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
@@ -953,13 +944,15 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	global_cache_flush();
 
 	if (intel_private.base.needs_dmar) {
-		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
-					   &mem->sg_list, &mem->num_sg);
+		struct sg_table st;
+
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
 		if (ret != 0)
 			return ret;
 
-		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
-					    pg_start, type);
+		intel_gtt_insert_sg_entries(&st, pg_start, type);
+		mem->sg_list = st.sgl;
+		mem->num_sg = st.nents;
 	} else
 		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
 				       type);
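
With this file's changes, struct sg_table becomes the only currency between the GTT midlayer and its callers: intel_gtt_map_memory() fills a caller-provided table and intel_gtt_insert_sg_entries() consumes it whole. A minimal caller-side sketch of the new flow (the helper name and trimmed error handling are illustrative, not part of the patch):

	#include <linux/scatterlist.h>
	#include <drm/intel-gtt.h>

	/* Build an sg_table over a page array and program it into the GTT. */
	static int example_bind_pages(struct page **pages, unsigned int count,
				      unsigned int pg_start, unsigned int flags,
				      struct sg_table *st)
	{
		struct scatterlist *sg;
		int i;

		if (sg_alloc_table(st, count, GFP_KERNEL))
			return -ENOMEM;

		for_each_sg(st->sgl, sg, count, i)
			sg_set_page(sg, pages[i], PAGE_SIZE, 0);

		/* once the table is DMA-mapped, one call writes every PTE */
		intel_gtt_insert_sg_entries(st, pg_start, flags);
		return 0;
	}
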
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 3dbc7f17eb11..4a4274b348b6 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -101,6 +101,31 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 EXPORT_SYMBOL(drm_clflush_pages);
 
 void
+drm_clflush_sg(struct sg_table *st)
+{
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		struct scatterlist *sg;
+		int i;
+
+		mb();
+		for_each_sg(st->sgl, sg, st->nents, i)
+			drm_clflush_page(sg_page(sg));
+		mb();
+
+		return;
+	}
+
+	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
+		printk(KERN_ERR "Timed out waiting for cache flush.\n");
+#else
+	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
+	WARN_ON_ONCE(1);
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_sg);
+
+void
 drm_clflush_virt_range(char *addr, unsigned long length)
 {
 #if defined(CONFIG_X86)
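
The new export simply walks the table and flushes each backing page, so callers that previously clflushed a flat struct page array can hand over their sg_table directly. A hedged before/after sketch (the obj fields are illustrative):

	/* before: flush via a flat page array */
	drm_clflush_pages(obj->page_array, obj->base.size / PAGE_SIZE);

	/* after: flush the same pages via the object's scatterlist */
	drm_clflush_sg(obj->pages);
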
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12a075747dcf..1d7502faebd6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1006,16 +1006,11 @@ struct drm_i915_gem_object {
 
 	unsigned int has_aliasing_ppgtt_mapping:1;
 	unsigned int has_global_gtt_mapping:1;
+	unsigned int has_dma_mapping:1;
 
-	struct page **pages;
+	struct sg_table *pages;
 	int pages_pin_count;
 
-	/**
-	 * DMAR support
-	 */
-	struct scatterlist *sg_list;
-	int num_sg;
-
 	/* prime dma-buf support */
 	struct sg_table *sg_table;
 	void *dma_buf_vmapping;
@@ -1342,6 +1337,15 @@ void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
+{
+	struct scatterlist *sg = obj->pages->sgl;
+	while (n >= SG_MAX_SINGLE_ALLOC) {
+		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
+		n -= SG_MAX_SINGLE_ALLOC - 1;
+	}
+	return sg_page(sg+n);
+}
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages == NULL);
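
The i915_gem_object_get_page() helper added here depends on the chained scatterlist layout: each block holds SG_MAX_SINGLE_ALLOC entries, and in a chained block the final entry is a link to the next block rather than a payload entry, which is why both the pointer step and the index adjustment subtract one. A hedged sketch of typical use (the wrapper name is illustrative):

	/* kmap page n of an object without assuming a flat page array;
	 * e.g. with 4K pages and 32-byte scatterlist entries,
	 * SG_MAX_SINGLE_ALLOC is 128, so page 200 resolves to entry 73
	 * of the second block (the first contributes 127 payload entries). */
	static void *example_map_page(struct drm_i915_gem_object *obj, int n)
	{
		return kmap_atomic(i915_gem_object_get_page(obj, n));
	}
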
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 26c8bf9c5fa6..8f001fa155a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -411,6 +411,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int prefaulted = 0;
 	int needs_clflush = 0;
+	struct scatterlist *sg;
+	int i;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -439,9 +441,15 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	offset = args->offset;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -452,7 +460,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = obj->pages[offset >> PAGE_SHIFT];
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -731,6 +739,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
+	int i;
+	struct scatterlist *sg;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -765,10 +775,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
 
-	while (remain > 0) {
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
 		struct page *page;
 		int partial_cacheline_write;
 
+		if (i < offset >> PAGE_SHIFT)
+			continue;
+
+		if (remain <= 0)
+			break;
+
 		/* Operation in this page
 		 *
 		 * shmem_page_offset = offset within page in shmem file
@@ -787,7 +803,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		((shmem_page_offset | page_length)
 		 & (boot_cpu_data.x86_clflush_size - 1));
 
-		page = obj->pages[offset >> PAGE_SHIFT];
+		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -1633,6 +1649,7 @@ static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
+	struct scatterlist *sg;
 	int ret, i;
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -1653,19 +1670,21 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	if (obj->madv == I915_MADV_DONTNEED)
 		obj->dirty = 0;
 
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+
 		if (obj->dirty)
-			set_page_dirty(obj->pages[i]);
+			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj->pages[i]);
+			mark_page_accessed(page);
 
-		page_cache_release(obj->pages[i]);
+		page_cache_release(page);
 	}
 	obj->dirty = 0;
 
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
 }
 
 static int
@@ -1682,6 +1701,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 		return -EBUSY;
 
 	ops->put_pages(obj);
+	obj->pages = NULL;
 
 	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
@@ -1739,6 +1759,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int page_count, i;
 	struct address_space *mapping;
+	struct sg_table *st;
+	struct scatterlist *sg;
 	struct page *page;
 	gfp_t gfp;
 
@@ -1749,20 +1771,27 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
 	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	/* Get the list of pages out of our struct file. They'll be pinned
-	 * at this point until we release them.
-	 */
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
 	page_count = obj->base.size / PAGE_SIZE;
-	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-	if (obj->pages == NULL)
+	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
+		sg_free_table(st);
+		kfree(st);
 		return -ENOMEM;
+	}
 
-	/* Fail silently without starting the shrinker */
+	/* Get the list of pages out of our struct file. They'll be pinned
+	 * at this point until we release them.
+	 *
+	 * Fail silently without starting the shrinker
+	 */
 	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
 	gfp &= ~(__GFP_IO | __GFP_WAIT);
-	for (i = 0; i < page_count; i++) {
+	for_each_sg(st->sgl, sg, page_count, i) {
 		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 		if (IS_ERR(page)) {
 			i915_gem_purge(dev_priv, page_count);
@@ -1785,20 +1814,20 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		gfp &= ~(__GFP_IO | __GFP_WAIT);
 	}
 
-		obj->pages[i] = page;
+		sg_set_page(sg, page, PAGE_SIZE, 0);
 	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
+	obj->pages = st;
 	return 0;
 
 err_pages:
-	while (i--)
-		page_cache_release(obj->pages[i]);
-
-	drm_free_large(obj->pages);
-	obj->pages = NULL;
+	for_each_sg(st->sgl, sg, i, page_count)
+		page_cache_release(sg_page(sg));
+	sg_free_table(st);
+	kfree(st);
 	return PTR_ERR(page);
 }
 
@@ -2981,7 +3010,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
+	drm_clflush_sg(obj->pages);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -3731,6 +3760,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	i915_gem_object_put_pages(obj);
 	i915_gem_object_free_mmap_offset(obj);
 
+	BUG_ON(obj->pages);
+
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
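
The pread/pwrite conversions above replace random indexing with a linear walk: the iterator visits every sg entry, skipping until it reaches the page that contains the start offset and stopping once the request is satisfied. The recurring pattern, reduced to a hedged sketch (first_page and remain are stand-ins for the locals in each loop):

	struct scatterlist *sg;
	int i;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
		struct page *page;

		if (i < first_page)	/* not yet at the starting offset */
			continue;
		if (remain <= 0)	/* the requested range is done */
			break;

		page = sg_page(sg);
		/* ... kmap page, copy up to PAGE_SIZE bytes, update remain ... */
	}
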
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index eca4726f414d..4bb1b94df5c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,33 +28,57 @@
 #include <linux/dma-buf.h>
 
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
 					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
-	struct drm_device *dev = obj->base.dev;
-	int npages = obj->base.size / PAGE_SIZE;
-	struct sg_table *sg;
-	int ret;
-	int nents;
+	struct sg_table *st;
+	struct scatterlist *src, *dst;
+	int ret, i;
 
-	ret = i915_mutex_lock_interruptible(dev);
+	ret = i915_mutex_lock_interruptible(obj->base.dev);
 	if (ret)
 		return ERR_PTR(ret);
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret) {
-		sg = ERR_PTR(ret);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	/* Copy sg so that we make an independent mapping */
+	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (st == NULL) {
+		st = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(st);
+		st = ERR_PTR(ret);
+		goto out;
+	}
+
+	src = obj->pages->sgl;
+	dst = st->sgl;
+	for (i = 0; i < obj->pages->nents; i++) {
+		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		dst = sg_next(dst);
+		src = sg_next(src);
+	}
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+		sg_free_table(st);
+		kfree(st);
+		st = ERR_PTR(-ENOMEM);
 		goto out;
 	}
 
-	/* link the pages into an SG then map the sg */
-	sg = drm_prime_pages_to_sg(obj->pages, npages);
-	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
 	i915_gem_object_pin_pages(obj);
 
 out:
-	mutex_unlock(&dev->struct_mutex);
-	return sg;
+	mutex_unlock(&obj->base.dev->struct_mutex);
+	return st;
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
@@ -80,7 +104,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	int ret;
+	struct scatterlist *sg;
+	struct page **pages;
+	int ret, i;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -92,22 +118,33 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 	}
 
 	ret = i915_gem_object_get_pages(obj);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto error;
 
-	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-	if (!obj->dma_buf_vmapping) {
-		DRM_ERROR("failed to vmap object\n");
-		goto out_unlock;
-	}
+	ret = -ENOMEM;
+
+	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	if (pages == NULL)
+		goto error;
+
+	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+		pages[i] = sg_page(sg);
+
+	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	drm_free_large(pages);
+
+	if (!obj->dma_buf_vmapping)
+		goto error;
 
 	obj->vmapping_count = 1;
 	i915_gem_object_pin_pages(obj);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return obj->dma_buf_vmapping;
+
+error:
+	mutex_unlock(&dev->struct_mutex);
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
@@ -184,22 +221,19 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-	return dma_buf_export(obj, &i915_dmabuf_ops,
-			      obj->base.size, 0600);
+	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
 }
 
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 					     struct dma_buf *dma_buf)
 {
 	struct dma_buf_attachment *attach;
 	struct sg_table *sg;
 	struct drm_i915_gem_object *obj;
-	int npages;
-	int size;
 	int ret;
 
 	/* is this one of own objects? */
@@ -223,21 +257,19 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 		goto fail_detach;
 	}
 
-	size = dma_buf->size;
-	npages = size / PAGE_SIZE;
-
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL) {
 		ret = -ENOMEM;
 		goto fail_unmap;
 	}
 
-	ret = drm_gem_private_object_init(dev, &obj->base, size);
+	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
 	if (ret) {
 		kfree(obj);
 		goto fail_unmap;
 	}
 
+	obj->has_dma_mapping = true;
 	obj->sg_table = sg;
 	obj->base.import_attach = attach;
 
@@ -249,3 +281,4 @@ fail_detach:
 	dma_buf_detach(dma_buf, attach);
 	return ERR_PTR(ret);
 }
+
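
The exporter now clones the object's scatterlist before DMA-mapping it, so every importer receives an independent sg_table whose DMA addresses can be unmapped without disturbing the object's own mapping. The core of that clone, as a standalone hedged sketch (function name illustrative; PAGE_SIZE segments assumed, as in the patch):

	static struct sg_table *example_clone_sgt(struct sg_table *src_st)
	{
		struct sg_table *st;
		struct scatterlist *src, *dst;
		int i;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (st == NULL)
			return NULL;

		if (sg_alloc_table(st, src_st->nents, GFP_KERNEL)) {
			kfree(st);
			return NULL;
		}

		/* copy page pointers only; the importer assigns its own
		 * DMA addresses with dma_map_sg() afterwards */
		src = src_st->sgl;
		dst = st->sgl;
		for (i = 0; i < src_st->nents; i++) {
			sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
			dst = sg_next(dst);
			src = sg_next(src);
		}
		return st;
	}
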
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e6b2205ecf6d..4ab008397d60 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -210,7 +210,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+						     reloc->offset >> PAGE_SHIFT));
 	*(uint32_t *)(vaddr + page_offset) = reloc->delta;
 	kunmap_atomic(vaddr);
 } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 18477314d85d..e0c9bddb7d92 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -167,8 +167,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
 }
 
 static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
-					 struct scatterlist *sg_list,
-					 unsigned sg_len,
+					 const struct sg_table *pages,
 					 unsigned first_entry,
 					 uint32_t pte_flags)
 {
@@ -180,12 +179,12 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 	struct scatterlist *sg;
 
 	/* init sg walking */
-	sg = sg_list;
+	sg = pages->sgl;
 	i = 0;
 	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 	m = 0;
 
-	while (i < sg_len) {
+	while (i < pages->nents) {
 		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
 
 		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
@@ -194,13 +193,11 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 			pt_vaddr[j] = pte | pte_flags;
 
 			/* grab the next page */
-			m++;
-			if (m == segment_len) {
-				sg = sg_next(sg);
-				i++;
-				if (i == sg_len)
+			if (++m == segment_len) {
+				if (++i == pages->nents)
 					break;
 
+				sg = sg_next(sg);
 				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
 				m = 0;
 			}
@@ -213,44 +210,10 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
 	}
 }
 
-static void i915_ppgtt_insert_pages(struct i915_hw_ppgtt *ppgtt,
-				    unsigned first_entry, unsigned num_entries,
-				    struct page **pages, uint32_t pte_flags)
-{
-	uint32_t *pt_vaddr, pte;
-	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
-	unsigned last_pte, i;
-	dma_addr_t page_addr;
-
-	while (num_entries) {
-		last_pte = first_pte + num_entries;
-		last_pte = min_t(unsigned, last_pte, I915_PPGTT_PT_ENTRIES);
-
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
-
-		for (i = first_pte; i < last_pte; i++) {
-			page_addr = page_to_phys(*pages);
-			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
-			pt_vaddr[i] = pte | pte_flags;
-
-			pages++;
-		}
-
-		kunmap_atomic(pt_vaddr);
-
-		num_entries -= last_pte - first_pte;
-		first_pte = 0;
-		act_pd++;
-	}
-}
-
 void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 			    struct drm_i915_gem_object *obj,
 			    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t pte_flags = GEN6_PTE_VALID;
 
 	switch (cache_level) {
@@ -261,7 +224,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		pte_flags |= GEN6_PTE_CACHE_LLC;
 		break;
 	case I915_CACHE_NONE:
-		if (IS_HASWELL(dev))
+		if (IS_HASWELL(obj->base.dev))
 			pte_flags |= HSW_PTE_UNCACHED;
 		else
 			pte_flags |= GEN6_PTE_UNCACHED;
@@ -270,26 +233,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 		BUG();
 	}
 
-	if (obj->sg_table) {
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_table->sgl,
-					     obj->sg_table->nents,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		i915_ppgtt_insert_sg_entries(ppgtt,
-					     obj->sg_list,
-					     obj->num_sg,
-					     obj->gtt_space->start >> PAGE_SHIFT,
-					     pte_flags);
-	} else
-		i915_ppgtt_insert_pages(ppgtt,
-					obj->gtt_space->start >> PAGE_SHIFT,
-					obj->base.size >> PAGE_SHIFT,
-					obj->pages,
-					pte_flags);
+	i915_ppgtt_insert_sg_entries(ppgtt,
+				     obj->sg_table ?: obj->pages,
+				     obj->gtt_space->start >> PAGE_SHIFT,
+				     pte_flags);
 }
 
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -361,44 +308,26 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* don't map imported dma buf objects */
-	if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table)
-		return intel_gtt_map_memory(obj->pages,
-					    obj->base.size >> PAGE_SHIFT,
-					    &obj->sg_list,
-					    &obj->num_sg);
-	else
+	if (obj->has_dma_mapping)
 		return 0;
+
+	if (!dma_map_sg(&obj->base.dev->pdev->dev,
+			obj->pages->sgl, obj->pages->nents,
+			PCI_DMA_BIDIRECTIONAL))
+		return -ENOSPC;
+
+	return 0;
 }
 
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
 			      enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
-	if (obj->sg_table) {
-		intel_gtt_insert_sg_entries(obj->sg_table->sgl,
-					    obj->sg_table->nents,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else if (dev_priv->mm.gtt->needs_dmar) {
-		BUG_ON(!obj->sg_list);
-
-		intel_gtt_insert_sg_entries(obj->sg_list,
-					    obj->num_sg,
-					    obj->gtt_space->start >> PAGE_SHIFT,
-					    agp_type);
-	} else
-		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
-				       obj->base.size >> PAGE_SHIFT,
-				       obj->pages,
-				       agp_type);
-
+	intel_gtt_insert_sg_entries(obj->sg_table ?: obj->pages,
+				    obj->gtt_space->start >> PAGE_SHIFT,
+				    agp_type);
 	obj->has_global_gtt_mapping = 1;
 }
 
@@ -418,10 +347,10 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 
 	interruptible = do_idling(dev_priv);
 
-	if (obj->sg_list) {
-		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
-		obj->sg_list = NULL;
-	}
+	if (!obj->has_dma_mapping)
+		dma_unmap_sg(&dev->pdev->dev,
+			     obj->pages->sgl, obj->pages->nents,
+			     PCI_DMA_BIDIRECTIONAL);
 
 	undo_idling(dev_priv, interruptible);
 }
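
After this file's changes, i915 owns its DMA mappings directly through the generic DMA API, and the has_dma_mapping flag marks imported dma-buf objects that arrive pre-mapped and must be left alone. The prepare/finish pairing, condensed into a hedged sketch (function names illustrative):

	static int example_prepare(struct drm_i915_gem_object *obj)
	{
		if (obj->has_dma_mapping)	/* importer already mapped it */
			return 0;

		if (!dma_map_sg(&obj->base.dev->pdev->dev,
				obj->pages->sgl, obj->pages->nents,
				PCI_DMA_BIDIRECTIONAL))
			return -ENOSPC;

		return 0;
	}

	static void example_finish(struct drm_i915_gem_object *obj)
	{
		if (!obj->has_dma_mapping)	/* skip imported objects */
			dma_unmap_sg(&obj->base.dev->pdev->dev,
				     obj->pages->sgl, obj->pages->nents,
				     PCI_DMA_BIDIRECTIONAL);
	}
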
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b964df51cec7..8093ecd2ea31 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -470,18 +470,20 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj->pages[i]);
-			set_page_dirty(obj->pages[i]);
+			i915_gem_swizzle_page(page);
+			set_page_dirty(page);
 		}
 	}
 }
@@ -489,6 +491,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
+	struct scatterlist *sg;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -502,8 +505,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj->pages[i]) & (1 << 17))
+	for_each_sg(obj->pages->sgl, sg, page_count, i) {
+		struct page *page = sg_page(sg);
+		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d6010135e404..dd49046bccd1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -888,20 +888,20 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 				struct drm_i915_gem_object *src)
 {
 	struct drm_i915_error_object *dst;
-	int page, page_count;
+	int i, count;
 	u32 reloc_offset;
 
 	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	page_count = src->base.size / PAGE_SIZE;
+	count = src->base.size / PAGE_SIZE;
 
-	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
+	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
 	reloc_offset = src->gtt_offset;
-	for (page = 0; page < page_count; page++) {
+	for (i = 0; i < count; i++) {
 		unsigned long flags;
 		void *d;
 
@@ -924,30 +924,33 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 			memcpy_fromio(d, s, PAGE_SIZE);
 			io_mapping_unmap_atomic(s);
 		} else {
+			struct page *page;
 			void *s;
 
-			drm_clflush_pages(&src->pages[page], 1);
+			page = i915_gem_object_get_page(src, i);
+
+			drm_clflush_pages(&page, 1);
 
-			s = kmap_atomic(src->pages[page]);
+			s = kmap_atomic(page);
 			memcpy(d, s, PAGE_SIZE);
 			kunmap_atomic(s);
 
-			drm_clflush_pages(&src->pages[page], 1);
+			drm_clflush_pages(&page, 1);
 		}
 		local_irq_restore(flags);
 
-		dst->pages[page] = d;
+		dst->pages[i] = d;
 
 		reloc_offset += PAGE_SIZE;
 	}
-	dst->page_count = page_count;
+	dst->page_count = count;
 	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
 unwind:
-	while (page--)
-		kfree(dst->pages[page]);
+	while (i--)
+		kfree(dst->pages[i]);
 	kfree(dst);
 	return NULL;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 55cdb4d30a16..984a0c5fbf5d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -464,7 +464,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		goto err_unref;
 
 	pc->gtt_offset = obj->gtt_offset;
-	pc->cpu_page = kmap(obj->pages[0]);
+	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL)
 		goto err_unpin;
 
@@ -491,7 +491,8 @@ cleanup_pipe_control(struct intel_ring_buffer *ring)
 		return;
 
 	obj = pc->obj;
-	kunmap(obj->pages[0]);
+
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
@@ -1026,7 +1027,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 	if (obj == NULL)
 		return;
 
-	kunmap(obj->pages[0]);
+	kunmap(sg_page(obj->pages->sgl));
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
@@ -1053,7 +1054,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	}
 
 	ring->status_page.gfx_addr = obj->gtt_offset;
-	ring->status_page.page_addr = kmap(obj->pages[0]);
+	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
 		goto err_unpin;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index d6b67bb9075f..d5f0c163eef1 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1367,6 +1367,7 @@ extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+void drm_clflush_sg(struct sg_table *st);
 void drm_clflush_virt_range(char *addr, unsigned long length);
 
 	/* Locking IOCTL support (drm_lock.h) */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 8e29d551bb3c..2e37e9f02e71 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -30,16 +30,10 @@ void intel_gmch_remove(void);
 bool intel_enable_gtt(void);
 
 void intel_gtt_chipset_flush(void);
-void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
-void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
-int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
-			 struct scatterlist **sg_list, int *num_sg);
-void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-				 unsigned int sg_len,
+void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags);
-void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
-			    struct page **pages, unsigned int flags);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 
 /* Special gtt memory types */
 #define AGP_DCACHE_MEMORY	1