author    Linus Torvalds <torvalds@linux-foundation.org>  2009-07-29 15:31:59 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-07-29 15:31:59 -0400
commit    84210aeb4a6a77de8a3067b121026bad630cd3da
tree      1a458ba7dba9955108349a49a4e878ed9ea79755 /drivers/gpu/drm/ttm
parent    7d4dd028b022ddf8631b4530ed8d7777526f545e
parent    ed8f0d9e708a1a7c9222e7d0a35d97521e904223
Merge branch 'drm-radeon-kms' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-radeon-kms' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (35 commits)
  drm/radeon: set fb aperture sizes for framebuffer handoff.
  drm/ttm: fix highuser vs dma32 confusion.
  drm/radeon: Fix size used for benchmarking BO copies.
  drm/radeon: Add radeon.test parameter for running BO GPU copy tests.
  drm/radeon/kms: allow interruptible waits for objects.
  drm/ttm: powerpc: Fix Highmem cache flushing.
  x86: Export kmap_atomic_prot() needed for TTM.
  drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.
  drm/ttm: Fix an oops and sync object leak.
  drm/radeon/kms: vram sizing on certain r100 chips needs workaround.
  drm/radeon: Pay more attention to object placement requested by userspace.
  drm/radeon: Fall back to evicting BOs with memcpy if necessary.
  drm/radeon: Don't unreserve twice on failure to validate.
  drm/radeon/kms: fix bandwidth computation on avivo hardware
  drm/radeon/kms: add initial colortiling support.
  drm/radeon/kms: fix hotspot handling on pre-avivo chips
  drm/radeon/kms: enable frac fb divs on rs600/rs690/rs740
  drm/radeon/kms: add PLL flag to prefer frequencies <= the target freq
  drm/radeon/kms: block RN50 from using 3D engine.
  drm/radeon/kms: fix VRAM sizing like DDX does it.
  ...
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c       | 63
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  | 69
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c    |  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c       | 25
4 files changed, 127 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c1c407f7cca3..6538d4236989 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
 #define TTM_BO_HASH_ORDER 13
 
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
-static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 
 static inline uint32_t ttm_bo_type_flags(unsigned type)
@@ -224,6 +223,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
         TTM_ASSERT_LOCKED(&bo->mutex);
         bo->ttm = NULL;
 
+        if (bdev->need_dma32)
+                page_flags |= TTM_PAGE_FLAG_DMA32;
+
         switch (bo->type) {
         case ttm_bo_type_device:
                 if (zero_alloc)
@@ -304,6 +306,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
         }
 
+        if (bdev->driver->move_notify)
+                bdev->driver->move_notify(bo, mem);
+
         if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
             !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                 ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
@@ -655,31 +660,52 @@ retry_pre_get:
         return 0;
 }
 
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+                                      uint32_t cur_placement,
+                                      uint32_t proposed_placement)
+{
+        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+        /**
+         * Keep current caching if possible.
+         */
+
+        if ((cur_placement & caching) != 0)
+                result |= (cur_placement & caching);
+        else if ((man->default_caching & caching) != 0)
+                result |= man->default_caching;
+        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+                result |= TTM_PL_FLAG_CACHED;
+        else if ((TTM_PL_FLAG_WC & caching) != 0)
+                result |= TTM_PL_FLAG_WC;
+        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+                result |= TTM_PL_FLAG_UNCACHED;
+
+        return result;
+}
+
+
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                  bool disallow_fixed,
                                  uint32_t mem_type,
-                                 uint32_t mask, uint32_t *res_mask)
+                                 uint32_t proposed_placement,
+                                 uint32_t *masked_placement)
 {
         uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
         if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                 return false;
 
-        if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                 return false;
 
-        if ((mask & man->available_caching) == 0)
+        if ((proposed_placement & man->available_caching) == 0)
                 return false;
-        if (mask & man->default_caching)
-                cur_flags |= man->default_caching;
-        else if (mask & TTM_PL_FLAG_CACHED)
-                cur_flags |= TTM_PL_FLAG_CACHED;
-        else if (mask & TTM_PL_FLAG_WC)
-                cur_flags |= TTM_PL_FLAG_WC;
-        else
-                cur_flags |= TTM_PL_FLAG_UNCACHED;
 
-        *res_mask = cur_flags;
+        cur_flags |= (proposed_placement & man->available_caching);
+
+        *masked_placement = cur_flags;
         return true;
 }
 
@@ -723,6 +749,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                 if (!type_ok)
                         continue;
 
+                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                  cur_flags);
+
                 if (mem_type == TTM_PL_SYSTEM)
                         break;
 
@@ -779,6 +808,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                                               proposed_placement, &cur_flags))
                         continue;
 
+                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+                                                  cur_flags);
+
                 ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                              interruptible, no_wait);
 
@@ -1305,7 +1337,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                        struct ttm_mem_global *mem_glob,
-                       struct ttm_bo_driver *driver, uint64_t file_page_offset)
+                       struct ttm_bo_driver *driver, uint64_t file_page_offset,
+                       bool need_dma32)
 {
         int ret = -EINVAL;
 
@@ -1342,6 +1375,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
         INIT_LIST_HEAD(&bdev->ddestroy);
         INIT_LIST_HEAD(&bdev->swap_lru);
         bdev->dev_mapping = NULL;
+        bdev->need_dma32 = need_dma32;
         ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
         ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
         if (unlikely(ret != 0)) {
@@ -1419,6 +1453,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
         unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
 }
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 {
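The most interesting addition to ttm_bo.c is ttm_bo_select_caching(): when a buffer object changes placement, TTM now keeps the object's current caching mode whenever the proposed placement allows it, falls back to the memory manager's default caching next, and only then walks the fixed preference order cached, then write-combined, then uncached. Below is a minimal user-space sketch of that precedence; the TTM_PL_* bit values are assumptions matching include/drm/ttm/ttm_placement.h of this era, and select_caching() stands in for the kernel function with man->default_caching passed explicitly.

#include <stdio.h>
#include <stdint.h>

/* Assumed flag values; illustrative only. */
#define TTM_PL_FLAG_CACHED   (1u << 16)
#define TTM_PL_FLAG_UNCACHED (1u << 17)
#define TTM_PL_FLAG_WC       (1u << 18)
#define TTM_PL_MASK_CACHING  (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC)

static uint32_t select_caching(uint32_t default_caching,
                               uint32_t cur_placement,
                               uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        if (cur_placement & caching)             /* 1. keep current caching */
                result |= cur_placement & caching;
        else if (default_caching & caching)      /* 2. manager default */
                result |= default_caching;
        else if (TTM_PL_FLAG_CACHED & caching)   /* 3. fixed preference order */
                result |= TTM_PL_FLAG_CACHED;
        else if (TTM_PL_FLAG_WC & caching)
                result |= TTM_PL_FLAG_WC;
        else if (TTM_PL_FLAG_UNCACHED & caching)
                result |= TTM_PL_FLAG_UNCACHED;
        return result;
}

int main(void)
{
        /* BO currently write-combined; proposed placement allows cached
         * or WC. The current mode wins, so no attribute change is needed. */
        uint32_t r = select_caching(TTM_PL_FLAG_CACHED, TTM_PL_FLAG_WC,
                                    TTM_PL_FLAG_CACHED | TTM_PL_FLAG_WC);
        printf("WC kept: %s\n", (r & TTM_PL_FLAG_WC) ? "yes" : "no");
        return 0;
}

Keeping the current mode avoids a pointless caching-attribute transition, and the cache flushing that goes with it, whenever several caching flags would satisfy the request.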
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index bdec583901eb..ce2e6f38ea01 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-                                unsigned long page)
+                                unsigned long page,
+                                pgprot_t prot)
 {
         struct page *d = ttm_tt_get_page(ttm, page);
         void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
                 return -ENOMEM;
 
         src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+
+#ifdef CONFIG_X86
+        dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+        if (prot != PAGE_KERNEL)
+                dst = vmap(&d, 1, 0, prot);
+        else
+                dst = kmap(d);
+#endif
         if (!dst)
                 return -ENOMEM;
 
         memcpy_fromio(dst, src, PAGE_SIZE);
-        kunmap(d);
+
+#ifdef CONFIG_X86
+        kunmap_atomic(dst, KM_USER0);
+#else
+        if (prot != PAGE_KERNEL)
+                vunmap(dst);
+        else
+                kunmap(d);
+#endif
+
         return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-                                unsigned long page)
+                                unsigned long page,
+                                pgprot_t prot)
 {
         struct page *s = ttm_tt_get_page(ttm, page);
         void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
                 return -ENOMEM;
 
         dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-        src = kmap(s);
+#ifdef CONFIG_X86
+        src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+        if (prot != PAGE_KERNEL)
+                src = vmap(&s, 1, 0, prot);
+        else
+                src = kmap(s);
+#endif
         if (!src)
                 return -ENOMEM;
 
         memcpy_toio(dst, src, PAGE_SIZE);
-        kunmap(s);
+
+#ifdef CONFIG_X86
+        kunmap_atomic(src, KM_USER0);
+#else
+        if (prot != PAGE_KERNEL)
+                vunmap(src);
+        else
+                kunmap(s);
+#endif
+
         return 0;
 }
 
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
         for (i = 0; i < new_mem->num_pages; ++i) {
                 page = i * dir + add;
-                if (old_iomap == NULL)
-                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-                else if (new_iomap == NULL)
-                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-                else
+                if (old_iomap == NULL) {
+                        pgprot_t prot = ttm_io_prot(old_mem->placement,
+                                                    PAGE_KERNEL);
+                        ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+                                                   prot);
+                } else if (new_iomap == NULL) {
+                        pgprot_t prot = ttm_io_prot(new_mem->placement,
+                                                    PAGE_KERNEL);
+                        ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+                                                   prot);
+                } else
                         ret = ttm_copy_io_page(new_iomap, old_iomap, page);
                 if (ret)
                         goto out1;
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
         if (evict) {
                 ret = ttm_bo_wait(bo, false, false, false);
                 spin_unlock(&bo->lock);
-                driver->sync_obj_unref(&bo->sync_obj);
-
+                if (tmp_obj)
+                        driver->sync_obj_unref(&tmp_obj);
                 if (ret)
                         return ret;
 
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
                 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                 spin_unlock(&bo->lock);
+                if (tmp_obj)
+                        driver->sync_obj_unref(&tmp_obj);
 
                 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
                 if (ret)
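The copy helpers in ttm_bo_util.c now take a pgprot_t so that pages backing write-combined or uncached buffer objects are mapped with matching attributes during a memcpy move: on x86 this uses kmap_atomic_prot(), exported for TTM by one of the merged commits, while other architectures fall back to a one-page vmap() whenever the protection is not PAGE_KERNEL. A sketch of that pattern, factored into a wrapper pair, follows; ttm_kmap_prot()/ttm_kunmap_prot() are hypothetical names rather than TTM API, and the KM_USER0 slot argument matches the kmap_atomic() signature of this kernel era.

#include <linux/highmem.h>
#include <linux/vmalloc.h>

static void *ttm_kmap_prot(struct page *p, pgprot_t prot)
{
#ifdef CONFIG_X86
        /* Atomic highmem mapping with an explicit page protection. */
        return kmap_atomic_prot(p, KM_USER0, prot);
#else
        /* No kmap_atomic_prot() here: take a one-page vmap() for
         * non-default protections, plain kmap() otherwise. */
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                return vmap(&p, 1, 0, prot);
        return kmap(p);
#endif
}

static void ttm_kunmap_prot(struct page *p, void *virtual, pgprot_t prot)
{
#ifdef CONFIG_X86
        kunmap_atomic(virtual, KM_USER0);
#else
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
                vunmap(virtual);
        else
                kunmap(p);
#endif
}

The pgprot_val() comparison is used in this sketch because pgprot_t is an opaque type on some configurations; the hunks above compare the values directly, which works on the architectures they compile for.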
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe949a12fe40..33de7637c0c6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 return VM_FAULT_NOPAGE;
         }
 
+        if (bdev->driver->fault_reserve_notify)
+                bdev->driver->fault_reserve_notify(bo);
+
         /*
          * Wait for buffer data in transit, due to a pipelined
          * move.
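ttm_bo_vm.c gains a driver hook in the fault path: the driver is called with the BO already reserved, before TTM resolves the fault, so it can adjust the object first (the merged radeon series uses this kind of notification to keep faulting objects CPU-accessible). A hypothetical driver wiring is sketched below; the void signature taking only the BO is an assumption read off the call site above, and all foo_* names are illustrative.

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>

/* Called from ttm_bo_vm_fault() with the BO reserved, before any pages
 * are mapped into the faulting VMA. */
static void foo_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        /* e.g. validate the BO into a CPU-visible placement, or mark
         * it so its next validation stays mappable. */
}

static struct ttm_bo_driver foo_bo_driver = {
        /* ... invalidate_caches, init_mem_type, move, ... */
        .fault_reserve_notify = &foo_bo_fault_reserve_notify,
};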
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75dc8bd24592..b8b6c4a5f983 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
         unsigned long i;
 
         for (i = 0; i < num_pages; ++i) {
-                if (pages[i]) {
-                        unsigned long start = (unsigned long)page_address(pages[i]);
-                        flush_dcache_range(start, start + PAGE_SIZE);
-                }
+                struct page *page = pages[i];
+                void *page_virtual;
+
+                if (unlikely(page == NULL))
+                        continue;
+
+                page_virtual = kmap_atomic(page, KM_USER0);
+                flush_dcache_range((unsigned long) page_virtual,
+                                   (unsigned long) page_virtual + PAGE_SIZE);
+                kunmap_atomic(page_virtual, KM_USER0);
         }
 #else
         if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 
 static struct page *ttm_tt_alloc_page(unsigned page_flags)
 {
+        gfp_t gfp_flags = GFP_USER;
+
         if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-                return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+                gfp_flags |= __GFP_ZERO;
+
+        if (page_flags & TTM_PAGE_FLAG_DMA32)
+                gfp_flags |= __GFP_DMA32;
+        else
+                gfp_flags |= __GFP_HIGHMEM;
 
-        return alloc_page(GFP_HIGHUSER);
+        return alloc_page(gfp_flags);
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
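ttm_tt_alloc_page() now composes its gfp mask instead of hard-coding GFP_HIGHUSER, which is the substance of the "fix highuser vs dma32 confusion" commit: a device limited to 32-bit DMA addresses must never be handed highmem pages that can sit above 4GB, so TTM_PAGE_FLAG_DMA32 selects __GFP_DMA32 in place of __GFP_HIGHMEM (GFP_USER | __GFP_HIGHMEM is equivalent to the old GFP_HIGHUSER). The flag is set per-TT in ttm_bo_add_ttm() from the need_dma32 argument that ttm_bo_device_init() now takes, as seen in the ttm_bo.c hunks. A self-contained user-space sketch of the selection logic, with stand-in bit values:

#include <stdio.h>

typedef unsigned int gfp_t;

/* Stand-in values; the real ones live in gfp.h and ttm_bo_driver.h. */
#define GFP_USER                 0x1u
#define __GFP_ZERO               0x2u
#define __GFP_DMA32              0x4u
#define __GFP_HIGHMEM            0x8u
#define TTM_PAGE_FLAG_ZERO_ALLOC (1u << 6)
#define TTM_PAGE_FLAG_DMA32      (1u << 7)

static gfp_t ttm_gfp_for(unsigned int page_flags)
{
        gfp_t gfp_flags = GFP_USER;

        if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* The fix: DMA32 and HIGHMEM are mutually exclusive, so a
         * dma32-limited device never gets a page above 4GB. */
        if (page_flags & TTM_PAGE_FLAG_DMA32)
                gfp_flags |= __GFP_DMA32;
        else
                gfp_flags |= __GFP_HIGHMEM;

        return gfp_flags;
}

int main(void)
{
        printf("dma32 bo:  %#x\n", ttm_gfp_for(TTM_PAGE_FLAG_DMA32));
        printf("normal bo: %#x\n", ttm_gfp_for(TTM_PAGE_FLAG_ZERO_ALLOC));
        return 0;
}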