author    Dave Airlie <airlied@redhat.com>  2009-08-19 23:38:04 -0400
committer Dave Airlie <airlied@redhat.com>  2009-08-19 23:38:04 -0400
commit    51c8b4071d84d46cc100baa5931ad06b2a823c95 (patch)
tree      098cf9d41ce1c548d922708a770a9efe35e434df /drivers/gpu/drm/ttm
parent    a987fcaa805fcb24ba885c2e29fd4fdb6816f08f (diff)
parent    6c30c53fd5ae6a99a23ad78e90c428d2c8ffb07f (diff)
Merge Linus master to drm-next
linux-next conflict reported needed resolution.

Conflicts:
	drivers/gpu/drm/drm_crtc.c
	drivers/gpu/drm/drm_edid.c
	drivers/gpu/drm/i915/intel_sdvo.c
	drivers/gpu/drm/radeon/radeon_ttm.c
	drivers/gpu/drm/ttm/ttm_bo.c
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c       | 70
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c  | 69
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c    |  7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c       | 25
4 files changed, 135 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0d0b1b7afbc..fa87ccbcc6c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -43,7 +43,6 @@
 #define TTM_BO_HASH_ORDER 13
 
 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
-static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -259,6 +258,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 	TTM_ASSERT_LOCKED(&bo->mutex);
 	bo->ttm = NULL;
 
+	if (bdev->need_dma32)
+		page_flags |= TTM_PAGE_FLAG_DMA32;
+
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
@@ -339,6 +341,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 	}
 
+	if (bdev->driver->move_notify)
+		bdev->driver->move_notify(bo, mem);
+
 	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
 		ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
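
The move_notify callback is new driver API: TTM now tells the driver about an
impending move before carrying it out. A minimal sketch of an implementation,
assuming a hypothetical foo driver and an assumed foo_gart_unbind() helper
(neither is part of this patch):

	/* Hypothetical sketch: move_notify lets the driver update its own
	 * state (e.g. tear down an aperture binding) before TTM performs
	 * the move to 'mem'. */
	static void foo_bo_move_notify(struct ttm_buffer_object *bo,
				       struct ttm_mem_reg *mem)
	{
		struct foo_device *fdev =
			container_of(bo->bdev, struct foo_device, bdev);

		/* Moving out of the GTT: drop the aperture binding first. */
		if (bo->mem.mem_type == TTM_PL_TT &&
		    mem->mem_type == TTM_PL_SYSTEM)
			foo_gart_unbind(fdev, bo);	/* assumed helper */
	}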
@@ -694,31 +699,52 @@ retry_pre_get:
 	return 0;
 }
 
+static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
+				      uint32_t cur_placement,
+				      uint32_t proposed_placement)
+{
+	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
+	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
+
+	/**
+	 * Keep current caching if possible.
+	 */
+
+	if ((cur_placement & caching) != 0)
+		result |= (cur_placement & caching);
+	else if ((man->default_caching & caching) != 0)
+		result |= man->default_caching;
+	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
+		result |= TTM_PL_FLAG_CACHED;
+	else if ((TTM_PL_FLAG_WC & caching) != 0)
+		result |= TTM_PL_FLAG_WC;
+	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
+		result |= TTM_PL_FLAG_UNCACHED;
+
+	return result;
+}
+
+
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
-				 uint32_t mask, uint32_t *res_mask)
+				 uint32_t proposed_placement,
+				 uint32_t *masked_placement)
 {
 	uint32_t cur_flags = ttm_bo_type_flags(mem_type);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
 		return false;
 
-	if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
 		return false;
 
-	if ((mask & man->available_caching) == 0)
+	if ((proposed_placement & man->available_caching) == 0)
 		return false;
-	if (mask & man->default_caching)
-		cur_flags |= man->default_caching;
-	else if (mask & TTM_PL_FLAG_CACHED)
-		cur_flags |= TTM_PL_FLAG_CACHED;
-	else if (mask & TTM_PL_FLAG_WC)
-		cur_flags |= TTM_PL_FLAG_WC;
-	else
-		cur_flags |= TTM_PL_FLAG_UNCACHED;
 
-	*res_mask = cur_flags;
+	cur_flags |= (proposed_placement & man->available_caching);
+
+	*masked_placement = cur_flags;
 	return true;
 }
 
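
To make the new helper's selection order concrete, a hand-worked example with
illustrative flag values (not taken from the patch):

	/*
	 * Suppose the proposed placement allows VRAM with WC or UNCACHED
	 * caching, while the BO is currently mapped write-combined:
	 *
	 *   proposed = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED
	 *   current  = TTM_PL_FLAG_WC
	 *
	 *   caching = proposed & TTM_PL_MASK_CACHING   -> WC | UNCACHED
	 *   result  = proposed & ~TTM_PL_MASK_CACHING  -> VRAM
	 *   current & caching = WC, non-zero           -> result |= WC
	 *
	 * Result: TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC. The current caching
	 * wins whenever the proposal permits it; only otherwise does the
	 * helper fall back to man->default_caching, then CACHED, WC and
	 * UNCACHED, in that order.
	 */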
@@ -763,6 +789,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;
 
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
@@ -819,6 +848,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 						proposed_placement, &cur_flags))
 			continue;
 
+		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
+						  cur_flags);
+
 		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
 					     interruptible, no_wait);
 
@@ -1194,13 +1226,14 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 {
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_mem_type_manager *man;
 	int ret = -EINVAL;
 
 	if (mem_type >= TTM_NUM_MEM_TYPES) {
 		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
 		return ret;
 	}
+	man = &bdev->man[mem_type];
 
 	if (!man->has_type) {
 		printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
@@ -1417,7 +1450,8 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_global *glob,
 		       struct ttm_bo_driver *driver,
-		       uint64_t file_page_offset)
+		       uint64_t file_page_offset,
+		       bool need_dma32)
 {
 	int ret = -EINVAL;
 
@@ -1446,6 +1480,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
+	bdev->need_dma32 = need_dma32;
 
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
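
Every caller of ttm_bo_device_init() must now pass the extra flag. A sketch of
a driver init path (hypothetical foo driver; deriving need_dma32 from a failed
64-bit DMA mask is an assumption, not code from this patch):

	/* Hypothetical caller sketch: ask TTM for pages below 4GB whenever
	 * the device cannot do 64-bit DMA. */
	bool need_dma32 = pci_set_dma_mask(fdev->pdev, DMA_BIT_MASK(64)) != 0;

	ret = ttm_bo_device_init(&fdev->bdev, glob, &foo_bo_driver,
				 DRM_FILE_PAGE_OFFSET, need_dma32);
	if (ret)
		return ret;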
@@ -1511,6 +1546,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
 }
+EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
 {
@@ -1632,6 +1668,10 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
 			spin_lock(&bo->lock);
+		} else {
+			spin_unlock(&bo->lock);
+			driver->sync_obj_unref(&sync_obj);
+			spin_lock(&bo->lock);
 		}
 	}
 	return 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 12cd47aa18c..c70927ecda2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 }
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *d = ttm_tt_get_page(ttm, page);
 	void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 		return -ENOMEM;
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap(d);
+
+#ifdef CONFIG_X86
+	dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		dst = vmap(&d, 1, 0, prot);
+	else
+		dst = kmap(d);
+#endif
 	if (!dst)
 		return -ENOMEM;
 
 	memcpy_fromio(dst, src, PAGE_SIZE);
-	kunmap(d);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(dst, KM_USER0);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(dst);
+	else
+		kunmap(d);
+#endif
+
 	return 0;
 }
 
 static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page)
+				unsigned long page,
+				pgprot_t prot)
 {
 	struct page *s = ttm_tt_get_page(ttm, page);
 	void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 		return -ENOMEM;
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap(s);
+#ifdef CONFIG_X86
+	src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		src = vmap(&s, 1, 0, prot);
+	else
+		src = kmap(s);
+#endif
 	if (!src)
 		return -ENOMEM;
 
 	memcpy_toio(dst, src, PAGE_SIZE);
-	kunmap(s);
+
+#ifdef CONFIG_X86
+	kunmap_atomic(src, KM_USER0);
+#else
+	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
+		vunmap(src);
+	else
+		kunmap(s);
+#endif
+
 	return 0;
 }
 
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
 	for (i = 0; i < new_mem->num_pages; ++i) {
 		page = i * dir + add;
-		if (old_iomap == NULL)
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
-		else if (new_iomap == NULL)
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
-		else
+		if (old_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(old_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+						   prot);
+		} else if (new_iomap == NULL) {
+			pgprot_t prot = ttm_io_prot(new_mem->placement,
+						    PAGE_KERNEL);
+			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+						   prot);
+		} else
 			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
 		if (ret)
 			goto out1;
@@ -509,8 +550,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
 		spin_unlock(&bo->lock);
-		driver->sync_obj_unref(&bo->sync_obj);
-
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
 			return ret;
 
@@ -532,6 +573,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
 		spin_unlock(&bo->lock);
+		if (tmp_obj)
+			driver->sync_obj_unref(&tmp_obj);
 
 		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 		if (ret)
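
The common thread in the ttm_bo_util.c hunks: the temporary kernel mapping used
for a bounce copy must carry the same caching attributes as the memory it maps,
so a pgprot_t derived from the placement is now threaded down to the per-page
copy helpers. A condensed sketch of the pattern, as a hypothetical wrapper (not
part of the patch):

	/* Hypothetical wrapper: derive the page protection from the TTM
	 * placement flags, then copy through a mapping with exactly those
	 * attributes. Mapping write-combined memory as cached (or vice
	 * versa) can corrupt data on some architectures, hence
	 * kmap_atomic_prot()/vmap() above instead of a plain kmap(). */
	static int foo_copy_page_to_io(struct ttm_tt *ttm, void *ioaddr,
				       unsigned long page, uint32_t placement)
	{
		pgprot_t prot = ttm_io_prot(placement, PAGE_KERNEL);

		return ttm_copy_ttm_io_page(ttm, ioaddr, page, prot);
	}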
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 40b75032ea4..33de7637c0c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -101,6 +101,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		return VM_FAULT_NOPAGE;
 	}
 
+	if (bdev->driver->fault_reserve_notify)
+		bdev->driver->fault_reserve_notify(bo);
+
 	/*
 	 * Wait for buffer data in transit, due to a pipelined
 	 * move.
@@ -327,7 +330,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 		goto out_unref;
 
 	kmap_offset = dev_offset - bo->vm_node->start;
-	if (unlikely(kmap_offset) >= bo->num_pages) {
+	if (unlikely(kmap_offset >= bo->num_pages)) {
 		ret = -EFBIG;
 		goto out_unref;
 	}
@@ -401,7 +404,7 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
 	bool dummy;
 
 	kmap_offset = (*f_pos >> PAGE_SHIFT);
-	if (unlikely(kmap_offset) >= bo->num_pages)
+	if (unlikely(kmap_offset >= bo->num_pages))
 		return -EFBIG;
 
 	page_offset = *f_pos & ~PAGE_MASK;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index b0f73096d37..42cca551976 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -86,10 +86,16 @@ void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
 	unsigned long i;
 
 	for (i = 0; i < num_pages; ++i) {
-		if (pages[i]) {
-			unsigned long start = (unsigned long)page_address(pages[i]);
-			flush_dcache_range(start, start + PAGE_SIZE);
-		}
+		struct page *page = pages[i];
+		void *page_virtual;
+
+		if (unlikely(page == NULL))
+			continue;
+
+		page_virtual = kmap_atomic(page, KM_USER0);
+		flush_dcache_range((unsigned long) page_virtual,
+				   (unsigned long) page_virtual + PAGE_SIZE);
+		kunmap_atomic(page_virtual, KM_USER0);
 	}
 #else
 	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
@@ -131,10 +137,17 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 
 static struct page *ttm_tt_alloc_page(unsigned page_flags)
 {
+	gfp_t gfp_flags = GFP_USER;
+
 	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
-		return alloc_page(GFP_HIGHUSER | __GFP_ZERO);
+		gfp_flags |= __GFP_ZERO;
+
+	if (page_flags & TTM_PAGE_FLAG_DMA32)
+		gfp_flags |= __GFP_DMA32;
+	else
+		gfp_flags |= __GFP_HIGHMEM;
 
-	return alloc_page(GFP_HIGHUSER);
+	return alloc_page(gfp_flags);
 }
 
 static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
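
For reference, GFP_HIGHUSER is GFP_USER | __GFP_HIGHMEM, so the non-DMA32 path
above is behavior-preserving; TTM_PAGE_FLAG_DMA32 merely swaps the highmem zone
for ZONE_DMA32. A usage sketch (hypothetical in-file caller):

	/* Hypothetical caller: a device behind a 32-bit DMA mask asking for
	 * a zeroed page it can actually address (below 4GB physical). */
	static struct page *foo_alloc_dma32_page(void)
	{
		return ttm_tt_alloc_page(TTM_PAGE_FLAG_ZERO_ALLOC |
					 TTM_PAGE_FLAG_DMA32);
	}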