about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/ttm/ttm_bo.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c  97
1 files changed, 32 insertions, 65 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783c7131..148a322d8f5d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo own a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38 30
39#include "ttm/ttm_module.h" 31#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h" 32#include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
45#include <linux/mm.h> 37#include <linux/mm.h>
46#include <linux/file.h> 38#include <linux/file.h>
47#include <linux/module.h> 39#include <linux/module.h>
40#include <asm/atomic.h>
48 41
49#define TTM_ASSERT_LOCKED(param) 42#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...) 43#define TTM_DEBUG(fmt, arg...)
@@ -231,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
231 int ret; 224 int ret;
232 225
233 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 226 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
227 /**
228 * Deadlock avoidance for multi-bo reserving.
229 */
234 if (use_sequence && bo->seq_valid && 230 if (use_sequence && bo->seq_valid &&
235 (sequence - bo->val_seq < (1 << 31))) { 231 (sequence - bo->val_seq < (1 << 31))) {
236 return -EAGAIN; 232 return -EAGAIN;
@@ -248,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
248 } 244 }
249 245
250 if (use_sequence) { 246 if (use_sequence) {
247 /**
248 * Wake up waiters that may need to recheck for deadlock,
249 * if we decreased the sequence number.
250 */
251 if (unlikely((bo->val_seq - sequence < (1 << 31))
252 || !bo->seq_valid))
253 wake_up_all(&bo->event_queue);
254
251 bo->val_seq = sequence; 255 bo->val_seq = sequence;
252 bo->seq_valid = true; 256 bo->seq_valid = true;
253 } else { 257 } else {
@@ -452,6 +456,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
452 ttm_bo_mem_put(bo, &bo->mem); 456 ttm_bo_mem_put(bo, &bo->mem);
453 457
454 atomic_set(&bo->reserved, 0); 458 atomic_set(&bo->reserved, 0);
459
460 /*
461 * Make processes trying to reserve really pick it up.
462 */
463 smp_mb__after_atomic_dec();
455 wake_up_all(&bo->event_queue); 464 wake_up_all(&bo->event_queue);
456} 465}
457 466
@@ -460,7 +469,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
460 struct ttm_bo_device *bdev = bo->bdev; 469 struct ttm_bo_device *bdev = bo->bdev;
461 struct ttm_bo_global *glob = bo->glob; 470 struct ttm_bo_global *glob = bo->glob;
462 struct ttm_bo_driver *driver; 471 struct ttm_bo_driver *driver;
463 void *sync_obj; 472 void *sync_obj = NULL;
464 void *sync_obj_arg; 473 void *sync_obj_arg;
465 int put_count; 474 int put_count;
466 int ret; 475 int ret;
@@ -495,17 +504,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
495 spin_lock(&glob->lru_lock); 504 spin_lock(&glob->lru_lock);
496 } 505 }
497queue: 506queue:
498 sync_obj = bo->sync_obj;
499 sync_obj_arg = bo->sync_obj_arg;
500 driver = bdev->driver; 507 driver = bdev->driver;
508 if (bo->sync_obj)
509 sync_obj = driver->sync_obj_ref(bo->sync_obj);
510 sync_obj_arg = bo->sync_obj_arg;
501 511
502 kref_get(&bo->list_kref); 512 kref_get(&bo->list_kref);
503 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 513 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
504 spin_unlock(&glob->lru_lock); 514 spin_unlock(&glob->lru_lock);
505 spin_unlock(&bo->lock); 515 spin_unlock(&bo->lock);
506 516
507 if (sync_obj) 517 if (sync_obj) {
508 driver->sync_obj_flush(sync_obj, sync_obj_arg); 518 driver->sync_obj_flush(sync_obj, sync_obj_arg);
519 driver->sync_obj_unref(&sync_obj);
520 }
509 schedule_delayed_work(&bdev->wq, 521 schedule_delayed_work(&bdev->wq,
510 ((HZ / 100) < 1) ? 1 : HZ / 100); 522 ((HZ / 100) < 1) ? 1 : HZ / 100);
511} 523}
@@ -822,7 +834,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
822 bool no_wait_gpu) 834 bool no_wait_gpu)
823{ 835{
824 struct ttm_bo_device *bdev = bo->bdev; 836 struct ttm_bo_device *bdev = bo->bdev;
825 struct ttm_bo_global *glob = bdev->glob;
826 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 837 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
827 int ret; 838 int ret;
828 839
@@ -832,12 +843,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
832 return ret; 843 return ret;
833 if (mem->mm_node) 844 if (mem->mm_node)
834 break; 845 break;
835 spin_lock(&glob->lru_lock);
836 if (list_empty(&man->lru)) {
837 spin_unlock(&glob->lru_lock);
838 break;
839 }
840 spin_unlock(&glob->lru_lock);
841 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 846 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
842 no_wait_reserve, no_wait_gpu); 847 no_wait_reserve, no_wait_gpu);
843 if (unlikely(ret != 0)) 848 if (unlikely(ret != 0))
@@ -1125,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate);
1125int ttm_bo_check_placement(struct ttm_buffer_object *bo, 1130int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1126 struct ttm_placement *placement) 1131 struct ttm_placement *placement)
1127{ 1132{
1128 int i; 1133 BUG_ON((placement->fpfn || placement->lpfn) &&
1134 (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
1129 1135
1130 if (placement->fpfn || placement->lpfn) {
1131 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1132 printk(KERN_ERR TTM_PFX "Page number range to small "
1133 "Need %lu pages, range is [%u, %u]\n",
1134 bo->mem.num_pages, placement->fpfn,
1135 placement->lpfn);
1136 return -EINVAL;
1137 }
1138 }
1139 for (i = 0; i < placement->num_placement; i++) {
1140 if (!capable(CAP_SYS_ADMIN)) {
1141 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1142 printk(KERN_ERR TTM_PFX "Need to be root to "
1143 "modify NO_EVICT status.\n");
1144 return -EINVAL;
1145 }
1146 }
1147 }
1148 for (i = 0; i < placement->num_busy_placement; i++) {
1149 if (!capable(CAP_SYS_ADMIN)) {
1150 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1151 printk(KERN_ERR TTM_PFX "Need to be root to "
1152 "modify NO_EVICT status.\n");
1153 return -EINVAL;
1154 }
1155 }
1156 }
1157 return 0; 1136 return 0;
1158} 1137}
1159 1138
@@ -1176,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1176 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1155 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1177 if (num_pages == 0) { 1156 if (num_pages == 0) {
1178 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); 1157 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1158 if (destroy)
1159 (*destroy)(bo);
1160 else
1161 kfree(bo);
1179 return -EINVAL; 1162 return -EINVAL;
1180 } 1163 }
1181 bo->destroy = destroy; 1164 bo->destroy = destroy;
@@ -1369,18 +1352,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1369 int ret = -EINVAL; 1352 int ret = -EINVAL;
1370 struct ttm_mem_type_manager *man; 1353 struct ttm_mem_type_manager *man;
1371 1354
1372 if (type >= TTM_NUM_MEM_TYPES) { 1355 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1373 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1374 return ret;
1375 }
1376
1377 man = &bdev->man[type]; 1356 man = &bdev->man[type];
1378 if (man->has_type) { 1357 BUG_ON(man->has_type);
1379 printk(KERN_ERR TTM_PFX
1380 "Memory manager already initialized for type %d\n",
1381 type);
1382 return ret;
1383 }
1384 1358
1385 ret = bdev->driver->init_mem_type(bdev, type, man); 1359 ret = bdev->driver->init_mem_type(bdev, type, man);
1386 if (ret) 1360 if (ret)
@@ -1389,13 +1363,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1389 1363
1390 ret = 0; 1364 ret = 0;
1391 if (type != TTM_PL_SYSTEM) { 1365 if (type != TTM_PL_SYSTEM) {
1392 if (!p_size) {
1393 printk(KERN_ERR TTM_PFX
1394 "Zero size memory manager type %d\n",
1395 type);
1396 return ret;
1397 }
1398
1399 ret = (*man->func->init)(man, p_size); 1366 ret = (*man->func->init)(man, p_size);
1400 if (ret) 1367 if (ret)
1401 return ret; 1368 return ret;