about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Christian König <christian.koenig@amd.com>  2017-01-10 08:08:28 -0500
committer Alex Deucher <alexander.deucher@amd.com>    2017-01-27 12:20:33 -0500
commit    cf6c467d67d319e239aec57d7ba31cb9946f29bf (patch)
tree      8e6ab6e828cda2485ec80cd05b70777142725ab2
parent    2ee7fc92cfd327fe41377f64a7f04ddc30c851e7 (diff)
drm/ttm: add BO priorities for the LRUs
This way the driver can specify a priority for a BO which has the
effect that a BO is only evicted when all other BOs with a lower
priority are evicted first.

Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger.He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  |  4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             | 67
-rw-r--r--  include/drm/ttm/ttm_bo_api.h             |  2
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h          |  6
4 files changed, 52 insertions, 27 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 25568079fda4..d53445074897 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1166,8 +1166,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
1166 struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i]; 1166 struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];
1167 1167
1168 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) 1168 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
1169 lru->lru[j] = &adev->mman.bdev.man[j].lru; 1169 lru->lru[j] = &adev->mman.bdev.man[j].lru[0];
1170 lru->swap_lru = &adev->mman.bdev.glob->swap_lru; 1170 lru->swap_lru = &adev->mman.bdev.glob->swap_lru[0];
1171 } 1171 }
1172 1172
1173 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j) 1173 for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89bbcf0300f4..0b287d8947a4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -242,13 +242,13 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
242 242
243struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo) 243struct list_head *ttm_bo_default_lru_tail(struct ttm_buffer_object *bo)
244{ 244{
245 return bo->bdev->man[bo->mem.mem_type].lru.prev; 245 return bo->bdev->man[bo->mem.mem_type].lru[bo->priority].prev;
246} 246}
247EXPORT_SYMBOL(ttm_bo_default_lru_tail); 247EXPORT_SYMBOL(ttm_bo_default_lru_tail);
248 248
249struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo) 249struct list_head *ttm_bo_default_swap_lru_tail(struct ttm_buffer_object *bo)
250{ 250{
251 return bo->glob->swap_lru.prev; 251 return bo->glob->swap_lru[bo->priority].prev;
252} 252}
253EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail); 253EXPORT_SYMBOL(ttm_bo_default_swap_lru_tail);
254 254
@@ -741,20 +741,27 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
741 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 741 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
742 struct ttm_buffer_object *bo; 742 struct ttm_buffer_object *bo;
743 int ret = -EBUSY, put_count; 743 int ret = -EBUSY, put_count;
744 unsigned i;
744 745
745 spin_lock(&glob->lru_lock); 746 spin_lock(&glob->lru_lock);
746 list_for_each_entry(bo, &man->lru, lru) { 747 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
747 ret = __ttm_bo_reserve(bo, false, true, NULL); 748 list_for_each_entry(bo, &man->lru[i], lru) {
748 if (ret) 749 ret = __ttm_bo_reserve(bo, false, true, NULL);
749 continue; 750 if (ret)
751 continue;
750 752
751 if (place && !bdev->driver->eviction_valuable(bo, place)) { 753 if (place && !bdev->driver->eviction_valuable(bo,
752 __ttm_bo_unreserve(bo); 754 place)) {
753 ret = -EBUSY; 755 __ttm_bo_unreserve(bo);
754 continue; 756 ret = -EBUSY;
757 continue;
758 }
759
760 break;
755 } 761 }
756 762
757 break; 763 if (!ret)
764 break;
758 } 765 }
759 766
760 if (ret) { 767 if (ret) {
@@ -1197,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1197 } 1204 }
1198 atomic_inc(&bo->glob->bo_count); 1205 atomic_inc(&bo->glob->bo_count);
1199 drm_vma_node_reset(&bo->vma_node); 1206 drm_vma_node_reset(&bo->vma_node);
1207 bo->priority = 0;
1200 1208
1201 /* 1209 /*
1202 * For ttm_bo_type_device buffers, allocate 1210 * For ttm_bo_type_device buffers, allocate
@@ -1297,18 +1305,21 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1297 struct ttm_bo_global *glob = bdev->glob; 1305 struct ttm_bo_global *glob = bdev->glob;
1298 struct dma_fence *fence; 1306 struct dma_fence *fence;
1299 int ret; 1307 int ret;
1308 unsigned i;
1300 1309
1301 /* 1310 /*
1302 * Can't use standard list traversal since we're unlocking. 1311 * Can't use standard list traversal since we're unlocking.
1303 */ 1312 */
1304 1313
1305 spin_lock(&glob->lru_lock); 1314 spin_lock(&glob->lru_lock);
1306 while (!list_empty(&man->lru)) { 1315 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1307 spin_unlock(&glob->lru_lock); 1316 while (!list_empty(&man->lru[i])) {
1308 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false); 1317 spin_unlock(&glob->lru_lock);
1309 if (ret) 1318 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
1310 return ret; 1319 if (ret)
1311 spin_lock(&glob->lru_lock); 1320 return ret;
1321 spin_lock(&glob->lru_lock);
1322 }
1312 } 1323 }
1313 spin_unlock(&glob->lru_lock); 1324 spin_unlock(&glob->lru_lock);
1314 1325
@@ -1385,6 +1396,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1385{ 1396{
1386 int ret = -EINVAL; 1397 int ret = -EINVAL;
1387 struct ttm_mem_type_manager *man; 1398 struct ttm_mem_type_manager *man;
1399 unsigned i;
1388 1400
1389 BUG_ON(type >= TTM_NUM_MEM_TYPES); 1401 BUG_ON(type >= TTM_NUM_MEM_TYPES);
1390 man = &bdev->man[type]; 1402 man = &bdev->man[type];
@@ -1410,7 +1422,8 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1410 man->use_type = true; 1422 man->use_type = true;
1411 man->size = p_size; 1423 man->size = p_size;
1412 1424
1413 INIT_LIST_HEAD(&man->lru); 1425 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1426 INIT_LIST_HEAD(&man->lru[i]);
1414 man->move = NULL; 1427 man->move = NULL;
1415 1428
1416 return 0; 1429 return 0;
@@ -1442,6 +1455,7 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
1442 container_of(ref, struct ttm_bo_global_ref, ref); 1455 container_of(ref, struct ttm_bo_global_ref, ref);
1443 struct ttm_bo_global *glob = ref->object; 1456 struct ttm_bo_global *glob = ref->object;
1444 int ret; 1457 int ret;
1458 unsigned i;
1445 1459
1446 mutex_init(&glob->device_list_mutex); 1460 mutex_init(&glob->device_list_mutex);
1447 spin_lock_init(&glob->lru_lock); 1461 spin_lock_init(&glob->lru_lock);
@@ -1453,7 +1467,8 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
1453 goto out_no_drp; 1467 goto out_no_drp;
1454 } 1468 }
1455 1469
1456 INIT_LIST_HEAD(&glob->swap_lru); 1470 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1471 INIT_LIST_HEAD(&glob->swap_lru[i]);
1457 INIT_LIST_HEAD(&glob->device_list); 1472 INIT_LIST_HEAD(&glob->device_list);
1458 1473
1459 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout); 1474 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
@@ -1512,8 +1527,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
1512 if (list_empty(&bdev->ddestroy)) 1527 if (list_empty(&bdev->ddestroy))
1513 TTM_DEBUG("Delayed destroy list was clean\n"); 1528 TTM_DEBUG("Delayed destroy list was clean\n");
1514 1529
1515 if (list_empty(&bdev->man[0].lru)) 1530 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
1516 TTM_DEBUG("Swap list was clean\n"); 1531 if (list_empty(&bdev->man[0].lru[0]))
1532 TTM_DEBUG("Swap list %d was clean\n", i);
1517 spin_unlock(&glob->lru_lock); 1533 spin_unlock(&glob->lru_lock);
1518 1534
1519 drm_vma_offset_manager_destroy(&bdev->vma_manager); 1535 drm_vma_offset_manager_destroy(&bdev->vma_manager);
@@ -1665,10 +1681,15 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1665 int ret = -EBUSY; 1681 int ret = -EBUSY;
1666 int put_count; 1682 int put_count;
1667 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); 1683 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1684 unsigned i;
1668 1685
1669 spin_lock(&glob->lru_lock); 1686 spin_lock(&glob->lru_lock);
1670 list_for_each_entry(bo, &glob->swap_lru, swap) { 1687 for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
1671 ret = __ttm_bo_reserve(bo, false, true, NULL); 1688 list_for_each_entry(bo, &glob->swap_lru[i], swap) {
1689 ret = __ttm_bo_reserve(bo, false, true, NULL);
1690 if (!ret)
1691 break;
1692 }
1672 if (!ret) 1693 if (!ret)
1673 break; 1694 break;
1674 } 1695 }
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 652e45be97c8..c356df40ac49 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -215,6 +215,8 @@ struct ttm_buffer_object {
215 215
216 struct drm_vma_offset_node vma_node; 216 struct drm_vma_offset_node vma_node;
217 217
218 unsigned priority;
219
218 /** 220 /**
219 * Special members that are protected by the reserve lock 221 * Special members that are protected by the reserve lock
220 * and the bo::lock when written to. Can be read with 222 * and the bo::lock when written to. Can be read with
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8b4bec7bd57b..c8407e737542 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,6 +42,8 @@
42#include <linux/spinlock.h> 42#include <linux/spinlock.h>
43#include <linux/reservation.h> 43#include <linux/reservation.h>
44 44
45#define TTM_MAX_BO_PRIORITY 16
46
45struct ttm_backend_func { 47struct ttm_backend_func {
46 /** 48 /**
47 * struct ttm_backend_func member bind 49 * struct ttm_backend_func member bind
@@ -298,7 +300,7 @@ struct ttm_mem_type_manager {
298 * Protected by the global->lru_lock. 300 * Protected by the global->lru_lock.
299 */ 301 */
300 302
301 struct list_head lru; 303 struct list_head lru[TTM_MAX_BO_PRIORITY];
302 304
303 /* 305 /*
304 * Protected by @move_lock. 306 * Protected by @move_lock.
@@ -518,7 +520,7 @@ struct ttm_bo_global {
518 /** 520 /**
519 * Protected by the lru_lock. 521 * Protected by the lru_lock.
520 */ 522 */
521 struct list_head swap_lru; 523 struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
522 524
523 /** 525 /**
524 * Internal protection. 526 * Internal protection.