about summary refs log tree commit diff stats
path: root/drivers/gpu/drm
diff options
context:
space:
mode:
authorJerome Glisse <jglisse@redhat.com>2011-11-11 15:42:57 -0500
committerDave Airlie <airlied@redhat.com>2011-12-06 05:40:11 -0500
commit57de4ba959b290f0b8cf36ecd5e7f1b29d4b8a12 (patch)
tree8063f4dfaf1a22bf8cf7a5f0410d4e4929b250ec /drivers/gpu/drm
parent8e7e70522d760c4ccd4cd370ebfa0ba69e006c6e (diff)
drm/ttm: simplify memory accounting for ttm user v2
Provide helper function to compute the kernel memory size needed for each buffer object. Move all the accounting inside ttm, simplifying driver and avoiding code duplication across them. v2 fix accounting of ghost object, one would have thought that I would have run into the issue a long time ago, but it seems ghost objects are rare when you have plenty of vram ;) Signed-off-by: Jerome Glisse <jglisse@redhat.com> Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c52
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c35
5 files changed, 53 insertions, 49 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d6326af9fcc0..7ac7bc3a8de3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -93,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
93{ 93{
94 struct drm_nouveau_private *dev_priv = dev->dev_private; 94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 struct nouveau_bo *nvbo; 95 struct nouveau_bo *nvbo;
96 size_t acc_size;
96 int ret; 97 int ret;
97 98
98 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 99 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -115,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
115 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 116 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
116 nouveau_bo_placement_set(nvbo, flags, 0); 117 nouveau_bo_placement_set(nvbo, flags, 0);
117 118
119 acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
120 sizeof(struct nouveau_bo));
121
118 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 122 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
119 ttm_bo_type_device, &nvbo->placement, 123 ttm_bo_type_device, &nvbo->placement,
120 align >> PAGE_SHIFT, 0, false, NULL, size, 124 align >> PAGE_SHIFT, 0, false, NULL, acc_size,
121 nouveau_bo_del_ttm); 125 nouveau_bo_del_ttm);
122 if (ret) { 126 if (ret) {
123 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 127 /* ttm will call nouveau_bo_del_ttm if it fails.. */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1c851521f458..695b4800329a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -95,6 +95,7 @@ int radeon_bo_create(struct radeon_device *rdev,
95 enum ttm_bo_type type; 95 enum ttm_bo_type type;
96 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; 96 unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
97 unsigned long max_size = 0; 97 unsigned long max_size = 0;
98 size_t acc_size;
98 int r; 99 int r;
99 100
100 size = ALIGN(size, PAGE_SIZE); 101 size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +118,9 @@ int radeon_bo_create(struct radeon_device *rdev,
117 return -ENOMEM; 118 return -ENOMEM;
118 } 119 }
119 120
121 acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
122 sizeof(struct radeon_bo));
123
120retry: 124retry:
121 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); 125 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
122 if (bo == NULL) 126 if (bo == NULL)
@@ -134,8 +138,8 @@ retry:
134 /* Kernel allocation are uninterruptible */ 138 /* Kernel allocation are uninterruptible */
135 mutex_lock(&rdev->vram_mutex); 139 mutex_lock(&rdev->vram_mutex);
136 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 140 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
137 &bo->placement, page_align, 0, !kernel, NULL, size, 141 &bo->placement, page_align, 0, !kernel, NULL,
138 &radeon_ttm_bo_destroy); 142 acc_size, &radeon_ttm_bo_destroy);
139 mutex_unlock(&rdev->vram_mutex); 143 mutex_unlock(&rdev->vram_mutex);
140 if (unlikely(r != 0)) { 144 if (unlikely(r != 0)) {
141 if (r != -ERESTARTSYS) { 145 if (r != -ERESTARTSYS) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb7352712750..de7ad9991902 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
137 struct ttm_buffer_object *bo = 137 struct ttm_buffer_object *bo =
138 container_of(list_kref, struct ttm_buffer_object, list_kref); 138 container_of(list_kref, struct ttm_buffer_object, list_kref);
139 struct ttm_bo_device *bdev = bo->bdev; 139 struct ttm_bo_device *bdev = bo->bdev;
140 size_t acc_size = bo->acc_size;
140 141
141 BUG_ON(atomic_read(&bo->list_kref.refcount)); 142 BUG_ON(atomic_read(&bo->list_kref.refcount));
142 BUG_ON(atomic_read(&bo->kref.refcount)); 143 BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
152 if (bo->destroy) 153 if (bo->destroy)
153 bo->destroy(bo); 154 bo->destroy(bo);
154 else { 155 else {
155 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
156 kfree(bo); 156 kfree(bo);
157 } 157 }
158 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
158} 159}
159 160
160int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) 161int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -1157,6 +1158,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1157{ 1158{
1158 int ret = 0; 1159 int ret = 0;
1159 unsigned long num_pages; 1160 unsigned long num_pages;
1161 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1162
1163 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1164 if (ret) {
1165 printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
1166 if (destroy)
1167 (*destroy)(bo);
1168 else
1169 kfree(bo);
1170 return -ENOMEM;
1171 }
1160 1172
1161 size += buffer_start & ~PAGE_MASK; 1173 size += buffer_start & ~PAGE_MASK;
1162 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1174 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1227,14 +1239,34 @@ out_err:
1227} 1239}
1228EXPORT_SYMBOL(ttm_bo_init); 1240EXPORT_SYMBOL(ttm_bo_init);
1229 1241
1230static inline size_t ttm_bo_size(struct ttm_bo_global *glob, 1242size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1231 unsigned long num_pages) 1243 unsigned long bo_size,
1244 unsigned struct_size)
1232{ 1245{
1233 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) & 1246 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1234 PAGE_MASK; 1247 size_t size = 0;
1235 1248
1236 return glob->ttm_bo_size + 2 * page_array_size; 1249 size += ttm_round_pot(struct_size);
1250 size += PAGE_ALIGN(npages * sizeof(void *));
1251 size += ttm_round_pot(sizeof(struct ttm_tt));
1252 return size;
1237} 1253}
1254EXPORT_SYMBOL(ttm_bo_acc_size);
1255
1256size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1257 unsigned long bo_size,
1258 unsigned struct_size)
1259{
1260 unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
1261 size_t size = 0;
1262
1263 size += ttm_round_pot(struct_size);
1264 size += PAGE_ALIGN(npages * sizeof(void *));
1265 size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
1266 size += ttm_round_pot(sizeof(struct ttm_dma_tt));
1267 return size;
1268}
1269EXPORT_SYMBOL(ttm_bo_dma_acc_size);
1238 1270
1239int ttm_bo_create(struct ttm_bo_device *bdev, 1271int ttm_bo_create(struct ttm_bo_device *bdev,
1240 unsigned long size, 1272 unsigned long size,
@@ -1248,10 +1280,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1248{ 1280{
1249 struct ttm_buffer_object *bo; 1281 struct ttm_buffer_object *bo;
1250 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1282 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1283 size_t acc_size;
1251 int ret; 1284 int ret;
1252 1285
1253 size_t acc_size = 1286 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1254 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1255 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); 1287 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1256 if (unlikely(ret != 0)) 1288 if (unlikely(ret != 0))
1257 return ret; 1289 return ret;
@@ -1437,10 +1469,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
1437 goto out_no_shrink; 1469 goto out_no_shrink;
1438 } 1470 }
1439 1471
1440 glob->ttm_bo_extra_size = ttm_round_pot(sizeof(struct ttm_tt));
1441 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1442 ttm_round_pot(sizeof(struct ttm_buffer_object));
1443
1444 atomic_set(&glob->bo_count, 0); 1472 atomic_set(&glob->bo_count, 0);
1445 1473
1446 ret = kobject_init_and_add( 1474 ret = kobject_init_and_add(
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 60f204d67dbb..f8187ead7b37 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -445,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
445 kref_init(&fbo->list_kref); 445 kref_init(&fbo->list_kref);
446 kref_init(&fbo->kref); 446 kref_init(&fbo->kref);
447 fbo->destroy = &ttm_transfered_destroy; 447 fbo->destroy = &ttm_transfered_destroy;
448 fbo->acc_size = 0;
448 449
449 *new_obj = fbo; 450 *new_obj = fbo;
450 return 0; 451 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 86c5e4cceb31..2eb84a55aee7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1517,29 +1517,10 @@ out_bad_surface:
1517/** 1517/**
1518 * Buffer management. 1518 * Buffer management.
1519 */ 1519 */
1520
1521static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
1522 unsigned long num_pages)
1523{
1524 static size_t bo_user_size = ~0;
1525
1526 size_t page_array_size =
1527 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
1528
1529 if (unlikely(bo_user_size == ~0)) {
1530 bo_user_size = glob->ttm_bo_extra_size +
1531 ttm_round_pot(sizeof(struct vmw_dma_buffer));
1532 }
1533
1534 return bo_user_size + page_array_size;
1535}
1536
1537void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) 1520void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1538{ 1521{
1539 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); 1522 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1540 struct ttm_bo_global *glob = bo->glob;
1541 1523
1542 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1543 kfree(vmw_bo); 1524 kfree(vmw_bo);
1544} 1525}
1545 1526
@@ -1550,24 +1531,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1550 void (*bo_free) (struct ttm_buffer_object *bo)) 1531 void (*bo_free) (struct ttm_buffer_object *bo))
1551{ 1532{
1552 struct ttm_bo_device *bdev = &dev_priv->bdev; 1533 struct ttm_bo_device *bdev = &dev_priv->bdev;
1553 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1554 size_t acc_size; 1534 size_t acc_size;
1555 int ret; 1535 int ret;
1556 1536
1557 BUG_ON(!bo_free); 1537 BUG_ON(!bo_free);
1558 1538
1559 acc_size = 1539 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1560 vmw_dmabuf_acc_size(bdev->glob,
1561 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1562
1563 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1564 if (unlikely(ret != 0)) {
1565 /* we must free the bo here as
1566 * ttm_buffer_object_init does so as well */
1567 bo_free(&vmw_bo->base);
1568 return ret;
1569 }
1570
1571 memset(vmw_bo, 0, sizeof(*vmw_bo)); 1540 memset(vmw_bo, 0, sizeof(*vmw_bo));
1572 1541
1573 INIT_LIST_HEAD(&vmw_bo->validate_list); 1542 INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1582,9 +1551,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1582static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) 1551static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1583{ 1552{
1584 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 1553 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1585 struct ttm_bo_global *glob = bo->glob;
1586 1554
1587 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
1588 kfree(vmw_user_bo); 1555 kfree(vmw_user_bo);
1589} 1556}
1590 1557