author     Thomas Hellstrom <thellstrom@vmware.com>   2011-10-07 09:23:06 -0400
committer  Dave Airlie <airlied@redhat.com>           2011-10-10 04:01:44 -0400
commit     414ee50b3a111983056b1a828fac08f9e8fbc7e9 (patch)
tree       d946756ccdca539c8ca707f02b2a47a3e589c649 /drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
parent     1c248b7d2960faec3e1b8f3f9c5d9d0df28e0a3c (diff)
vmwgfx: Implement memory accounting for resources
Contexts, surfaces and streams allocate persistent kernel memory as the
direct result of user-space requests. Make sure this memory is accounted
as graphics memory, to avoid DoS vulnerabilities.

Also take the TTM read lock around resource creation to block
switched-out dri clients from allocating resources.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
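The same accounting pattern is applied to all three object types in the diff below. The following condensed sketch shows it in one place; vmw_user_foo and vmw_foo_define_ioctl are illustrative placeholders standing in for context/surface/stream, not names from the driver, while the TTM calls are the ones the patch actually uses.

/*
 * Sketch of the accounting scheme: charge the graphics-memory global
 * under the TTM read lock before allocating kernel memory, and make
 * the resource destructor return the charge.
 */
static uint64_t vmw_user_foo_size;

int vmw_foo_define_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_foo *foo;
	int ret;

	/* One-time size estimate; 128 bytes approximates idr usage. */
	if (unlikely(vmw_user_foo_size == 0))
		vmw_user_foo_size = ttm_round_pot(sizeof(*foo)) + 128;

	/* Block switched-out dri clients from allocating resources. */
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	/* Charge the graphics-memory global before any kmalloc(). */
	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_foo_size, false, true);
	if (unlikely(ret != 0))
		goto out_unlock;

	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
	if (unlikely(foo == NULL)) {
		/* Destructor not installed yet: undo accounting by hand. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_foo_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * ... initialize the resource; from here on the destructor,
	 * which calls ttm_mem_global_free(), owns the accounted memory.
	 */

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

Surfaces differ only in that the charged amount varies per object (it depends on the mip-level count), so the define path stashes it in user_srf->size for the destructor to return.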
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   172
1 file changed, 146 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 93a68a61419..c7cff3debe1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -39,6 +39,7 @@ struct vmw_user_context {
 struct vmw_user_surface {
 	struct ttm_base_object base;
 	struct vmw_surface srf;
+	uint32_t size;
 };
 
 struct vmw_user_dma_buffer {
@@ -67,6 +68,11 @@ struct vmw_surface_offset {
 	uint32_t bo_offset;
 };
 
+
+static uint64_t vmw_user_context_size;
+static uint64_t vmw_user_surface_size;
+static uint64_t vmw_user_stream_size;
+
 static inline struct vmw_dma_buffer *
 vmw_dma_buffer(struct ttm_buffer_object *bo)
 {
@@ -343,8 +349,11 @@ static void vmw_user_context_free(struct vmw_resource *res)
 {
 	struct vmw_user_context *ctx =
 	    container_of(res, struct vmw_user_context, res);
+	struct vmw_private *dev_priv = res->dev_priv;
 
 	kfree(ctx);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_context_size);
 }
 
 /**
@@ -398,23 +407,56 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	struct vmw_user_context *ctx;
 	struct vmw_resource *res;
 	struct vmw_resource *tmp;
 	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	if (unlikely(ctx == NULL))
-		return -ENOMEM;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of contexts anyway.
+	 */
+
+	if (unlikely(vmw_user_context_size == 0))
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_context_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for context"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	if (unlikely(ctx == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_context_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	res = &ctx->res;
 	ctx->base.shareable = false;
 	ctx->base.tfile = NULL;
 
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
 	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_unlock;
 
 	tmp = vmw_resource_reference(&ctx->res);
 	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -428,6 +470,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	arg->cid = res->id;
 out_err:
 	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
 	return ret;
 
 }
@@ -1095,6 +1139,8 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
 	struct vmw_user_surface *user_srf =
 	    container_of(srf, struct vmw_user_surface, srf);
+	struct vmw_private *dev_priv = srf->res.dev_priv;
+	uint32_t size = user_srf->size;
 
 	if (srf->backup)
 		ttm_bo_unref(&srf->backup);
@@ -1102,6 +1148,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
 	kfree(srf->sizes);
 	kfree(srf->snooper.image);
 	kfree(user_srf);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
 /**
@@ -1226,9 +1273,45 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct vmw_surface_offset *cur_offset;
 	uint32_t stride_bpp;
 	uint32_t bpp;
+	uint32_t num_sizes;
+	uint32_t size;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 
-	if (unlikely(user_srf == NULL))
-		return -ENOMEM;
+	if (unlikely(vmw_user_surface_size == 0))
+		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
+			128;
+
+	num_sizes = 0;
+	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+		num_sizes += req->mip_levels[i];
+
+	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
+	    DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
+	size = vmw_user_surface_size + 128 +
+		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
+		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
+
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   size, false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for surface"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+	user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
+	if (unlikely(user_srf == NULL)) {
+		ret = -ENOMEM;
+		goto out_no_user_srf;
+	}
 
 	srf = &user_srf->srf;
 	res = &srf->res;
@@ -1239,20 +1322,13 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	srf->backup = NULL;
 
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
-	srf->num_sizes = 0;
-	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
-		srf->num_sizes += srf->mip_levels[i];
-
-	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
-	    DRM_VMW_MAX_MIP_LEVELS) {
-		ret = -EINVAL;
-		goto out_err0;
-	}
+	srf->num_sizes = num_sizes;
+	user_srf->size = size;
 
 	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
 	if (unlikely(srf->sizes == NULL)) {
 		ret = -ENOMEM;
-		goto out_err0;
+		goto out_no_sizes;
 	}
 	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
 			       GFP_KERNEL);
@@ -1268,7 +1344,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			   srf->num_sizes * sizeof(*srf->sizes));
 	if (unlikely(ret != 0)) {
 		ret = -EFAULT;
-		goto out_err1;
+		goto out_no_copy;
 	}
 
 	cur_bo_offset = 0;
@@ -1305,7 +1381,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		if (!srf->snooper.image) {
 			DRM_ERROR("Failed to allocate cursor_image\n");
 			ret = -ENOMEM;
-			goto out_err1;
+			goto out_no_copy;
 		}
 	} else {
 		srf->snooper.image = NULL;
@@ -1322,7 +1398,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_unlock;
 
 	tmp = vmw_resource_reference(&srf->res);
 	ret = ttm_base_object_init(tfile, &user_srf->base,
@@ -1332,7 +1408,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0)) {
 		vmw_resource_unreference(&tmp);
 		vmw_resource_unreference(&res);
-		return ret;
+		goto out_unlock;
 	}
 
 	rep->sid = user_srf->base.hash.key;
@@ -1340,13 +1416,19 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 		DRM_ERROR("Created bad Surface ID.\n");
 
 	vmw_resource_unreference(&res);
+
+	ttm_read_unlock(&vmaster->lock);
 	return 0;
-out_err1:
+out_no_copy:
 	kfree(srf->offsets);
 out_no_offsets:
 	kfree(srf->sizes);
-out_err0:
+out_no_sizes:
 	kfree(user_srf);
+out_no_user_srf:
+	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
 
@@ -1690,8 +1772,11 @@ static void vmw_user_stream_free(struct vmw_resource *res)
 {
 	struct vmw_user_stream *stream =
 	    container_of(res, struct vmw_user_stream, stream.res);
+	struct vmw_private *dev_priv = res->dev_priv;
 
 	kfree(stream);
+	ttm_mem_global_free(vmw_mem_glob(dev_priv),
+			    vmw_user_stream_size);
 }
 
 /**
@@ -1745,23 +1830,56 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	struct vmw_user_stream *stream;
 	struct vmw_resource *res;
 	struct vmw_resource *tmp;
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	if (unlikely(stream == NULL))
-		return -ENOMEM;
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by the maximum number of streams anyway.
+	 */
+
+	if (unlikely(vmw_user_stream_size == 0))
+		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_stream_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for stream"
+				  " creation.\n");
+		goto out_unlock;
+	}
+
+
+	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+	if (unlikely(stream == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_stream_size);
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	res = &stream->stream.res;
 	stream->base.shareable = false;
 	stream->base.tfile = NULL;
 
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
 	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
 	if (unlikely(ret != 0))
-		return ret;
+		goto out_unlock;
 
 	tmp = vmw_resource_reference(res);
 	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -1775,6 +1893,8 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	arg->stream_id = res->id;
 out_err:
 	vmw_resource_unreference(&res);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
 
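For a sense of the amounts involved, here is a worked, user-space approximation of the per-surface estimate computed in the surface hunk above. Everything outside the arithmetic is an assumption made for illustration: the round_pot() helper is a plausible reimplementation of TTM's ttm_round_pot() (round up to a power of two below PAGE_SIZE, page-align above it), the struct sizes of 12 bytes each for drm_vmw_size and vmw_surface_offset are guesses, and 6 faces x 24 mip levels stands in for the DRM_VMW_MAX_* caps.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Assumed behavior of TTM's ttm_round_pot(). */
static uint64_t round_pot(uint64_t size)
{
	uint64_t tmp = 4;

	if ((size & (size - 1)) == 0)
		return size;
	if (size > PAGE_SIZE)
		return (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	while (tmp < size)
		tmp <<= 1;
	return tmp;
}

int main(void)
{
	/* Worst case: 6 faces * 24 mip levels (assumed DRM_VMW_MAX_* caps). */
	uint64_t num_sizes = 6 * 24;
	/* Assumed: sizeof(struct vmw_user_surface) ~ 1 KiB, and 12 bytes
	 * each for struct drm_vmw_size and struct vmw_surface_offset. */
	uint64_t user_surface_size = round_pot(1024) + 128;
	uint64_t size = user_surface_size + 128 +
			round_pot(num_sizes * 12) +	/* sizes:   2048 */
			round_pot(num_sizes * 12);	/* offsets: 2048 */

	/* Prints 5376: roughly 5 KiB accounted per worst-case surface. */
	printf("%llu bytes\n", (unsigned long long)size);
	return 0;
}

Under these assumptions a worst-case surface is charged a few KiB, which is why the fixed 128-byte fudge for idr bookkeeping is negligible, while leaving the allocations unaccounted would let a user-space client pin unbounded kernel memory.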