path: root/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
author    Thomas Hellstrom <thellstrom@vmware.com>  2014-12-02 06:32:24 -0500
committer Thomas Hellstrom <thellstrom@vmware.com>  2014-12-03 03:48:14 -0500
commit    1f563a6a46544602183e7493b6ef69769d3d76d9
tree      1709bb7a43770826db00b2fe8718063a05fea50a /drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
parent    e338c4c2b620ba4e75fd3576f8142eb93be12ce3
drm/vmwgfx: Don't use memory accounting for kernel-side fence objects
Kernel-side fence objects are used when unbinding resources and may thus be created as part of a memory reclaim operation. This might trigger recursive memory reclaims and result in the kernel running out of stack space. A simple way out is therefore to avoid memory accounting for these fence objects. In principle this is OK: while user-space can trigger the creation of such objects, it can't really hold on to them. However, their lifetime is quite long, so some form of accounting should perhaps be implemented in the future.

Fixes kernel crashes when running, for example, viewperf11 ensight-04 test 3 with low system memory settings.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fence.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 22 ++--------------------
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd7803..6773938b6e40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	struct vmw_fence_manager *fman = fman_from_fence(fence);
-
 	fence_free(&fence->base);
-
-	/*
-	 * Free kernel space accounting.
-	 */
-	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-			    fman->fence_size);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
 		     uint32_t seqno,
 		     struct vmw_fence_obj **p_fence)
 {
-	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-				   false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_object;
-	}
+	if (unlikely(fence == NULL))
+		return -ENOMEM;
 
 	ret = vmw_fence_obj_init(fman, fence, seqno,
 				 vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 
 out_err_init:
 	kfree(fence);
-out_no_object:
-	ttm_mem_global_free(mem_glob, fman->fence_size);
 	return ret;
 }
 
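For readers skimming the change, the sketch below reconstructs how the two touched functions read after this patch, based only on the hunks above. The unchanged lines that fall between the two hunks are not part of the diff and are only summarized in comments, so this is an illustration of the resulting control flow rather than a verbatim copy of the file.

/*
 * Post-patch sketch, reconstructed from the hunks above. Kernel-side
 * fence objects are now plain kzalloc()/fence_free() allocations with
 * no ttm_mem_global accounting, so creating one from a reclaim path
 * can no longer recurse into memory reclaim.
 */
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	/* No ttm_mem_global_free() here: the object was never accounted. */
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	/* Plain allocation; the ttm_mem_global_alloc() step is gone. */
	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	/* ... unchanged code between the two hunks: on failure it jumps to
	 * out_err_init below, on success it hands the fence back through
	 * *p_fence ... */

out_err_init:
	kfree(fence);
	/* The old out_no_object label and its ttm_mem_global_free() call
	 * are gone, since nothing was accounted in the first place. */
	return ret;
}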