path: root/drivers/gpu/drm
author	Thomas Hellstrom <thellstrom@vmware.com>	2013-11-14 13:49:05 -0500
committer	Thomas Hellstrom <thellstrom@vmware.com>	2013-11-20 06:46:54 -0500
commit	c58f009e01c918717379c206a63baa66f56a77f9 (patch)
tree	ca283d5304390d33e526788141923f8db2008c51 /drivers/gpu/drm
parent	0bc254257bfd9b25f64a68b719ee70a303b6d051 (diff)
drm/ttm: Remove set_need_resched from the ttm fault handler
Addresses "[BUG] completely bonkers use of set_need_resched + VM_FAULT_NOPAGE". In the first occurence it was used to try to be nice while releasing the mmap_sem and retrying the fault to work around a locking inversion. The second occurence was never used. There has been some discussion whether we should change the locking order to mmap_sem -> bo_reserve. This patch doesn't address that issue, and leaves that locking order undefined. The solution that we release the mmap_sem if tryreserve fails and wait for the buffer to become unreserved is something we want in any case, and follows how the core vm system waits for pages to be come unlocked while releasing the mmap_sem. The code also outlines what needs to be changed if we want to establish the locking order as mmap_sem -> bo::reserve. One slight issue that remains with this code is that the fault handler might be prone to starvation if another thread countinously reserves the buffer. IMO that usage pattern is highly unlikely. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	| 35
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_vm.c	| 26
2 files changed, 54 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d5a646ebe6a..07e02c4bf5a8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -151,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->glob->bo_count);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
-
+	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -1123,6 +1123,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1704,3 +1705,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 	;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+	int ret;
+
+	/*
+	 * In the absence of a wait_unlocked API,
+	 * use the bo::wu_mutex to avoid triggering livelocks due to
+	 * concurrent use of this function. Note that this use of
+	 * bo::wu_mutex can go away if we change locking order to
+	 * mmap_sem -> bo::reserve.
+	 */
+	ret = mutex_lock_interruptible(&bo->wu_mutex);
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+	if (!ww_mutex_is_locked(&bo->resv->lock))
+		goto out_unlock;
+	ret = ttm_bo_reserve_nolru(bo, true, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+	ww_mutex_unlock(&bo->resv->lock);
+
+out_unlock:
+	mutex_unlock(&bo->wu_mutex);
+	return ret;
+}
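As a rough illustration of why the new bo::wu_mutex is introduced above, the same userspace analogy can be extended with a wait helper: waiters serialize on a separate mutex so that at most one of them at a time briefly takes and releases the contended lock, which is what the in-code comment means by avoiding livelocks from concurrent use. The names (inner, wu, wait_unreserved) are illustrative only, not the TTM API.

/*
 * Userspace analogy of ttm_bo_wait_unreserved() with illustrative names:
 * "inner" ~ bo::reserve, "wu" ~ bo::wu_mutex.  Only one waiter at a time
 * may briefly acquire and release the contended inner lock; the caller
 * retries the whole fault afterwards.
 */
#include <pthread.h>

static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* ~ bo::reserve */
static pthread_mutex_t wu = PTHREAD_MUTEX_INITIALIZER;		/* ~ bo::wu_mutex */

static int wait_unreserved(void)
{
	int ret = pthread_mutex_lock(&wu);	/* serialize the waiters */
	if (ret != 0)
		return ret;

	/* Block until the current owner lets go, then release at once. */
	pthread_mutex_lock(&inner);
	pthread_mutex_unlock(&inner);

	pthread_mutex_unlock(&wu);
	return 0;
}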
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ac617f3ecd0c..b249ab9b1eb2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -107,13 +107,28 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/*
 	 * Work around locking order reversal in fault / nopfn
 	 * between mmap_sem and bo_reserve: Perform a trylock operation
-	 * for reserve, and if it fails, retry the fault after scheduling.
+	 * for reserve, and if it fails, retry the fault after waiting
+	 * for the buffer to become unreserved.
 	 */
-
-	ret = ttm_bo_reserve(bo, true, true, false, 0);
+	ret = ttm_bo_reserve(bo, true, true, false, NULL);
 	if (unlikely(ret != 0)) {
-		if (ret == -EBUSY)
-			set_need_resched();
+		if (ret != -EBUSY)
+			return VM_FAULT_NOPAGE;
+
+		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
+			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+				up_read(&vma->vm_mm->mmap_sem);
+				(void) ttm_bo_wait_unreserved(bo);
+			}
+
+			return VM_FAULT_RETRY;
+		}
+
+		/*
+		 * If we'd want to change locking order to
+		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
+		 * instead of retrying the fault...
+		 */
 		return VM_FAULT_NOPAGE;
 	}
 
@@ -123,7 +138,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case 0:
 		break;
 	case -EBUSY:
-		set_need_resched();
 	case -ERESTARTSYS:
 		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;