aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorThomas Hellstrom <thellstrom@vmware.com>2018-06-20 05:51:02 -0400
committerThomas Hellstrom <thellstrom@vmware.com>2018-07-03 14:40:48 -0400
commit14dba7178491e2bc411dc2e4542295709c1fb9e6 (patch)
treeb389fa88db41986e6b1d5492997e68a92490c19e /drivers/gpu
parent3fbeccf8ceb1651b376a14bfe1005d518a4d8fa0 (diff)
drm/vmwgfx: Reorganize the fence wait loop
Reorganize the fence wait loop somewhat to make it look more like the examples in set_current_state() kerneldoc, and add some code comments. Also if we're about to time out, make sure we check again whether the fence is actually signaled.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c38
1 file changed, 26 insertions, 12 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 9ed544f8958f..ea41d74d8341 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -175,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
175 struct vmw_private *dev_priv = fman->dev_priv; 175 struct vmw_private *dev_priv = fman->dev_priv;
176 struct vmwgfx_wait_cb cb; 176 struct vmwgfx_wait_cb cb;
177 long ret = timeout; 177 long ret = timeout;
178 unsigned long irq_flags;
179 178
180 if (likely(vmw_fence_obj_signaled(fence))) 179 if (likely(vmw_fence_obj_signaled(fence)))
181 return timeout; 180 return timeout;
@@ -183,7 +182,7 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
183 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 182 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
184 vmw_seqno_waiter_add(dev_priv); 183 vmw_seqno_waiter_add(dev_priv);
185 184
186 spin_lock_irqsave(f->lock, irq_flags); 185 spin_lock(f->lock);
187 186
188 if (intr && signal_pending(current)) { 187 if (intr && signal_pending(current)) {
189 ret = -ERESTARTSYS; 188 ret = -ERESTARTSYS;
@@ -194,30 +193,45 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
194 cb.task = current; 193 cb.task = current;
195 list_add(&cb.base.node, &f->cb_list); 194 list_add(&cb.base.node, &f->cb_list);
196 195
197 while (ret > 0) { 196 for (;;) {
198 __vmw_fences_update(fman); 197 __vmw_fences_update(fman);
199 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
200 break;
201 198
199 /*
200 * We can use the barrier free __set_current_state() since
201 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
202 * fence spinlock.
203 */
202 if (intr) 204 if (intr)
203 __set_current_state(TASK_INTERRUPTIBLE); 205 __set_current_state(TASK_INTERRUPTIBLE);
204 else 206 else
205 __set_current_state(TASK_UNINTERRUPTIBLE); 207 __set_current_state(TASK_UNINTERRUPTIBLE);
206 spin_unlock_irqrestore(f->lock, irq_flags);
207 208
208 ret = schedule_timeout(ret); 209 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
210 if (ret == 0 && timeout > 0)
211 ret = 1;
212 break;
213 }
209 214
210 spin_lock_irqsave(f->lock, irq_flags); 215 if (intr && signal_pending(current)) {
211 if (ret > 0 && intr && signal_pending(current))
212 ret = -ERESTARTSYS; 216 ret = -ERESTARTSYS;
213 } 217 break;
218 }
214 219
220 if (ret == 0)
221 break;
222
223 spin_unlock(f->lock);
224
225 ret = schedule_timeout(ret);
226
227 spin_lock(f->lock);
228 }
229 __set_current_state(TASK_RUNNING);
215 if (!list_empty(&cb.base.node)) 230 if (!list_empty(&cb.base.node))
216 list_del(&cb.base.node); 231 list_del(&cb.base.node);
217 __set_current_state(TASK_RUNNING);
218 232
219out: 233out:
220 spin_unlock_irqrestore(f->lock, irq_flags); 234 spin_unlock(f->lock);
221 235
222 vmw_seqno_waiter_remove(dev_priv); 236 vmw_seqno_waiter_remove(dev_priv);
223 237