Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_irq.c')
 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 56 ++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index e92298a6a383..48701d2c8c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -53,7 +53,7 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 	return IRQ_NONE;
 }
 
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
 	uint32_t busy;
 
@@ -64,43 +64,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
 	return (busy == 0);
 }
 
-void vmw_update_sequence(struct vmw_private *dev_priv,
+void vmw_update_seqno(struct vmw_private *dev_priv,
 			 struct vmw_fifo_state *fifo_state)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
-	if (dev_priv->last_read_sequence != sequence) {
-		dev_priv->last_read_sequence = sequence;
-		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	if (dev_priv->last_read_seqno != seqno) {
+		dev_priv->last_read_seqno = seqno;
+		vmw_marker_pull(&fifo_state->marker_queue, seqno);
 	}
 }
 
-bool vmw_fence_signaled(struct vmw_private *dev_priv,
-			uint32_t sequence)
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+		      uint32_t seqno)
 {
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	fifo_state = &dev_priv->fifo;
-	vmw_update_sequence(dev_priv, fifo_state);
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	vmw_update_seqno(dev_priv, fifo_state);
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, sequence))
+	    vmw_fifo_idle(dev_priv, seqno))
 		return true;
 
 	/**
-	 * Then check if the sequence is higher than what we've actually
+	 * Then check if the seqno is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
 	       > VMW_FENCE_WRAP);
 
 	return ret;
@@ -109,7 +109,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 int vmw_fallback_wait(struct vmw_private *dev_priv,
 		      bool lazy,
 		      bool fifo_idle,
-		      uint32_t sequence,
+		      uint32_t seqno,
 		      bool interruptible,
 		      unsigned long timeout)
 {
@@ -123,7 +123,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	DEFINE_WAIT(__wait);
 
 	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
-		&vmw_fence_signaled;
+		&vmw_seqno_passed;
 
 	/**
 	 * Block command submission while waiting for idle.
@@ -131,14 +131,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = atomic_read(&dev_priv->fence_seq);
+	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
 	for (;;) {
 		prepare_to_wait(&dev_priv->fence_queue, &__wait,
 				(interruptible) ?
 				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-		if (wait_condition(dev_priv, sequence))
+		if (wait_condition(dev_priv, seqno))
 			break;
 		if (time_after_eq(jiffies, end_jiffies)) {
 			DRM_ERROR("SVGA device lockup.\n");
@@ -175,28 +175,28 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
-int vmw_wait_fence(struct vmw_private *dev_priv,
-		   bool lazy, uint32_t sequence,
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+		   bool lazy, uint32_t seqno,
 		   bool interruptible, unsigned long timeout)
 {
 	long ret;
 	unsigned long irq_flags;
 	struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return 0;
 
-	if (likely(vmw_fence_signaled(dev_priv, sequence)))
+	if (likely(vmw_seqno_passed(dev_priv, seqno)))
 		return 0;
 
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
 	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
-		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
 					 interruptible, timeout);
 
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
 					 interruptible, timeout);
 
 	mutex_lock(&dev_priv->hw_mutex);
@@ -214,12 +214,12 @@ int vmw_wait_fence(struct vmw_private *dev_priv,
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
 		    (dev_priv->fence_queue,
-		     vmw_fence_signaled(dev_priv, sequence),
+		     vmw_seqno_passed(dev_priv, seqno),
 		     timeout);
 	else
 		ret = wait_event_timeout
 		    (dev_priv->fence_queue,
-		     vmw_fence_signaled(dev_priv, sequence),
+		     vmw_seqno_passed(dev_priv, seqno),
 		     timeout);
 
 	if (unlikely(ret == 0))
@@ -293,5 +293,5 @@ int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
 		return -EBUSY;
 
 	timeout = (unsigned long)arg->kernel_cookie - timeout;
-	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+	return vmw_wait_seqno(vmw_priv(dev), true, arg->seqno, true, timeout);
 }
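
The renamed helpers above all decide "has this seqno passed?" with the same unsigned-arithmetic trick: `last_read_seqno - seqno < VMW_FENCE_WRAP`, which stays correct even when the 32-bit counter wraps past zero. Below is a minimal standalone sketch of that comparison, not driver code; the helper name is hypothetical and the `VMW_FENCE_WRAP` value is an assumption used only for illustration.

	/* seqno_wrap_demo.c -- illustrative only, not part of vmwgfx. */
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed window size; the driver defines its own VMW_FENCE_WRAP. */
	#define VMW_FENCE_WRAP (1u << 24)

	/*
	 * Returns nonzero if 'seqno' is at or behind 'last_read_seqno'.
	 * Unsigned subtraction makes the test wraparound-safe: if the
	 * counter has wrapped, the difference is still small and positive.
	 */
	static int seqno_passed(uint32_t last_read_seqno, uint32_t seqno)
	{
		return (last_read_seqno - seqno) < VMW_FENCE_WRAP;
	}

	int main(void)
	{
		printf("%d\n", seqno_passed(105u, 100u));       /* 1: plainly passed   */
		printf("%d\n", seqno_passed(2u, 0xfffffff0u));  /* 1: passed across wrap */
		printf("%d\n", seqno_passed(100u, 105u));       /* 0: still pending    */
		return 0;
	}

This is also why the patch can keep returning early in vmw_seqno_passed() and vmw_wait_seqno() on the cheap comparison before touching the FIFO or the marker queue.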