diff options
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_irq.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 286 |
1 files changed, 286 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c new file mode 100644 index 000000000000..4d7cb5393860 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -0,0 +1,286 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "drmP.h" | ||
29 | #include "vmwgfx_drv.h" | ||
30 | |||
/*
 * Fence sequence numbers are 32-bit and wrap around. A fence is treated
 * as signaled when it lies within this window behind the most recently
 * read sequence (unsigned wrap-around comparison in vmw_fence_signaled()).
 */
#define VMW_FENCE_WRAP (1 << 24)
32 | |||
33 | irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) | ||
34 | { | ||
35 | struct drm_device *dev = (struct drm_device *)arg; | ||
36 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
37 | uint32_t status; | ||
38 | |||
39 | spin_lock(&dev_priv->irq_lock); | ||
40 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
41 | spin_unlock(&dev_priv->irq_lock); | ||
42 | |||
43 | if (status & SVGA_IRQFLAG_ANY_FENCE) | ||
44 | wake_up_all(&dev_priv->fence_queue); | ||
45 | if (status & SVGA_IRQFLAG_FIFO_PROGRESS) | ||
46 | wake_up_all(&dev_priv->fifo_queue); | ||
47 | |||
48 | if (likely(status)) { | ||
49 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
50 | return IRQ_HANDLED; | ||
51 | } | ||
52 | |||
53 | return IRQ_NONE; | ||
54 | } | ||
55 | |||
56 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | ||
57 | { | ||
58 | uint32_t busy; | ||
59 | |||
60 | mutex_lock(&dev_priv->hw_mutex); | ||
61 | busy = vmw_read(dev_priv, SVGA_REG_BUSY); | ||
62 | mutex_unlock(&dev_priv->hw_mutex); | ||
63 | |||
64 | return (busy == 0); | ||
65 | } | ||
66 | |||
67 | |||
/**
 * vmw_fence_signaled - Check whether a fence sequence has signaled.
 * @dev_priv: Pointer to the device private structure.
 * @sequence: Fence sequence number to test.
 *
 * Sequence numbers wrap, so "signaled" means the last sequence read
 * from the device is no more than VMW_FENCE_WRAP ahead of @sequence
 * (unsigned wrap-around comparison).
 *
 * Returns true if the fence has signaled, or is stale (never emitted).
 */
bool vmw_fence_signaled(struct vmw_private *dev_priv,
			uint32_t sequence)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	struct vmw_fifo_state *fifo_state;
	bool ret;

	/* Fast path: the cached last-read sequence already covers us. */
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	/* Refresh the cache from the FIFO fence register and retry. */
	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	/*
	 * Without FIFO fence support, an idle device implies that all
	 * submitted commands — and hence all fences — have completed.
	 */
	fifo_state = &dev_priv->fifo;
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/**
	 * Then check if the sequence is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
	       > VMW_FENCE_WRAP);

	return ret;
}
97 | |||
/**
 * vmw_fallback_wait - Wait for a fence by polling, without device IRQs.
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep between polls instead of busy-spinning.
 * @fifo_idle: Poll for full device idle instead of a specific fence
 *             (used when the FIFO lacks fence support).
 * @sequence: Fence sequence number to wait for (ignored by the idle
 *            condition; see vmw_fifo_idle()).
 * @interruptible: Allow the wait to be aborted by a pending signal.
 * @timeout: Maximum time to wait, in jiffies.
 *
 * Returns 0 if the condition was met or the timeout expired (a timeout
 * only logs "SVGA device lockup"), or -ERESTARTSYS if interrupted.
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	/* Snapshot the highest emitted sequence before we start waiting. */
	signal_seq = atomic_read(&dev_priv->fence_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			/* Busy path: yield the CPU every 16 iterations. */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		/*
		 * Device went idle: publish the snapshotted sequence to the
		 * FIFO fence register so stale fences read as signaled.
		 */
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
166 | |||
/**
 * vmw_wait_fence - Wait for a fence sequence to signal.
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Passed through to vmw_fallback_wait() when polling.
 * @sequence: Fence sequence number to wait for.
 * @interruptible: Allow the wait to be aborted by a signal.
 * @timeout: Maximum time to wait, in jiffies.
 *
 * Uses a device interrupt driven wait when both FIFO fences and IRQ
 * masking are supported; otherwise falls back to polling via
 * vmw_fallback_wait().
 *
 * Returns 0 if the fence signaled, -EBUSY on timeout, or a negative
 * error (-ERESTARTSYS) if interrupted by a signal.
 */
int vmw_wait_fence(struct vmw_private *dev_priv,
		   bool lazy, uint32_t sequence,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	unsigned long irq_flags;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	/* Cheap check against the cached last-read sequence first. */
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_fence_signaled(dev_priv, sequence)))
		return 0;

	/* Kick the device so it makes progress on the FIFO. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	/* No FIFO fences: poll for full device idle instead. */
	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
					 interruptible, timeout);

	/* No IRQ masking: poll for the specific fence. */
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
					 interruptible, timeout);

	/*
	 * Enable the ANY_FENCE interrupt while we have waiters, clearing
	 * any stale pending status first.
	 *
	 * NOTE(review): atomic_add_return(1, ...) > 0 is true for every
	 * waiter (the counter never goes negative), so the interrupt is
	 * re-enabled on each wait rather than only for the first waiter;
	 * presumably "== 1" was intended — verify against later versions.
	 */
	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);

	/* 0 -> timed out; > 0 -> condition met; < 0 -> -ERESTARTSYS. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	/* Last waiter out masks the fence interrupt again. */
	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
231 | |||
232 | void vmw_irq_preinstall(struct drm_device *dev) | ||
233 | { | ||
234 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
235 | uint32_t status; | ||
236 | |||
237 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
238 | return; | ||
239 | |||
240 | spin_lock_init(&dev_priv->irq_lock); | ||
241 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
242 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
243 | } | ||
244 | |||
/**
 * vmw_irq_postinstall - DRM post-install hook.
 * @dev: Pointer to the DRM device.
 *
 * Nothing to do: interrupt sources are unmasked on demand (see the
 * SVGA_REG_IRQMASK updates in vmw_wait_fence()). Always returns 0.
 */
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}
249 | |||
250 | void vmw_irq_uninstall(struct drm_device *dev) | ||
251 | { | ||
252 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
253 | uint32_t status; | ||
254 | |||
255 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
256 | return; | ||
257 | |||
258 | mutex_lock(&dev_priv->hw_mutex); | ||
259 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | ||
260 | mutex_unlock(&dev_priv->hw_mutex); | ||
261 | |||
262 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
263 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
264 | } | ||
265 | |||
/*
 * Default fence-wait deadline in jiffies.
 *
 * Fix: the original definition ended in a stray semicolon and lacked
 * parentheses (`3*HZ;`), which expanded to a double semicolon at its
 * only use site and would break in any expression context
 * (CERT PRE11-C / PRE01-C).
 */
#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
267 | |||
/**
 * vmw_fence_wait_ioctl - Ioctl entry point to wait for a fence.
 * @dev: Pointer to the DRM device.
 * @data: Pointer to a struct drm_vmw_fence_wait_arg.
 * @file_priv: DRM file private (unused here).
 *
 * On the first call for this argument block, an absolute deadline
 * (jiffies + VMW_FENCE_WAIT_TIMEOUT) is stored in @arg->kernel_cookie,
 * so a restarted ioctl (e.g. after -ERESTARTSYS) resumes with the
 * remaining time instead of a fresh timeout.
 *
 * Returns 0 if the fence signaled, -EBUSY if the deadline has already
 * passed, or a negative error code from vmw_wait_fence().
 */
int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
	}

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
		return -EBUSY;

	/* Remaining time until the recorded deadline. */
	timeout = (unsigned long)arg->kernel_cookie - timeout;
	/* lazy = true, interruptible = true for userspace waits. */
	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
}