Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
 -rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 538
 1 file changed, 538 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 000000000000..39d43a01d846
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,538 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"

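/*
 * vmw_fifo_have_3d - check whether the host offers 3D support.
 *
 * The FIFO must be large enough to contain the SVGA_FIFO_3D_HWVERSION
 * register, and the version it reports must be non-zero and at least
 * SVGA3D_HWVERSION_WS65_B1.
 */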
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;

	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS65_B1)
		return false;

	return true;
}

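/*
 * vmw_fifo_init - allocate the bounce buffers and bring up the device FIFO.
 *
 * Saves the SVGA_REG_ENABLE and SVGA_REG_CONFIG_DONE states for
 * restoration in vmw_fifo_release(), programs the FIFO min/max/
 * next_cmd/stop registers, reads back the FIFO capabilities and emits
 * an initial fence. Returns 0 on success or a negative error code.
 */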
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}

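/*
 * vmw_fifo_ping_host - ask the host to process pending FIFO commands.
 *
 * The sync register is only written when the FIFO is not already
 * marked busy, which avoids redundant host wakeups.
 */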
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

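/*
 * vmw_fifo_release - tear down the FIFO and free the bounce buffers.
 *
 * Syncs until the device is idle, records the last fence the host has
 * processed and restores the register states saved in vmw_fifo_init().
 */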
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

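/*
 * vmw_fifo_is_full - return true when @bytes of FIFO space are not
 * available. Free space in the circular command buffer is computed as
 * the space after next_cmd plus the space between min and stop.
 */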
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

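/*
 * vmw_fifo_wait_noirq - wait for FIFO space by polling, for devices
 * that cannot raise FIFO-progress interrupts. Sleeps a jiffy between
 * polls and fails with -EBUSY once the timeout expires.
 */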
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

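/*
 * vmw_fifo_wait - wait until the FIFO can hold @bytes.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls via
 * vmw_fifo_wait_noirq() or, when SVGA_CAP_IRQMASK is available, unmasks
 * the FIFO-progress interrupt while waiters are queued and sleeps on
 * fifo_queue.
 */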
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

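/*
 * vmw_fifo_reserve - reserve @bytes of FIFO space for a command.
 *
 * Returns a pointer for the caller to build the command at, or NULL on
 * failure. The command is built directly in FIFO memory when it fits
 * without wrapping and either SVGA_FIFO_CAP_RESERVE is supported or it
 * is a single 32-bit word; otherwise a bounce buffer is handed out
 * (the preallocated static buffer, or a vmalloc'd one for larger
 * commands; note the vmalloc() result is returned unchecked). The
 * fifo_mutex is held until the matching vmw_fifo_commit().
 */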
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}

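/*
 * vmw_fifo_res_copy - copy a bounced command into the FIFO using the
 * SVGA_FIFO_CAP_RESERVE protocol: publish the reserved byte count
 * first, then copy the data in at most two chunks to handle the wrap
 * from max back to min.
 */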
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

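/*
 * vmw_fifo_slow_copy - copy a bounced command into the FIFO one 32-bit
 * word at a time, publishing SVGA_FIFO_NEXT_CMD after each word. This
 * is the fallback used when the device lacks SVGA_FIFO_CAP_RESERVE.
 */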
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

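/*
 * vmw_fifo_commit - publish @bytes of the currently reserved command.
 *
 * Copies the bounce buffer into the FIFO when one was used, advances
 * SVGA_FIFO_NEXT_CMD with wraparound, clears SVGA_FIFO_RESERVED and
 * pings the host. Releases the fifo_mutex taken by vmw_fifo_reserve().
 */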
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

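/*
 * vmw_fifo_send_fence - emit an SVGA_CMD_FENCE command and return the
 * new fence sequence number in @sequence.
 *
 * The sequence counter skips 0, which stands for "no fence". Without
 * SVGA_FIFO_CAP_FENCE the reservation is committed empty and the fence
 * is emulated by the waiting code in vmwgfx_irq.c instead.
 */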
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*sequence = atomic_read(&dev_priv->fence_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}

/**
 * Map the first page of the FIFO read-only to user-space.
 */

static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};

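/*
 * vmw_fifo_mmap - map the first FIFO page read-only into user-space.
 * Only a single-page mapping at the start of the MMIO region is
 * accepted; faults are served by vmw_fifo_vm_fault() above.
 */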
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}