author    Jakob Bornecrantz <jakob@vmware.com>    2009-12-09 19:19:58 -0500
committer Dave Airlie <airlied@redhat.com>    2009-12-14 17:38:43 -0500
commit    fb1d9738ca053ea8afa5e86af6463155f983b01c (patch)
tree      53aa407922c989f48aead5fcf61f9945ca6051d5 /drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
parent    632f61178d0473861ba77e774bb654b37bc7eccc (diff)
drm/vmwgfx: Add DRM driver for VMware Virtual GPU
This commit adds the vmwgfx driver for the VMware Virtual GPU aka SVGA. The driver is under staging, the same as Nouveau and Radeon KMS.

Hopefully the 2D ioctls are bug free and don't need changing, so that part of the API should be stable. But there is a pretty big chance that the 3D API will change in the future.

Signed-off-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c  521
1 file changed, 521 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 000000000000..76b0693e2458
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,521 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"

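/*
 * vmw_fifo_init - Allocate the bounce buffers and bring up the device FIFO.
 *
 * Saves the previous SVGA_REG_ENABLE and SVGA_REG_CONFIG_DONE state so
 * vmw_fifo_release() can restore it, programs the FIFO min/max/next_cmd/
 * stop registers, reads back the FIFO capabilities and sends an initial
 * fence.
 */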
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;
	int ret;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->last_data_size = 0;
	fifo->last_buffer_add = false;
	fifo->last_buffer = vmalloc(fifo->last_buffer_size);
	if (unlikely(fifo->last_buffer == NULL)) {
		ret = -ENOMEM;
		goto out_err;
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	dev_priv->fence_seq = (uint32_t) -100;
	dev_priv->last_read_sequence = (uint32_t) -100;
	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);

	return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
	vfree(fifo->static_buffer);
	fifo->static_buffer = NULL;
	return ret;
}

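/*
 * vmw_fifo_ping_host - Wake the host to process pending FIFO commands.
 *
 * The busy flag avoids redundant syncs: the host is only kicked through
 * SVGA_REG_SYNC if SVGA_FIFO_BUSY was still clear.
 */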
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}

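/*
 * vmw_fifo_release - Tear down the FIFO set up by vmw_fifo_init().
 *
 * Syncs until the device goes idle, records the last fence it processed,
 * restores the saved enable / config-done register state and frees the
 * bounce buffers.
 */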
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);

	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);

	mutex_unlock(&dev_priv->hw_mutex);

	if (likely(fifo->last_buffer != NULL)) {
		vfree(fifo->last_buffer);
		fifo->last_buffer = NULL;
	}

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

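/*
 * The FIFO is a circular buffer in [min, max) with producer pointer
 * NEXT_CMD and consumer pointer STOP. The expression below is the free
 * space when the data hasn't wrapped: the gap from next_cmd up to max
 * plus the gap from min up to stop.
 */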
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

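/*
 * Polling fallback for devices without SVGA_CAP_IRQMASK: sleep one tick
 * at a time until the FIFO has room, the timeout expires (-EBUSY) or,
 * if interruptible, a signal arrives (-ERESTART).
 */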
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTART;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

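/*
 * Block until at least @bytes of FIFO space are available. The first
 * waiter enables the FIFO_PROGRESS interrupt and the last one to leave
 * disables it again; without SVGA_CAP_IRQMASK this falls back to
 * vmw_fifo_wait_noirq().
 */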
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == -ERESTARTSYS))
		ret = -ERESTART;
	else if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_FIFO_PROGRESS);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

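/*
 * vmw_fifo_reserve - Reserve @bytes of FIFO space for a command.
 *
 * Returns a pointer directly into the FIFO when the request fits
 * contiguously (announced through SVGA_FIFO_RESERVED if the device
 * supports it), otherwise a bounce buffer - the preallocated static one
 * for small requests, a vmalloc'd one for larger ones - that
 * vmw_fifo_commit() later copies into the FIFO. Takes the FIFO rwsem,
 * which vmw_fifo_commit() releases. Returns NULL on failure.
 */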
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	down_write(&fifo_state->rwsem);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {

			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	up_write(&fifo_state->rwsem);
	return NULL;
}

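/*
 * Commit path for devices with SVGA_FIFO_CAP_RESERVE: publish the full
 * command size in SVGA_FIFO_RESERVED, then copy the bounce buffer into
 * the FIFO in at most two chunks, wrapping from max back to min.
 */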
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

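/*
 * Commit path for devices without the reserve capability: copy the
 * bounce buffer one 32-bit word at a time, updating NEXT_CMD after
 * every word (with wraparound at max) so the device never sees a write
 * pointer ahead of valid data.
 */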
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

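/*
 * vmw_fifo_commit - Publish @bytes of commands reserved with
 * vmw_fifo_reserve().
 *
 * Copies a bounce buffer into the FIFO if one was used, advances
 * NEXT_CMD with wraparound, clears the reservation, pings the host and
 * drops the FIFO rwsem taken in vmw_fifo_reserve().
 */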
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	up_write(&fifo_state->rwsem);
}

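/*
 * vmw_fifo_send_fence - Emit an SVGA_CMD_FENCE command.
 *
 * Hands back the (never zero) sequence number in @sequence. If the
 * device lacks SVGA_FIFO_CAP_FENCE, the reservation is committed empty
 * and the waiting code in vmwgfx_irq.c emulates fence completion.
 */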
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		down_write(&fifo_state->rwsem);
		*sequence = dev_priv->fence_seq;
		up_write(&fifo_state->rwsem);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*sequence = dev_priv->fence_seq++;
	} while (*sequence == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*sequence, &cmd_fence->fence);
	fifo_state->last_buffer_add = true;
	vmw_fifo_commit(dev_priv, bytes);
	fifo_state->last_buffer_add = false;

out_err:
	return ret;
}

/**
 * Map the first page of the FIFO read-only to user-space.
 */

static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int ret;
	unsigned long address = (unsigned long)vmf->virtual_address;

	if (address != vma->vm_start)
		return VM_FAULT_SIGBUS;

	ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
	if (likely(ret == -EBUSY || ret == 0))
		return VM_FAULT_NOPAGE;
	else if (ret == -ENOMEM)
		return VM_FAULT_OOM;

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct vmw_fifo_vm_ops = {
	.fault = vmw_fifo_vm_fault,
	.open = NULL,
	.close = NULL
};

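/*
 * vmw_fifo_mmap - Allow user-space to map the first FIFO page.
 *
 * Only a single-page, read-only, uncached mapping of the start of the
 * MMIO region is accepted; anything else gets -EINVAL.
 */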
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct vmw_private *dev_priv;

	file_priv = (struct drm_file *)filp->private_data;
	dev_priv = vmw_priv(file_priv->minor->dev);

	if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
	    (vma->vm_end - vma->vm_start) != PAGE_SIZE)
		return -EINVAL;

	vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
					vma->vm_page_prot);
	vma->vm_ops = &vmw_fifo_vm_ops;
	return 0;
}