-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 89
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 4
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 23
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 2
-rw-r--r--  drivers/gpu/drm/mga/mga_ioc32.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 4
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_ioc32.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 267
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 506
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 74
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 14
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 2
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 2
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 77
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 2
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h | 1793
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_escape.h | 89
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_overlay.h | 201
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_reg.h | 1346
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga_types.h | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 229
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 726
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 513
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 621
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 742
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 519
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 213
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 293
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 872
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 102
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 516
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 634
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 1183
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 99
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  include/drm/Kbuild | 1
-rw-r--r--  include/drm/drmP.h | 5
-rw-r--r--  include/drm/ttm/ttm_object.h | 6
-rw-r--r--  include/drm/vmwgfx_drm.h | 574
75 files changed, 12674 insertions(+), 225 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 470ef6779db3..39c5aa75b8f1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915)  += i915/
 obj-$(CONFIG_DRM_SIS)   += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)   +=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
 obj-y                   += i2c/
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index ff2f1042cb44..766c46875a20 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
  * Looks up the ioctl function in the ::ioctls table, checking for root
  * previleges if so required, and dispatches to the respective function.
  */
-int drm_ioctl(struct inode *inode, struct file *filp,
+long drm_ioctl(struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_device *dev;
 	struct drm_ioctl_desc *ioctl;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	char stack_kdata[128];
 	char *kdata = NULL;
 
+	dev = file_priv->minor->dev;
 	atomic_inc(&dev->ioctl_count);
 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
@@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 			goto err_i1;
 		}
 	}
-	retcode = func(dev, kdata, file_priv);
+	if (ioctl->flags & DRM_UNLOCKED)
+		retcode = func(dev, kdata, file_priv);
+	else {
+		lock_kernel();
+		retcode = func(dev, kdata, file_priv);
+		unlock_kernel();
+	}
 
 	if (cmd & IOC_OUT) {
 		if (copy_to_user((void __user *)arg, kdata,
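
Note: the change above replaces the old inode-taking .ioctl entry point with an unlocked one and pushes the Big Kernel Lock down into drm_ioctl() itself; only ioctls not flagged DRM_UNLOCKED still take it. A minimal self-contained sketch of that dispatch policy, with stand-in lock/handler functions (the flag value and scaffolding here are illustrative, not the kernel's):

#include <stdio.h>

#define DRM_UNLOCKED 0x10            /* flag value is illustrative only */

struct ioctl_desc { int flags; int (*func)(int); };

static void lock_kernel(void)   { puts("BKL taken");    }
static void unlock_kernel(void) { puts("BKL released"); }
static int handler(int arg)     { return arg * 2; }

/* Same shape as the new drm_ioctl() dispatch: unlocked ioctls skip the BKL. */
static int dispatch(const struct ioctl_desc *d, int arg)
{
    int ret;

    if (d->flags & DRM_UNLOCKED)
        ret = d->func(arg);
    else {
        lock_kernel();
        ret = d->func(arg);
        unlock_kernel();
    }
    return ret;
}

int main(void)
{
    struct ioctl_desc legacy = { 0, handler };
    struct ioctl_desc unlocked = { DRM_UNLOCKED, handler };

    printf("legacy: %d\n", dispatch(&legacy, 21));
    printf("unlocked: %d\n", dispatch(&unlocked, 21));
    return 0;
}
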
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c39b26f1abed..5c9f79877cbf 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -913,7 +913,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
 	const int rates[] = { 60, 85, 75, 60, 50 };
 
 	for (i = 0; i < 4; i++) {
-		int width, height;
+		int uninitialized_var(width), height;
 		cvt = &(timing->data.other_data.data.cvt[i]);
 
 		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 282d9fdf9f4e..d61d185cf040 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 			  &version->desc))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+	err = drm_ioctl(file,
 			DRM_IOCTL_VERSION, (unsigned long)version);
 	if (err)
 		return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
 			  &u->unique))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+	err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
 	if (err)
 		return err;
 
@@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
 			  &u->unique))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+	return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
 }
 
 typedef struct drm_map32 {
@@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
 	if (__put_user(idx, &map->offset))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_MAP, (unsigned long)map);
+	err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
 	if (err)
 		return err;
 
@@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
 	    || __put_user(m32.flags, &map->flags))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_ADD_MAP, (unsigned long)map);
+	err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
 	if (err)
 		return err;
 
@@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
 	if (__put_user((void *)(unsigned long)handle, &map->handle))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_RM_MAP, (unsigned long)map);
+	return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
 }
 
 typedef struct drm_client32 {
@@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
 	if (__put_user(idx, &client->idx))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+	err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
 	if (err)
 		return err;
 
@@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
 	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_STATS, (unsigned long)stats);
+	err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
 	if (err)
 		return err;
 
@@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
 	    || __put_user(agp_start, &buf->agp_start))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+	err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
 	if (err)
 		return err;
 
@@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
 	    || __put_user(b32.high_mark, &buf->high_mark))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+	return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
 }
 
 typedef struct drm_buf_info32 {
@@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
 	    || __put_user(list, &request->list))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
 	if (err)
 		return err;
 
@@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
 	    || __put_user(list, &request->list))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
 	if (err)
 		return err;
 
@@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
 			 &request->list))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
 }
 
 typedef struct drm_ctx_priv_map32 {
@@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
 			 &request->handle))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
 }
 
 static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
@@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
 	if (__put_user(ctx_id, &request->ctx_id))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
 	if (err)
 		return err;
 
@@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
 			 &res->contexts))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_RES_CTX, (unsigned long)res);
+	err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
 	if (err)
 		return err;
 
@@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
 			 &d->request_sizes))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_DMA, (unsigned long)d);
+	err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
 	if (err)
 		return err;
 
@@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
 	if (put_user(m32.mode, &mode->mode))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+	return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
 }
 
 typedef struct drm_agp_info32 {
@@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
 	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_AGP_INFO, (unsigned long)info);
+	err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
 	if (err)
 		return err;
 
@@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
 	    || __put_user(req32.type, &request->type))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
 	if (err)
 		return err;
 
 	if (__get_user(req32.handle, &request->handle)
 	    || __get_user(req32.physical, &request->physical)
 	    || copy_to_user(argp, &req32, sizeof(req32))) {
-		drm_ioctl(file->f_path.dentry->d_inode, file,
-			  DRM_IOCTL_AGP_FREE, (unsigned long)request);
+		drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
 		return -EFAULT;
 	}
 
@@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_AGP_FREE, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
 }
 
 typedef struct drm_agp_binding32 {
@@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
 	    || __put_user(req32.offset, &request->offset))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_AGP_BIND, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
 }
 
 static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
@@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
 	    || __put_user(handle, &request->handle))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
 }
 #endif /* __OS_HAS_AGP */
 
@@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
 	    || __put_user(x, &request->size))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
 	if (err)
 		return err;
 
@@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
 	    || __put_user(x << PAGE_SHIFT, &request->handle))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_SG_FREE, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
 }
 
 #if defined(CONFIG_X86) || defined(CONFIG_IA64)
@@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
 	    __put_user(update32.data, &request->data))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
 	return err;
 }
 #endif
@@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 	    || __put_user(req32.request.signal, &request->request.signal))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+	err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
 	if (err)
 		return err;
 
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	 * than always failing.
 	 */
 	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
-		return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+		return drm_ioctl(filp, cmd, arg);
 
 	fn = drm_compat_ioctls[nr];
 
-	lock_kernel();		/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn) (filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
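
Note: every compat_drm_*() conversion above follows the same pattern — the wrapper used to dig the inode out of file->f_path just to satisfy drm_ioctl()'s old signature, and now shrinks to its real job of repacking the 32-bit layout. A schematic of that job, where drm_foo and DRM_IOCTL_FOO are hypothetical stand-ins for any of the real requests (this is a sketch, not code from the patch):

/* Schematic compat wrapper; struct drm_foo and DRM_IOCTL_FOO are
 * hypothetical stand-ins for the real request types handled above. */
typedef struct drm_foo32 {
    u32 handle;                       /* 32-bit userspace layout */
} drm_foo32_t;

static int compat_drm_foo(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
    drm_foo32_t f32;
    struct drm_foo __user *request;

    if (copy_from_user(&f32, (void __user *)arg, sizeof(f32)))
        return -EFAULT;

    /* build the native struct in userspace scratch space */
    request = compat_alloc_user_space(sizeof(*request));
    if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
        || __put_user(f32.handle, &request->handle))
        return -EFAULT;

    /* re-enter the native path: just the file now, no inode, no BKL */
    return drm_ioctl(file, DRM_IOCTL_FOO, (unsigned long)request);
}
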
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index d7d7eac3ddd2..cdec32977129 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
-			if (size < best_size) {
+			if (entry->size < best_size) {
 				best = entry;
 				best_size = entry->size;
 			}
@@ -408,7 +408,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
-			if (entry->size < best_size) {
+			if (entry->size < best_size) {
 				best = entry;
 				best_size = entry->size;
 			}
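
Note: the drm_mm change above is a one-word bug fix. Comparing the requested size against best_size was almost always true, so the scan kept overwriting best with whatever fitting hole came last; comparing the candidate's own entry->size restores true best-fit. A standalone toy illustrating the difference (the array stands in for the free-hole list; sizes are made up):

#include <stdio.h>

/* Toy version of the fixed best-fit scan: with the old comparison
 * (size < best_size) the loop tracked the last fitting hole; comparing
 * the candidate's own size keeps the smallest one. */
int main(void)
{
    unsigned long holes[] = { 64, 16, 32 };   /* free-hole sizes, made up */
    unsigned long size = 8;                   /* requested allocation */
    unsigned long best_size = ~0UL;
    int i, best = -1;

    for (i = 0; i < 3; i++) {
        if (holes[i] < size)
            continue;                         /* does not fit */
        if (holes[i] < best_size) {           /* was: size < best_size */
            best = i;
            best_size = holes[i];
        }
    }
    printf("best-fit hole: index %d, size %lu\n", best, best_size);
    return 0;
}
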
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 7d1d88cdf2dc..de32d22a8c39 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = i810_mmap_buffers,
 	.fasync = drm_fasync,
 };
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index fabb9a817966..c1e02752e023 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 877bf6cb14a4..06bd732e6463 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = i830_mmap_buffers,
 	.fasync = drm_fasync,
 };
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 389597e4a623..44f990bed8f4 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2fa217862058..24286ca168fc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -329,7 +329,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_gem_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 1fe68a251b75..13b028994b2b 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
 			  &batchbuffer->cliprects))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_I915_BATCHBUFFER,
+	return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
 			 (unsigned long)batchbuffer);
 }
 
@@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
 			  &cmdbuffer->cliprects))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+	return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
+			 (unsigned long)cmdbuffer);
 }
 
 typedef struct drm_i915_irq_emit32 {
@@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
 			  &request->irq_seq))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
+			 (unsigned long)request);
 }
 typedef struct drm_i915_getparam32 {
 	int param;
@@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
 			  &request->value))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
+			 (unsigned long)request);
 }
 
 typedef struct drm_i915_mem_alloc32 {
@@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
 			  &request->region_offset))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+	return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
+			 (unsigned long)request);
 }
 
 drm_ioctl_compat_t *i915_compat_ioctls[] = {
@@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
 		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
 
-	lock_kernel();		/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn) (filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 97ee566ef749..ddfe16197b59 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -68,7 +68,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 30d00478ddee..c1f877b7bac1 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
 	if (err)
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_MGA_INIT, (unsigned long)init);
+	return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
 }
 
 typedef struct drm_mga_getparam32 {
@@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
 			  &getparam->value))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+	return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
 }
 
 typedef struct drm_mga_drm_bootstrap32 {
@@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
 	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+	err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
 			(unsigned long)dma_bootstrap);
 	if (err)
 		return err;
@@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
 		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
 
-	lock_kernel();		/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn) (filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 58c717247f26..06eb993e0883 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -345,7 +345,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = nouveau_ttm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index a2c30f4611ba..475ba810bba3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
 		fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
 #endif
-	lock_kernel();		/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn)(filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 601f4c0e5da5..b806fdcc7170 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -64,7 +64,7 @@ static struct drm_driver driver = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
-	.ioctl = drm_ioctl,
+	.unlocked_ioctl = drm_ioctl,
 	.mmap = drm_mmap,
 	.poll = drm_poll,
 	.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index d3cb676eee84..51c99fc4dd38 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
 			  &init->agp_textures_offset))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_R128_INIT, (unsigned long)init);
+	return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
 }
 
 typedef struct drm_r128_depth32 {
@@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
 			  &depth->mask))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+	return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
 
 }
 
@@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
 			  &stipple->mask))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+	return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
 }
 
 typedef struct drm_r128_getparam32 {
@@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
 			  &getparam->value))
 		return -EFAULT;
 
-	return drm_ioctl(file->f_path.dentry->d_inode, file,
-			 DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+	return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
 }
 
 drm_ioctl_compat_t *r128_compat_ioctls[] = {
@@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
 		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
 
-	lock_kernel();		/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn) (filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index feb52eee4314..b5f5fe75e6af 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o atombios_dp.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 824cc6480a06..84e5df766d3f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1374,7 +1374,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 	case RADEON_TXFORMAT_ARGB4444:
 	case RADEON_TXFORMAT_VYUY422:
 	case RADEON_TXFORMAT_YVYU422:
-	case RADEON_TXFORMAT_DXT1:
 	case RADEON_TXFORMAT_SHADOW16:
 	case RADEON_TXFORMAT_LDUDV655:
 	case RADEON_TXFORMAT_DUDV88:
@@ -1382,12 +1381,19 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 		break;
 	case RADEON_TXFORMAT_ARGB8888:
 	case RADEON_TXFORMAT_RGBA8888:
-	case RADEON_TXFORMAT_DXT23:
-	case RADEON_TXFORMAT_DXT45:
 	case RADEON_TXFORMAT_SHADOW32:
 	case RADEON_TXFORMAT_LDUDUV8888:
 		track->textures[i].cpp = 4;
 		break;
+	case RADEON_TXFORMAT_DXT1:
+		track->textures[i].cpp = 1;
+		track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+		break;
+	case RADEON_TXFORMAT_DXT23:
+	case RADEON_TXFORMAT_DXT45:
+		track->textures[i].cpp = 1;
+		track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+		break;
 	}
 	track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 	track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
@@ -2731,6 +2737,7 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
 	DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
 	DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
 	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
+	DRM_ERROR("compress format %d\n", t->compress_format);
 }
 
 static int r100_cs_track_cube(struct radeon_device *rdev,
@@ -2760,6 +2767,36 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 	return 0;
 }
 
+static int r100_track_compress_size(int compress_format, int w, int h)
+{
+	int block_width, block_height, block_bytes;
+	int wblocks, hblocks;
+	int min_wblocks;
+	int sz;
+
+	block_width = 4;
+	block_height = 4;
+
+	switch (compress_format) {
+	case R100_TRACK_COMP_DXT1:
+		block_bytes = 8;
+		min_wblocks = 4;
+		break;
+	default:
+	case R100_TRACK_COMP_DXT35:
+		block_bytes = 16;
+		min_wblocks = 2;
+		break;
+	}
+
+	hblocks = (h + block_height - 1) / block_height;
+	wblocks = (w + block_width - 1) / block_width;
+	if (wblocks < min_wblocks)
+		wblocks = min_wblocks;
+	sz = wblocks * hblocks * block_bytes;
+	return sz;
+}
+
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				       struct r100_cs_track *track)
 {
@@ -2797,9 +2834,15 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
 		h = h / (1 << i);
 		if (track->textures[u].roundup_h)
 			h = roundup_pow_of_two(h);
-		size += w * h;
+		if (track->textures[u].compress_format) {
+
+			size += r100_track_compress_size(track->textures[u].compress_format, w, h);
+			/* compressed textures are block based */
+		} else
+			size += w * h;
 	}
 	size *= track->textures[u].cpp;
+
 	switch (track->textures[u].tex_coord_type) {
 	case 0:
 		break;
@@ -2967,6 +3010,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 		track->arrays[i].esize = 0x7F;
 	}
 	for (i = 0; i < track->num_texture; i++) {
+		track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 		track->textures[i].pitch = 16536;
 		track->textures[i].width = 16536;
 		track->textures[i].height = 16536;
@@ -3399,6 +3443,8 @@ int r100_init(struct radeon_device *rdev)
 	r100_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 	/* Get vram informations */
 	r100_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
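
Note: the new r100_track_compress_size() above sizes DXT textures by 4x4 texel blocks (8 bytes per block for DXT1, 16 for DXT3/5) instead of per texel, which is why the DXT cases set cpp = 1 — bytes-per-texel is fractional for DXT1. A quick userspace re-derivation of the same math, for checking the numbers; this is illustrative, not kernel code:

#include <stdio.h>

/* Standalone copy of the block-size math added above. */
static int compress_size(int dxt1, int w, int h)
{
    int block_bytes = dxt1 ? 8 : 16;
    int min_wblocks = dxt1 ? 4 : 2;      /* minimum row width in blocks */
    int wblocks = (w + 3) / 4, hblocks = (h + 3) / 4;

    if (wblocks < min_wblocks)
        wblocks = min_wblocks;
    return wblocks * hblocks * block_bytes;
}

int main(void)
{
    /* 256x256 DXT1: 64*64 blocks * 8 bytes = 32768 bytes,
     * i.e. half a byte per texel */
    printf("256x256 DXT1: %d bytes\n", compress_size(1, 256, 256));
    /* 256x256 DXT5: 64*64 blocks * 16 bytes = 65536 bytes */
    printf("256x256 DXT5: %d bytes\n", compress_size(0, 256, 256));
    return 0;
}
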
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index ca50903dd2bb..7188c3778ee2 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -28,6 +28,10 @@ struct r100_cs_cube_info {
 	unsigned height;
 };
 
+#define R100_TRACK_COMP_NONE   0
+#define R100_TRACK_COMP_DXT1   1
+#define R100_TRACK_COMP_DXT35  2
+
 struct r100_cs_track_texture {
 	struct radeon_bo *robj;
 	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
@@ -44,6 +48,7 @@ struct r100_cs_track_texture {
 	bool enabled;
 	bool roundup_w;
 	bool roundup_h;
+	unsigned compress_format;
 };
 
 struct r100_cs_track_limits {
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index eb740fc3549f..20942127c46b 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -401,7 +401,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_TXFORMAT_Y8:
 		track->textures[i].cpp = 1;
 		break;
-	case R200_TXFORMAT_DXT1:
 	case R200_TXFORMAT_AI88:
 	case R200_TXFORMAT_ARGB1555:
 	case R200_TXFORMAT_RGB565:
@@ -418,9 +417,16 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 	case R200_TXFORMAT_ABGR8888:
 	case R200_TXFORMAT_BGR111110:
 	case R200_TXFORMAT_LDVDU8888:
+		track->textures[i].cpp = 4;
+		break;
+	case R200_TXFORMAT_DXT1:
+		track->textures[i].cpp = 1;
+		track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+		break;
 	case R200_TXFORMAT_DXT23:
 	case R200_TXFORMAT_DXT45:
-		track->textures[i].cpp = 4;
+		track->textures[i].cpp = 1;
+		track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
 		break;
 	}
 	track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 83378c39d0e3..83490c2b5061 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -686,7 +686,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_TXO_MACRO_TILE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_TXO_MICRO_TILE;
+
+		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
 		break;
 	/* Tracked registers */
@@ -852,7 +860,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_Z6Y5X5:
 		case R300_TX_FORMAT_W4Z4Y4X4:
 		case R300_TX_FORMAT_W1Z5Y5X5:
-		case R300_TX_FORMAT_DXT1:
 		case R300_TX_FORMAT_D3DMFT_CxV8U8:
 		case R300_TX_FORMAT_B8G8_B8G8:
 		case R300_TX_FORMAT_G8R8_G8B8:
@@ -866,8 +873,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case 0x17:
 		case R300_TX_FORMAT_FL_I32:
 		case 0x1e:
-		case R300_TX_FORMAT_DXT3:
-		case R300_TX_FORMAT_DXT5:
 			track->textures[i].cpp = 4;
 			break;
 		case R300_TX_FORMAT_W16Z16Y16X16:
@@ -878,6 +883,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 		case R300_TX_FORMAT_FL_R32G32B32A32:
 			track->textures[i].cpp = 16;
 			break;
+		case R300_TX_FORMAT_DXT1:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
+			break;
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
+			track->textures[i].cpp = 1;
+			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
+			break;
 		default:
 			DRM_ERROR("Invalid texture format %u\n",
 				  (idx_value & 0x1F));
@@ -1324,6 +1338,8 @@ int r300_init(struct radeon_device *rdev)
 	r300_errata(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 	/* Get vram informations */
 	r300_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index cb2e470f97d4..34bffa0e4b73 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
 	int sz;
 	int addr;
 	int type;
-	int clamp;
+	int isclamp;
 	int stride;
 	RING_LOCALS;
 
@@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
 	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
 
 	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
-	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+	isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
 
 	addr |= (type << 16);
-	addr |= (clamp << 17);
+	addr |= (isclamp << 17);
 
 	stride = type ? 4 : 6;
 
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f5cf874dc62a..5c6058c6ddde 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1863,6 +1863,14 @@ int r600_startup(struct radeon_device *rdev)
 	}
 	r600_gpu_init(rdev);
 
+	if (!rdev->r600_blit.shader_obj) {
+		r = r600_blit_init(rdev);
+		if (r) {
+			DRM_ERROR("radeon: failed blitter (%d).\n", r);
+			return r;
+		}
+	}
+
 	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
 	if (unlikely(r != 0))
 		return r;
@@ -2038,12 +2046,6 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = r600_blit_init(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failed blitter (%d).\n", r);
-		return r;
-	}
-
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
@@ -2065,6 +2067,10 @@ int r600_init(struct radeon_device *rdev)
 			rdev->accel_working = false;
 		}
 	}
+
+	r = r600_audio_init(rdev);
+	if (r)
+		return r; /* TODO error handling */
 	return 0;
 }
 
@@ -2073,6 +2079,7 @@ void r600_fini(struct radeon_device *rdev)
 	/* Suspend operations */
 	r600_suspend(rdev);
 
+	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
new file mode 100644
index 000000000000..99e2c3891a7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -0,0 +1,267 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Christian König.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Christian König
25 */
26#include "drmP.h"
27#include "radeon.h"
28#include "radeon_reg.h"
29#include "atom.h"
30
31#define AUDIO_TIMER_INTERVALL 100 /* 1/10 sekund should be enough */
32
33/*
34 * check if the chipset is supported
35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{
38 return rdev->family >= CHIP_R600
39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740;
42}
43
44/*
45 * current number of channels
46 */
47static int r600_audio_channels(struct radeon_device *rdev)
48{
49 return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
50}
51
52/*
53 * current bits per sample
54 */
55static int r600_audio_bits_per_sample(struct radeon_device *rdev)
56{
57 uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
58 switch (value) {
59 case 0x0: return 8;
60 case 0x1: return 16;
61 case 0x2: return 20;
62 case 0x3: return 24;
63 case 0x4: return 32;
64 }
65
66 DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
67
68 return 16;
69}
70
71/*
72 * current sampling rate in HZ
73 */
74static int r600_audio_rate(struct radeon_device *rdev)
75{
76 uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
77 uint32_t result;
78
79 if (value & 0x4000)
80 result = 44100;
81 else
82 result = 48000;
83
84 result *= ((value >> 11) & 0x7) + 1;
85 result /= ((value >> 8) & 0x7) + 1;
86
87 return result;
88}
89
90/*
91 * iec 60958 status bits
92 */
93static uint8_t r600_audio_status_bits(struct radeon_device *rdev)
94{
95 return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
96}
97
98/*
99 * iec 60958 category code
100 */
101static uint8_t r600_audio_category_code(struct radeon_device *rdev)
102{
103 return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
104}
105
106/*
107 * update all hdmi interfaces with current audio parameters
108 */
109static void r600_audio_update_hdmi(unsigned long param)
110{
111 struct radeon_device *rdev = (struct radeon_device *)param;
112 struct drm_device *dev = rdev->ddev;
113
114 int channels = r600_audio_channels(rdev);
115 int rate = r600_audio_rate(rdev);
116 int bps = r600_audio_bits_per_sample(rdev);
117 uint8_t status_bits = r600_audio_status_bits(rdev);
118 uint8_t category_code = r600_audio_category_code(rdev);
119
120 struct drm_encoder *encoder;
121 int changes = 0;
122
123 changes |= channels != rdev->audio_channels;
124 changes |= rate != rdev->audio_rate;
125 changes |= bps != rdev->audio_bits_per_sample;
126 changes |= status_bits != rdev->audio_status_bits;
127 changes |= category_code != rdev->audio_category_code;
128
129 if (changes) {
130 rdev->audio_channels = channels;
131 rdev->audio_rate = rate;
132 rdev->audio_bits_per_sample = bps;
133 rdev->audio_status_bits = status_bits;
134 rdev->audio_category_code = category_code;
135 }
136
137 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
138 if (changes || r600_hdmi_buffer_status_changed(encoder))
139 r600_hdmi_update_audio_settings(
140 encoder, channels,
141 rate, bps, status_bits,
142 category_code);
143 }
144
145 mod_timer(&rdev->audio_timer,
146 jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
147}
148
149/*
150 * initialize the audio vars and register the update timer
151 */
152int r600_audio_init(struct radeon_device *rdev)
153{
154 if (!r600_audio_chipset_supported(rdev))
155 return 0;
156
157 DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
158 WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
159
160 rdev->audio_channels = -1;
161 rdev->audio_rate = -1;
162 rdev->audio_bits_per_sample = -1;
163 rdev->audio_status_bits = 0;
164 rdev->audio_category_code = 0;
165
166 setup_timer(
167 &rdev->audio_timer,
168 r600_audio_update_hdmi,
169 (unsigned long)rdev);
170
171 mod_timer(&rdev->audio_timer, jiffies + 1);
172
173 return 0;
174}
175
176/*
177 * determin how the encoders and audio interface is wired together
178 */
179int r600_audio_tmds_index(struct drm_encoder *encoder)
180{
181 struct drm_device *dev = encoder->dev;
182 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
183 struct drm_encoder *other;
184
185 switch (radeon_encoder->encoder_id) {
186 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
187 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
188 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
189 return 0;
190
191 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
192 /* special case, check if a TMDS1 is present */
193 list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
194 if (to_radeon_encoder(other)->encoder_id ==
195 ENCODER_OBJECT_ID_INTERNAL_TMDS1)
196 return 1;
197 }
198 return 0;
199
200 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
201 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
202 return 1;
203
204 default:
205 DRM_ERROR("Unsupported encoder type 0x%02X\n",
206 radeon_encoder->encoder_id);
207 return -1;
208 }
209}
210
211/*
212 * attach the audio codec to the clock source of the encoder
213 */
214void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
215{
216 struct drm_device *dev = encoder->dev;
217 struct radeon_device *rdev = dev->dev_private;
218 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
219 int base_rate = 48000;
220
221 switch (radeon_encoder->encoder_id) {
222 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
223 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
224 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
225 break;
226
227 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
228 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
229 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
230 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
231 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
232 break;
233
234 default:
235 DRM_ERROR("Unsupported encoder type 0x%02X\n",
236 radeon_encoder->encoder_id);
237 return;
238 }
239
240 switch (r600_audio_tmds_index(encoder)) {
241 case 0:
242 WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
243 WREG32(R600_AUDIO_PLL1_DIV, clock*100);
244 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
245 break;
246
247 case 1:
248 WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
249 WREG32(R600_AUDIO_PLL2_DIV, clock*100);
250 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
251 break;
252 }
253}
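
The MUL/DIV values above look arbitrary until the ratio is worked through: with base_rate = 48000 and clock in kHz, the pixel clock cancels out and the audio PLL always produces the same reference. The 24 MHz result below is my arithmetic, not something the patch states:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t base_rate = 48000;
	uint64_t clock = 74250;          /* pixel clock in kHz (74.25 MHz) */
	uint64_t mul = base_rate * 50;   /* written to R600_AUDIO_PLLx_MUL */
	uint64_t div = clock * 100;      /* written to R600_AUDIO_PLLx_DIV */

	/* f_out = f_pixel * MUL / DIV
	 *       = (clock * 1000) * (48000 * 50) / (clock * 100)
	 *       = 48000 * 500 = 24 MHz, independent of the pixel clock */
	printf("audio reference: %llu Hz\n",
	       (unsigned long long)(clock * 1000 * mul / div));
	return 0;
}
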
254
255/*
256 * release the audio timer
257 * TODO: How to do this correctly on SMP systems?
258 */
259void r600_audio_fini(struct radeon_device *rdev)
260{
261 if (!r600_audio_chipset_supported(rdev))
262 return;
263
264 WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
265
266 del_timer(&rdev->audio_timer);
267}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0d820764f340..44060b92d9e6 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -170,7 +170,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
170 idx, relocs_chunk->length_dw); 170 idx, relocs_chunk->length_dw);
171 return -EINVAL; 171 return -EINVAL;
172 } 172 }
173 *cs_reloc = &p->relocs[0]; 173 *cs_reloc = p->relocs;
174 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 174 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
175 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 175 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
176 return 0; 176 return 0;
@@ -717,7 +717,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
717 if (p->chunk_relocs_idx == -1) { 717 if (p->chunk_relocs_idx == -1) {
718 return 0; 718 return 0;
719 } 719 }
720 p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL); 720 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
721 if (p->relocs == NULL) { 721 if (p->relocs == NULL) {
722 return -ENOMEM; 722 return -ENOMEM;
723 } 723 }
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
new file mode 100644
index 000000000000..fcc949df0e5d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -0,0 +1,506 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Christian König.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Christian König
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29#include "atom.h"
30
31/*
32 * HDMI color format
33 */
34enum r600_hdmi_color_format {
35 RGB = 0,
36 YCC_422 = 1,
37 YCC_444 = 2
38};
39
40/*
41 * IEC60958 status bits
42 */
43enum r600_hdmi_iec_status_bits {
44 AUDIO_STATUS_DIG_ENABLE = 0x01,
45 AUDIO_STATUS_V = 0x02,
46 AUDIO_STATUS_VCFG = 0x04,
47 AUDIO_STATUS_EMPHASIS = 0x08,
48 AUDIO_STATUS_COPYRIGHT = 0x10,
49 AUDIO_STATUS_NONAUDIO = 0x20,
50 AUDIO_STATUS_PROFESSIONAL = 0x40,
51 AUDIO_STATUS_LEVEL = 0x80
52};
53
54struct {
55 uint32_t Clock;
56
57 int N_32kHz;
58 int CTS_32kHz;
59
60 int N_44_1kHz;
61 int CTS_44_1kHz;
62
63 int N_48kHz;
64 int CTS_48kHz;
65
66} r600_hdmi_ACR[] = {
67 /* 32kHz 44.1kHz 48kHz */
68 /* Clock N CTS N CTS N CTS */
69 { 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25.20/1.001 MHz */
70 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
71 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
72 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
73 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
74 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
75 { 74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
76 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
77 { 148351, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
78 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
79 { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
80};
81
82/*
83 * calculate CTS value if it's not found in the table
84 */
85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
86{
87 if (*CTS == 0)
88 *CTS = clock*N/(128*freq)*1000;
89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
90 N, *CTS, freq);
91}
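
Both the table and this fallback target the HDMI audio clock regeneration relation, 128 * fs = f_TMDS * N / CTS. A standalone check against the 74.25 MHz / 48 kHz table row follows; note that the fallback above multiplies by 1000 only after the integer division, so for clocks that do have exact table values it can round low, which is one reason the table exists:

#include <stdio.h>

int main(void)
{
	/* table row above: 74.25 MHz pixel clock, 48 kHz audio -> N=6144, CTS=74250 */
	unsigned long long clock = 74250;   /* kHz */
	unsigned long long n = 6144, cts = 74250;

	/* ACR relation: 128 * fs = f_TMDS * N / CTS */
	unsigned long long fs = clock * 1000 * n / (128 * cts);
	printf("recovered fs = %llu Hz\n", fs);   /* prints 48000 */
	return 0;
}
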
92
93/*
94 * update the N and CTS parameters for a given pixel clock rate
95 */
96static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
97{
98 struct drm_device *dev = encoder->dev;
99 struct radeon_device *rdev = dev->dev_private;
100 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
101 int CTS;
102 int N;
103 int i;
104
105 for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
106
107 CTS = r600_hdmi_ACR[i].CTS_32kHz;
108 N = r600_hdmi_ACR[i].N_32kHz;
109 r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
110 WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
111 WREG32(offset+R600_HDMI_32kHz_N, N);
112
113 CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
114 N = r600_hdmi_ACR[i].N_44_1kHz;
115 r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
116 WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
117 WREG32(offset+R600_HDMI_44_1kHz_N, N);
118
119 CTS = r600_hdmi_ACR[i].CTS_48kHz;
120 N = r600_hdmi_ACR[i].N_48kHz;
121 r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
122 WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
123 WREG32(offset+R600_HDMI_48kHz_N, N);
124}
125
126/*
127 * calculate the crc for a given info frame
128 */
129static void r600_hdmi_infoframe_checksum(uint8_t packetType,
130 uint8_t versionNumber,
131 uint8_t length,
132 uint8_t *frame)
133{
134 int i;
135 frame[0] = packetType + versionNumber + length;
136 for (i = 1; i <= length; i++)
137 frame[0] += frame[i];
138 frame[0] = 0x100 - frame[0];
139}
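
The rule being implemented is the CEA-861 infoframe checksum: header plus payload plus checksum must sum to zero modulo 256. A standalone copy of the routine with a verification pass (the payload bytes are arbitrary):

#include <stdio.h>
#include <stdint.h>

static void infoframe_checksum(uint8_t type, uint8_t version,
			       uint8_t length, uint8_t *frame)
{
	int i;
	frame[0] = type + version + length;
	for (i = 1; i <= length; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0];
}

int main(void)
{
	uint8_t frame[14] = { 0, 0x12, 0x28, 0x00, 0x10 };   /* arbitrary payload */
	uint8_t sum = 0x82 + 0x02 + 0x0D;                    /* AVI infoframe header */
	int i;

	infoframe_checksum(0x82, 0x02, 0x0D, frame);
	for (i = 0; i <= 0x0D; i++)
		sum += frame[i];
	printf("sum mod 256 = %u\n", sum);                   /* prints 0 */
	return 0;
}
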
140
141/*
142 * build an HDMI Video Info Frame
143 */
144static void r600_hdmi_videoinfoframe(
145 struct drm_encoder *encoder,
146 enum r600_hdmi_color_format color_format,
147 int active_information_present,
148 uint8_t active_format_aspect_ratio,
149 uint8_t scan_information,
150 uint8_t colorimetry,
151 uint8_t ex_colorimetry,
152 uint8_t quantization,
153 int ITC,
154 uint8_t picture_aspect_ratio,
155 uint8_t video_format_identification,
156 uint8_t pixel_repetition,
157 uint8_t non_uniform_picture_scaling,
158 uint8_t bar_info_data_valid,
159 uint16_t top_bar,
160 uint16_t bottom_bar,
161 uint16_t left_bar,
162 uint16_t right_bar
163)
164{
165 struct drm_device *dev = encoder->dev;
166 struct radeon_device *rdev = dev->dev_private;
167 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
168
169 uint8_t frame[14];
170
171 frame[0x0] = 0;
172 frame[0x1] =
173 (scan_information & 0x3) |
174 ((bar_info_data_valid & 0x3) << 2) |
175 ((active_information_present & 0x1) << 4) |
176 ((color_format & 0x3) << 5);
177 frame[0x2] =
178 (active_format_aspect_ratio & 0xF) |
179 ((picture_aspect_ratio & 0x3) << 4) |
180 ((colorimetry & 0x3) << 6);
181 frame[0x3] =
182 (non_uniform_picture_scaling & 0x3) |
183 ((quantization & 0x3) << 2) |
184 ((ex_colorimetry & 0x7) << 4) |
185 ((ITC & 0x1) << 7);
186 frame[0x4] = (video_format_identification & 0x7F);
187 frame[0x5] = (pixel_repetition & 0xF);
188 frame[0x6] = (top_bar & 0xFF);
189 frame[0x7] = (top_bar >> 8);
190 frame[0x8] = (bottom_bar & 0xFF);
191 frame[0x9] = (bottom_bar >> 8);
192 frame[0xA] = (left_bar & 0xFF);
193 frame[0xB] = (left_bar >> 8);
194 frame[0xC] = (right_bar & 0xFF);
195 frame[0xD] = (right_bar >> 8);
196
197 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
198
199 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
200 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
201 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
202 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
203 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
204 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
205 WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
206 frame[0xC] | (frame[0xD] << 8));
207}
208
209/*
210 * build an Audio Info Frame
211 */
212static void r600_hdmi_audioinfoframe(
213 struct drm_encoder *encoder,
214 uint8_t channel_count,
215 uint8_t coding_type,
216 uint8_t sample_size,
217 uint8_t sample_frequency,
218 uint8_t format,
219 uint8_t channel_allocation,
220 uint8_t level_shift,
221 int downmix_inhibit
222)
223{
224 struct drm_device *dev = encoder->dev;
225 struct radeon_device *rdev = dev->dev_private;
226 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
227
228 uint8_t frame[11];
229
230 frame[0x0] = 0;
231 frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
232 frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
233 frame[0x3] = format;
234 frame[0x4] = channel_allocation;
235 frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
236 frame[0x6] = 0;
237 frame[0x7] = 0;
238 frame[0x8] = 0;
239 frame[0x9] = 0;
240 frame[0xA] = 0;
241
242 r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
243
244 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
245 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
246 WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
247 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
248}
249
250/*
251 * test if audio buffer is filled enough to start playing
252 */
253static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
254{
255 struct drm_device *dev = encoder->dev;
256 struct radeon_device *rdev = dev->dev_private;
257 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
258
259 return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
260}
261
262/*
263 * has the buffer status changed since the last call?
264 */
265int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
266{
267 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
268 int status, result;
269
270 if (!radeon_encoder->hdmi_offset)
271 return 0;
272
273 status = r600_hdmi_is_audio_buffer_filled(encoder);
274 result = radeon_encoder->hdmi_buffer_status != status;
275 radeon_encoder->hdmi_buffer_status = status;
276
277 return result;
278}
279
280/*
281 * write the audio workaround status to the hardware
282 */
283void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
284{
285 struct drm_device *dev = encoder->dev;
286 struct radeon_device *rdev = dev->dev_private;
287 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
288 uint32_t offset = radeon_encoder->hdmi_offset;
289
290 if (!offset)
291 return;
292
293 if (r600_hdmi_is_audio_buffer_filled(encoder)) {
294 /* disable the audio workaround and start delivering audio frames */
295 WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
296
297 } else if (radeon_encoder->hdmi_audio_workaround) {
298 /* enable the audio workaround and start delivering audio frames */
299 WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
300
301 } else {
302 /* disable the audio workaround and stop delivering audio frames */
303 WREG32_P(offset+R600_HDMI_CNTL, 0x00000000, ~0x00001001);
304 }
305}
306
307
308/*
309 * update the info frames with the data from the current display mode
310 */
311void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
312{
313 struct drm_device *dev = encoder->dev;
314 struct radeon_device *rdev = dev->dev_private;
315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
316
317 if (!offset)
318 return;
319
320 r600_audio_set_clock(encoder, mode->clock);
321
322 WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
323 WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
324 WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
325
326 r600_hdmi_update_ACR(encoder, mode->clock);
327
328 WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
329
330 WREG32(offset+R600_HDMI_VERSION, 0x202);
331
332 r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
333 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
334
335 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
336 WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
337 WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
338 WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
339 WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
340
341 r600_hdmi_audio_workaround(encoder);
342
343 /* audio packets per line, does anyone know how to calculate this? */
344 WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
345
346 /* update? reset? don't really know */
347 WREG32_P(offset+R600_HDMI_CNTL, 0x14000000, ~0x14000000);
348}
349
350/*
351 * update settings with current parameters from audio engine
352 */
353void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
354 int channels,
355 int rate,
356 int bps,
357 uint8_t status_bits,
358 uint8_t category_code)
359{
360 struct drm_device *dev = encoder->dev;
361 struct radeon_device *rdev = dev->dev_private;
362 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
363
364 uint32_t iec;
365
366 if (!offset)
367 return;
368
369 DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
370 r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
371 channels, rate, bps);
372 DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
373 (int)status_bits, (int)category_code);
374
375 iec = 0;
376 if (status_bits & AUDIO_STATUS_PROFESSIONAL)
377 iec |= 1 << 0;
378 if (status_bits & AUDIO_STATUS_NONAUDIO)
379 iec |= 1 << 1;
380 if (status_bits & AUDIO_STATUS_COPYRIGHT)
381 iec |= 1 << 2;
382 if (status_bits & AUDIO_STATUS_EMPHASIS)
383 iec |= 1 << 3;
384
385 iec |= category_code << 8;
386
387 switch (rate) {
388 case 32000: iec |= 0x3 << 24; break;
389 case 44100: iec |= 0x0 << 24; break;
390 case 88200: iec |= 0x8 << 24; break;
391 case 176400: iec |= 0xc << 24; break;
392 case 48000: iec |= 0x2 << 24; break;
393 case 96000: iec |= 0xa << 24; break;
394 case 192000: iec |= 0xe << 24; break;
395 }
396
397 WREG32(offset+R600_HDMI_IEC60958_1, iec);
398
399 iec = 0;
400 switch (bps) {
401 case 16: iec |= 0x2; break;
402 case 20: iec |= 0x3; break;
403 case 24: iec |= 0xb; break;
404 }
405 if (status_bits & AUDIO_STATUS_V)
406 iec |= 0x5 << 16;
407
408 WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
409
410 /* 0x021 or 0x031 sets the audio frame length */
411 WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
412 r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
413
414 r600_hdmi_audio_workaround(encoder);
415
416 /* update? reset? don't really know */
417 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
418}
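
The two switches above fill IEC 60958 channel-status fields: a sampling-frequency code in bits 24-27 of the first status word and a word-length code in the low bits of the second. The same mapping, table-driven, as a standalone sketch (iec_rate_code is my name, not the driver's):

#include <stdio.h>
#include <stdint.h>

static const struct { int rate; uint32_t code; } iec_rates[] = {
	{ 32000, 0x3 }, { 44100, 0x0 }, { 48000, 0x2 },
	{ 88200, 0x8 }, { 96000, 0xa }, { 176400, 0xc }, { 192000, 0xe },
};

static uint32_t iec_rate_code(int rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(iec_rates) / sizeof(iec_rates[0]); i++)
		if (iec_rates[i].rate == rate)
			return iec_rates[i].code << 24;
	return 0;   /* unknown rate: leave the field zero, like the switch above */
}

int main(void)
{
	printf("0x%08x\n", (unsigned)iec_rate_code(96000));   /* prints 0x0a000000 */
	return 0;
}
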
419
420/*
421 * enable/disable the HDMI engine
422 */
423void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
424{
425 struct drm_device *dev = encoder->dev;
426 struct radeon_device *rdev = dev->dev_private;
427 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
428 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
429
430 if (!offset)
431 return;
432
433 DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
434
435 /* some versions of atombios ignore the enable HDMI flag
436 * so enabling/disabling HDMI was moved here for TMDS1+2 */
437 switch (radeon_encoder->encoder_id) {
438 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
439 WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
440 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
441 break;
442
443 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
444 WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
445 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
446 break;
447
448 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
449 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
450 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
451 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
452 /* This part is doubtful in my opinion */
453 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
454 break;
455
456 default:
457 DRM_ERROR("unknown HDMI output type\n");
458 break;
459 }
460}
461
462/*
463 * determine at which register offset the HDMI encoder sits
464 */
465void r600_hdmi_init(struct drm_encoder *encoder)
466{
467 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
468
469 switch (radeon_encoder->encoder_id) {
470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
471 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
472 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
473 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
474 break;
475
476 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
477 switch (r600_audio_tmds_index(encoder)) {
478 case 0:
479 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
480 break;
481 case 1:
482 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
483 break;
484 default:
485 radeon_encoder->hdmi_offset = 0;
486 break;
487 }
break;
488 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
489 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
490 break;
491
492 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
493 radeon_encoder->hdmi_offset = R600_HDMI_DIG;
494 break;
495
496 default:
497 radeon_encoder->hdmi_offset = 0;
498 break;
499 }
500
501 DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
502 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
503
504 /* TODO: make this configurable */
505 radeon_encoder->hdmi_audio_workaround = 0;
506}
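
Read together with the encoder hooks added further down in this patch, the intended call flow appears to be as follows (my reading of the call sites in radeon_encoders.c, not documentation from the patch itself):

/*
 * radeon_add_atom_encoder()
 *   -> r600_hdmi_init(encoder)             pick the HDMI block offset
 * atombios_digital_setup()
 *   -> r600_hdmi_enable(encoder, hdmi)     gate the engine for HDMI sinks
 * radeon_atom_encoder_mode_set()
 *   -> r600_hdmi_setmode(encoder, mode)    program ACR values + video infoframe
 * r600_audio_update_hdmi() (timer)
 *   -> r600_hdmi_update_audio_settings()   refresh audio infoframe + IEC bits
 */
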
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index e2d1f5f33f7e..d0e28ffdeda9 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -110,5 +110,79 @@
110#define R600_BIOS_6_SCRATCH 0x173c 110#define R600_BIOS_6_SCRATCH 0x173c
111#define R600_BIOS_7_SCRATCH 0x1740 111#define R600_BIOS_7_SCRATCH 0x1740
112 112
113/* Audio, these regs were reverse engineered,
114 * so the chance is high that the naming is wrong
115 * R6xx+ ??? */
116
117/* Audio clocks */
118#define R600_AUDIO_PLL1_MUL 0x0514
119#define R600_AUDIO_PLL1_DIV 0x0518
120#define R600_AUDIO_PLL2_MUL 0x0524
121#define R600_AUDIO_PLL2_DIV 0x0528
122#define R600_AUDIO_CLK_SRCSEL 0x0534
123
124/* Audio general */
125#define R600_AUDIO_ENABLE 0x7300
126#define R600_AUDIO_TIMING 0x7344
127
128/* Audio params */
129#define R600_AUDIO_VENDOR_ID 0x7380
130#define R600_AUDIO_REVISION_ID 0x7384
131#define R600_AUDIO_ROOT_NODE_COUNT 0x7388
132#define R600_AUDIO_NID1_NODE_COUNT 0x738c
133#define R600_AUDIO_NID1_TYPE 0x7390
134#define R600_AUDIO_SUPPORTED_SIZE_RATE 0x7394
135#define R600_AUDIO_SUPPORTED_CODEC 0x7398
136#define R600_AUDIO_SUPPORTED_POWER_STATES 0x739c
137#define R600_AUDIO_NID2_CAPS 0x73a0
138#define R600_AUDIO_NID3_CAPS 0x73a4
139#define R600_AUDIO_NID3_PIN_CAPS 0x73a8
140
141/* Audio conn list */
142#define R600_AUDIO_CONN_LIST_LEN 0x73ac
143#define R600_AUDIO_CONN_LIST 0x73b0
144
145/* Audio verbs */
146#define R600_AUDIO_RATE_BPS_CHANNEL 0x73c0
147#define R600_AUDIO_PLAYING 0x73c4
148#define R600_AUDIO_IMPLEMENTATION_ID 0x73c8
149#define R600_AUDIO_CONFIG_DEFAULT 0x73cc
150#define R600_AUDIO_PIN_SENSE 0x73d0
151#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
152#define R600_AUDIO_STATUS_BITS 0x73d8
153
154/* HDMI base register addresses */
155#define R600_HDMI_TMDS1 0x7400
156#define R600_HDMI_TMDS2 0x7700
157#define R600_HDMI_DIG 0x7800
158
159/* HDMI registers */
160#define R600_HDMI_ENABLE 0x00
161#define R600_HDMI_STATUS 0x04
162#define R600_HDMI_CNTL 0x08
163#define R600_HDMI_UNKNOWN_0 0x0C
164#define R600_HDMI_AUDIOCNTL 0x10
165#define R600_HDMI_VIDEOCNTL 0x14
166#define R600_HDMI_VERSION 0x18
167#define R600_HDMI_UNKNOWN_1 0x28
168#define R600_HDMI_VIDEOINFOFRAME_0 0x54
169#define R600_HDMI_VIDEOINFOFRAME_1 0x58
170#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
171#define R600_HDMI_VIDEOINFOFRAME_3 0x60
172#define R600_HDMI_32kHz_CTS 0xac
173#define R600_HDMI_32kHz_N 0xb0
174#define R600_HDMI_44_1kHz_CTS 0xb4
175#define R600_HDMI_44_1kHz_N 0xb8
176#define R600_HDMI_48kHz_CTS 0xbc
177#define R600_HDMI_48kHz_N 0xc0
178#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
179#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
180#define R600_HDMI_IEC60958_1 0xd4
181#define R600_HDMI_IEC60958_2 0xd8
182#define R600_HDMI_UNKNOWN_2 0xdc
183#define R600_HDMI_AUDIO_DEBUG_0 0xe0
184#define R600_HDMI_AUDIO_DEBUG_1 0xe4
185#define R600_HDMI_AUDIO_DEBUG_2 0xe8
186#define R600_HDMI_AUDIO_DEBUG_3 0xec
113 187
114#endif 188#endif
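
The HDMI registers above are offsets relative to one of the three base addresses, which is exactly what the offset+ arithmetic in r600_hdmi.c does. A trivial standalone illustration:

#include <stdio.h>

#define R600_HDMI_TMDS1   0x7400
#define R600_HDMI_STATUS  0x04
#define R600_HDMI_CNTL    0x08

int main(void)
{
	/* absolute MMIO address = block base + per-block register offset */
	printf("TMDS1 status: 0x%04x, cntl: 0x%04x\n",
	       R600_HDMI_TMDS1 + R600_HDMI_STATUS,   /* 0x7404 */
	       R600_HDMI_TMDS1 + R600_HDMI_CNTL);    /* 0x7408 */
	return 0;
}
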
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c938bb54123c..cd650fd3964e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
89extern int radeon_connector_table; 89extern int radeon_connector_table;
90extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_audio;
92 93
93/* 94/*
94 * Copy from radeon_drv.h so we don't have to include both and have conflicting 95 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -814,6 +815,14 @@ struct radeon_device {
814 struct r600_ih ih; /* r6/700 interrupt ring */ 815 struct r600_ih ih; /* r6/700 interrupt ring */
815 struct workqueue_struct *wq; 816 struct workqueue_struct *wq;
816 struct work_struct hotplug_work; 817 struct work_struct hotplug_work;
818
819 /* audio stuff */
820 struct timer_list audio_timer;
821 int audio_channels;
822 int audio_rate;
823 int audio_bits_per_sample;
824 uint8_t audio_status_bits;
825 uint8_t audio_category_code;
817}; 826};
818 827
819int radeon_device_init(struct radeon_device *rdev, 828int radeon_device_init(struct radeon_device *rdev,
@@ -1016,6 +1025,7 @@ extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
1016extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); 1025extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
1017extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); 1026extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
1018extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); 1027extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
1028extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
1019 1029
1020/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1030/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1021struct r100_mc_save { 1031struct r100_mc_save {
@@ -1146,6 +1156,21 @@ extern void r600_irq_fini(struct radeon_device *rdev);
1146extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); 1156extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
1147extern int r600_irq_set(struct radeon_device *rdev); 1157extern int r600_irq_set(struct radeon_device *rdev);
1148 1158
1159extern int r600_audio_init(struct radeon_device *rdev);
1160extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1161extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1162extern void r600_audio_fini(struct radeon_device *rdev);
1163extern void r600_hdmi_init(struct drm_encoder *encoder);
1164extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
1165extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1166extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1167extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
1168 int channels,
1169 int rate,
1170 int bps,
1171 uint8_t status_bits,
1172 uint8_t category_code);
1173
1149#include "radeon_object.h" 1174#include "radeon_object.h"
1150 1175
1151#endif 1176#endif
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index c5c45e626d74..8ba3de7994d4 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -87,6 +87,7 @@ int radeon_testing = 0;
87int radeon_connector_table = 0; 87int radeon_connector_table = 0;
88int radeon_tv = 1; 88int radeon_tv = 1;
89int radeon_new_pll = 1; 89int radeon_new_pll = 1;
90int radeon_audio = 1;
90 91
91MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 92MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
92module_param_named(no_wb, radeon_no_wb, int, 0444); 93module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -124,6 +125,9 @@ module_param_named(tv, radeon_tv, int, 0444);
124MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); 125MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
125module_param_named(new_pll, radeon_new_pll, int, 0444); 126module_param_named(new_pll, radeon_new_pll, int, 0444);
126 127
128MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
129module_param_named(audio, radeon_audio, int, 0444);
130
127static int radeon_suspend(struct drm_device *dev, pm_message_t state) 131static int radeon_suspend(struct drm_device *dev, pm_message_t state)
128{ 132{
129 drm_radeon_private_t *dev_priv = dev->dev_private; 133 drm_radeon_private_t *dev_priv = dev->dev_private;
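
Since the new parameter is registered with mode 0444 it is read-only once the module is loaded; it is set at load time, for example:

	modprobe radeon audio=0

or, for a built-in driver, with radeon.audio=0 on the kernel command line.
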
@@ -192,7 +196,7 @@ static struct drm_driver driver_old = {
192 .owner = THIS_MODULE, 196 .owner = THIS_MODULE,
193 .open = drm_open, 197 .open = drm_open,
194 .release = drm_release, 198 .release = drm_release,
195 .ioctl = drm_ioctl, 199 .unlocked_ioctl = drm_ioctl,
196 .mmap = drm_mmap, 200 .mmap = drm_mmap,
197 .poll = drm_poll, 201 .poll = drm_poll,
198 .fasync = drm_fasync, 202 .fasync = drm_fasync,
@@ -280,7 +284,7 @@ static struct drm_driver kms_driver = {
280 .owner = THIS_MODULE, 284 .owner = THIS_MODULE,
281 .open = drm_open, 285 .open = drm_open,
282 .release = drm_release, 286 .release = drm_release,
283 .ioctl = drm_ioctl, 287 .unlocked_ioctl = drm_ioctl,
284 .mmap = radeon_mmap, 288 .mmap = radeon_mmap,
285 .poll = drm_poll, 289 .poll = drm_poll,
286 .fasync = drm_fasync, 290 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index b4f23ec93201..0d1d908e5225 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -438,6 +438,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
438 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 438 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
439 union lvds_encoder_control args; 439 union lvds_encoder_control args;
440 int index = 0; 440 int index = 0;
441 int hdmi_detected = 0;
441 uint8_t frev, crev; 442 uint8_t frev, crev;
442 struct radeon_encoder_atom_dig *dig; 443 struct radeon_encoder_atom_dig *dig;
443 struct drm_connector *connector; 444 struct drm_connector *connector;
@@ -458,6 +459,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
458 if (!radeon_connector->con_priv) 459 if (!radeon_connector->con_priv)
459 return; 460 return;
460 461
462 if (drm_detect_hdmi_monitor(radeon_connector->edid))
463 hdmi_detected = 1;
464
461 dig_connector = radeon_connector->con_priv; 465 dig_connector = radeon_connector->con_priv;
462 466
463 memset(&args, 0, sizeof(args)); 467 memset(&args, 0, sizeof(args));
@@ -487,7 +491,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
487 case 1: 491 case 1:
488 args.v1.ucMisc = 0; 492 args.v1.ucMisc = 0;
489 args.v1.ucAction = action; 493 args.v1.ucAction = action;
490 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 494 if (hdmi_detected)
491 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 495 args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
492 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 496 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
493 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 497 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
@@ -512,7 +516,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
512 if (dig->coherent_mode) 516 if (dig->coherent_mode)
513 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT; 517 args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
514 } 518 }
515 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 519 if (hdmi_detected)
516 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; 520 args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
517 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 521 args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
518 args.v2.ucTruncate = 0; 522 args.v2.ucTruncate = 0;
@@ -552,7 +556,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
552 } 556 }
553 557
554 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 558 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
555 559 r600_hdmi_enable(encoder, hdmi_detected);
556} 560}
557 561
558int 562int
@@ -893,7 +897,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
893 } 897 }
894 898
895 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 899 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
896
897} 900}
898 901
899static void 902static void
@@ -1162,7 +1165,6 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1162 } 1165 }
1163 1166
1164 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1167 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1165
1166} 1168}
1167 1169
1168static void 1170static void
@@ -1265,6 +1267,8 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1265 break; 1267 break;
1266 } 1268 }
1267 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1269 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1270
1271 r600_hdmi_setmode(encoder, adjusted_mode);
1268} 1272}
1269 1273
1270static bool 1274static bool
@@ -1510,4 +1514,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1510 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 1514 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1511 break; 1515 break;
1512 } 1516 }
1517
1518 r600_hdmi_init(encoder);
1513} 1519}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 2944486871b0..60df2d7e7e4c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -66,8 +66,9 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
66 } 66 }
67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
68 if (r) { 68 if (r) {
69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", 69 if (r != -ERESTARTSYS)
70 size, initial_domain, alignment); 70 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
71 size, initial_domain, alignment, r);
71 mutex_lock(&rdev->ddev->struct_mutex); 72 mutex_lock(&rdev->ddev->struct_mutex);
72 drm_gem_object_unreference(gobj); 73 drm_gem_object_unreference(gobj);
73 mutex_unlock(&rdev->ddev->struct_mutex); 74 mutex_unlock(&rdev->ddev->struct_mutex);
@@ -350,9 +351,10 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
350 rbo = gobj->driver_private; 351 rbo = gobj->driver_private;
351 r = radeon_bo_reserve(rbo, false); 352 r = radeon_bo_reserve(rbo, false);
352 if (unlikely(r != 0)) 353 if (unlikely(r != 0))
353 return r; 354 goto out;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 355 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo); 356 radeon_bo_unreserve(rbo);
357out:
356 mutex_lock(&dev->struct_mutex); 358 mutex_lock(&dev->struct_mutex);
357 drm_gem_object_unreference(gobj); 359 drm_gem_object_unreference(gobj);
358 mutex_unlock(&dev->struct_mutex); 360 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
index a1bf11de308a..48b7cea31e08 100644
--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
92 &init->gart_textures_offset)) 92 &init->gart_textures_offset))
93 return -EFAULT; 93 return -EFAULT;
94 94
95 return drm_ioctl(file->f_path.dentry->d_inode, file, 95 return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
96 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
97} 96}
98 97
99typedef struct drm_radeon_clear32 { 98typedef struct drm_radeon_clear32 {
@@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
125 &clr->depth_boxes)) 124 &clr->depth_boxes))
126 return -EFAULT; 125 return -EFAULT;
127 126
128 return drm_ioctl(file->f_path.dentry->d_inode, file, 127 return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
129 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
130} 128}
131 129
132typedef struct drm_radeon_stipple32 { 130typedef struct drm_radeon_stipple32 {
@@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
149 &request->mask)) 147 &request->mask))
150 return -EFAULT; 148 return -EFAULT;
151 149
152 return drm_ioctl(file->f_path.dentry->d_inode, file, 150 return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
153 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
154} 151}
155 152
156typedef struct drm_radeon_tex_image32 { 153typedef struct drm_radeon_tex_image32 {
@@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
204 &image->data)) 201 &image->data))
205 return -EFAULT; 202 return -EFAULT;
206 203
207 return drm_ioctl(file->f_path.dentry->d_inode, file, 204 return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
208 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
209} 205}
210 206
211typedef struct drm_radeon_vertex2_32 { 207typedef struct drm_radeon_vertex2_32 {
@@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
238 &request->prim)) 234 &request->prim))
239 return -EFAULT; 235 return -EFAULT;
240 236
241 return drm_ioctl(file->f_path.dentry->d_inode, file, 237 return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
242 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
243} 238}
244 239
245typedef struct drm_radeon_cmd_buffer32 { 240typedef struct drm_radeon_cmd_buffer32 {
@@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
268 &request->boxes)) 263 &request->boxes))
269 return -EFAULT; 264 return -EFAULT;
270 265
271 return drm_ioctl(file->f_path.dentry->d_inode, file, 266 return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
272 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
273} 267}
274 268
275typedef struct drm_radeon_getparam32 { 269typedef struct drm_radeon_getparam32 {
@@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
293 &request->value)) 287 &request->value))
294 return -EFAULT; 288 return -EFAULT;
295 289
296 return drm_ioctl(file->f_path.dentry->d_inode, file, 290 return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
297 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
298} 291}
299 292
300typedef struct drm_radeon_mem_alloc32 { 293typedef struct drm_radeon_mem_alloc32 {
@@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
322 &request->region_offset)) 315 &request->region_offset))
323 return -EFAULT; 316 return -EFAULT;
324 317
325 return drm_ioctl(file->f_path.dentry->d_inode, file, 318 return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
326 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
327} 319}
328 320
329typedef struct drm_radeon_irq_emit32 { 321typedef struct drm_radeon_irq_emit32 {
@@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
345 &request->irq_seq)) 337 &request->irq_seq))
346 return -EFAULT; 338 return -EFAULT;
347 339
348 return drm_ioctl(file->f_path.dentry->d_inode, file, 340 return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
349 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
350} 341}
351 342
352/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ 343/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
@@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
372 &request->value)) 363 &request->value))
373 return -EFAULT; 364 return -EFAULT;
374 365
375 return drm_ioctl(file->f_dentry->d_inode, file, 366 return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
376 DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
377} 367}
378#else 368#else
379#define compat_radeon_cp_setparam NULL 369#define compat_radeon_cp_setparam NULL
@@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
413 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) 403 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
414 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; 404 fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
415 405
416 lock_kernel(); /* XXX for now */
417 if (fn != NULL) 406 if (fn != NULL)
418 ret = (*fn) (filp, cmd, arg); 407 ret = (*fn) (filp, cmd, arg);
419 else 408 else
420 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); 409 ret = drm_ioctl(filp, cmd, arg);
421 unlock_kernel();
422 410
423 return ret; 411 return ret;
424} 412}
@@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
431 if (nr < DRM_COMMAND_BASE) 419 if (nr < DRM_COMMAND_BASE)
432 return drm_compat_ioctl(filp, cmd, arg); 420 return drm_compat_ioctl(filp, cmd, arg);
433 421
434 lock_kernel(); /* XXX for now */ 422 ret = drm_ioctl(filp, cmd, arg);
435 ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
436 unlock_kernel();
437 423
438 return ret; 424 return ret;
439} 425}
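
All of the hunks above are one mechanical change: drm_ioctl lost its inode argument when it became an unlocked_ioctl (see the .unlocked_ioctl assignments elsewhere in this patch), and the lock_kernel()/unlock_kernel() pairs go away with it. In signature terms (as I recall the before/after prototypes):

/*
 * before: int  drm_ioctl(struct inode *inode, struct file *filp,
 *                        unsigned int cmd, unsigned long arg);
 * after:  long drm_ioctl(struct file *filp,
 *                        unsigned int cmd, unsigned long arg);
 */
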
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 44d4b652ea12..3dcbe130c422 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -334,6 +334,9 @@ struct radeon_encoder {
334 enum radeon_rmx_type rmx_type; 334 enum radeon_rmx_type rmx_type;
335 struct drm_display_mode native_mode; 335 struct drm_display_mode native_mode;
336 void *enc_priv; 336 void *enc_priv;
337 int hdmi_offset;
338 int hdmi_audio_workaround;
339 int hdmi_buffer_status;
337}; 340};
338 341
339struct radeon_connector_atom_dig { 342struct radeon_connector_atom_dig {
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 544e18ffaf22..d9ffe1f56e8f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -56,6 +56,13 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
56 kfree(bo); 56 kfree(bo);
57} 57}
58 58
59bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
60{
61 if (bo->destroy == &radeon_ttm_bo_destroy)
62 return true;
63 return false;
64}
65
59void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) 66void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
60{ 67{
61 u32 c = 0; 68 u32 c = 0;
@@ -71,6 +78,8 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
71 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 78 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
72 if (domain & RADEON_GEM_DOMAIN_CPU) 79 if (domain & RADEON_GEM_DOMAIN_CPU)
73 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 80 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
81 if (!c)
82 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
74 rbo->placement.num_placement = c; 83 rbo->placement.num_placement = c;
75 rbo->placement.num_busy_placement = c; 84 rbo->placement.num_busy_placement = c;
76} 85}
@@ -481,14 +490,20 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
481} 490}
482 491
483void radeon_bo_move_notify(struct ttm_buffer_object *bo, 492void radeon_bo_move_notify(struct ttm_buffer_object *bo,
484 struct ttm_mem_reg *mem) 493 struct ttm_mem_reg *mem)
485{ 494{
486 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 495 struct radeon_bo *rbo;
496 if (!radeon_ttm_bo_is_radeon_bo(bo))
497 return;
498 rbo = container_of(bo, struct radeon_bo, tbo);
487 radeon_bo_check_tiling(rbo, 0, 1); 499 radeon_bo_check_tiling(rbo, 0, 1);
488} 500}
489 501
490void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 502void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
491{ 503{
492 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 504 struct radeon_bo *rbo;
505 if (!radeon_ttm_bo_is_radeon_bo(bo))
506 return;
507 rbo = container_of(bo, struct radeon_bo, tbo);
493 radeon_bo_check_tiling(rbo, 0, 0); 508 radeon_bo_check_tiling(rbo, 0, 0);
494} 509}
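
radeon_ttm_bo_is_radeon_bo() identifies a buffer object by its destroy callback: only BOs created by this driver carry radeon_ttm_bo_destroy, so a pointer compare is a cheap type test before container_of(). The idiom in standalone form (all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct base_obj {
	void (*destroy)(struct base_obj *);
};

static void my_destroy(struct base_obj *obj)
{
	(void)obj;
}

/* downcasting is safe only when the destroy hook proves the object is ours */
static bool is_my_obj(const struct base_obj *obj)
{
	return obj->destroy == my_destroy;
}

int main(void)
{
	struct base_obj mine = { my_destroy };
	struct base_obj foreign = { NULL };

	printf("%d %d\n", is_my_obj(&mine), is_my_obj(&foreign));   /* 1 0 */
	return 0;
}
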
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index f6b69c2c0d00..a02f18011ad1 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -59,19 +59,17 @@ static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
59 * 59 *
60 * Returns: 60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true 61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTART: A wait for the buffer to become unreserved was interrupted by 62 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space. 63 * a signal. Release all buffer reservations and return to user-space.
64 */ 64 */
65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) 65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
66{ 66{
67 int r; 67 int r;
68 68
69retry:
70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 69 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
71 if (unlikely(r != 0)) { 70 if (unlikely(r != 0)) {
72 if (r == -ERESTART) 71 if (r != -ERESTARTSYS)
73 goto retry; 72 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
74 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
75 return r; 73 return r;
76 } 74 }
77 return 0; 75 return 0;
@@ -125,12 +123,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
125{ 123{
126 int r; 124 int r;
127 125
128retry:
129 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); 126 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
130 if (unlikely(r != 0)) { 127 if (unlikely(r != 0)) {
131 if (r == -ERESTART) 128 if (r != -ERESTARTSYS)
132 goto retry; 129 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
133 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
134 return r; 130 return r;
135 } 131 }
136 spin_lock(&bo->tbo.lock); 132 spin_lock(&bo->tbo.lock);
@@ -140,8 +136,6 @@ retry:
140 r = ttm_bo_wait(&bo->tbo, true, true, no_wait); 136 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
141 spin_unlock(&bo->tbo.lock); 137 spin_unlock(&bo->tbo.lock);
142 ttm_bo_unreserve(&bo->tbo); 138 ttm_bo_unreserve(&bo->tbo);
143 if (unlikely(r == -ERESTART))
144 goto retry;
145 return r; 139 return r;
146} 140}
147 141
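
With the silent retry loops gone, -ERESTARTSYS now travels up instead of being swallowed next to a pending signal. The expected caller pattern (a fragment, mirroring the radeon_gem_get_tiling_ioctl hunk above) is simply:

	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		return r;	/* may be -ERESTARTSYS: hand it back so the
				 * syscall is restarted after the signal */
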
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 34b08d307c81..8bce64cdc320 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -44,8 +44,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 44 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 45 struct radeon_device *rdev = dev->dev_private;
46 46
47 seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 47 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
48 seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 48 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
49 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
50 if (rdev->asic->get_memory_clock)
51 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
49 52
50 return 0; 53 return 0;
51} 54}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 391c973ec4db..9f5e2f929da9 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
42 /* Number of tests = 42 /* Number of tests =
43 * (Total GTT - IB pool - writeback page - ring buffer) / test size 43 * (Total GTT - IB pool - writeback page - ring buffer) / test size
44 */ 44 */
45 n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - 45 n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
46 rdev->cp.ring_size) / size; 46 rdev->cp.ring_size)) / size;
47 47
48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); 48 gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
49 if (!gtt_obj) { 49 if (!gtt_obj) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d2ed896cca01..7bed4122528c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -200,7 +200,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
200static void radeon_evict_flags(struct ttm_buffer_object *bo, 200static void radeon_evict_flags(struct ttm_buffer_object *bo,
201 struct ttm_placement *placement) 201 struct ttm_placement *placement)
202{ 202{
203 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); 203 struct radeon_bo *rbo;
204 static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
205
206 if (!radeon_ttm_bo_is_radeon_bo(bo)) {
207 placement->fpfn = 0;
208 placement->lpfn = 0;
209 placement->placement = &placements;
210 placement->busy_placement = &placements;
211 placement->num_placement = 1;
212 placement->num_busy_placement = 1;
213 return;
214 }
215 rbo = container_of(bo, struct radeon_bo, tbo);
204 switch (bo->mem.mem_type) { 216 switch (bo->mem.mem_type) {
205 case TTM_PL_VRAM: 217 case TTM_PL_VRAM:
206 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 218 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c1fcdddb6be6..368415df5f3a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -497,6 +497,8 @@ int rs400_init(struct radeon_device *rdev)
497 497
498 /* Initialize clocks */ 498 /* Initialize clocks */
499 radeon_get_clock_info(rdev->ddev); 499 radeon_get_clock_info(rdev->ddev);
500 /* Initialize power management */
501 radeon_pm_init(rdev);
500 /* Get vram informations */ 502 /* Get vram informations */
501 rs400_vram_info(rdev); 503 rs400_vram_info(rdev);
502 /* Initialize memory controller (also test AGP) */ 504 /* Initialize memory controller (also test AGP) */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 2d124bb57762..f58dc6710802 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -892,6 +892,14 @@ static int rv770_startup(struct radeon_device *rdev)
892 } 892 }
893 rv770_gpu_init(rdev); 893 rv770_gpu_init(rdev);
894 894
895 if (!rdev->r600_blit.shader_obj) {
896 r = r600_blit_init(rdev);
897 if (r) {
898 DRM_ERROR("radeon: failed blitter (%d).\n", r);
899 return r;
900 }
901 }
902
895 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 903 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
896 if (unlikely(r != 0)) 904 if (unlikely(r != 0))
897 return r; 905 return r;
@@ -1051,12 +1059,6 @@ int rv770_init(struct radeon_device *rdev)
1051 if (r) 1059 if (r)
1052 return r; 1060 return r;
1053 1061
1054 r = r600_blit_init(rdev);
1055 if (r) {
1056 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1057 return r;
1058 }
1059
1060 rdev->accel_working = true; 1062 rdev->accel_working = true;
1061 r = rv770_startup(rdev); 1063 r = rv770_startup(rdev);
1062 if (r) { 1064 if (r) {
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index eee52aa92a7c..021de44c15ab 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -50,7 +50,7 @@ static struct drm_driver driver = {
50 .owner = THIS_MODULE, 50 .owner = THIS_MODULE,
51 .open = drm_open, 51 .open = drm_open,
52 .release = drm_release, 52 .release = drm_release,
53 .ioctl = drm_ioctl, 53 .unlocked_ioctl = drm_ioctl,
54 .mmap = drm_mmap, 54 .mmap = drm_mmap,
55 .poll = drm_poll, 55 .poll = drm_poll,
56 .fasync = drm_fasync, 56 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index e725cc0b1155..4fd1f067d380 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -80,7 +80,7 @@ static struct drm_driver driver = {
80 .owner = THIS_MODULE, 80 .owner = THIS_MODULE,
81 .open = drm_open, 81 .open = drm_open,
82 .release = drm_release, 82 .release = drm_release,
83 .ioctl = drm_ioctl, 83 .unlocked_ioctl = drm_ioctl,
84 .mmap = drm_mmap, 84 .mmap = drm_mmap,
85 .poll = drm_poll, 85 .poll = drm_poll,
86 .fasync = drm_fasync, 86 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 012ff2e356b2..ec5a43e65722 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,7 @@ static struct drm_driver driver = {
48 .owner = THIS_MODULE, 48 .owner = THIS_MODULE,
49 .open = drm_open, 49 .open = drm_open,
50 .release = drm_release, 50 .release = drm_release,
51 .ioctl = drm_ioctl, 51 .unlocked_ioctl = drm_ioctl,
52 .mmap = drm_mmap, 52 .mmap = drm_mmap,
53 .poll = drm_poll, 53 .poll = drm_poll,
54 .fasync = drm_fasync, 54 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1fbb2eea5e88..2920f9a279e1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -71,34 +71,34 @@ static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
71 return -EINVAL; 71 return -EINVAL;
72} 72}
73 73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob, 74static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
75 struct ttm_mem_type_manager *man)
76{ 75{
76 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
77
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type); 78 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type); 79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags); 80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset); 81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset); 82 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size); 83 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size); 84 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n", 85 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching); 86 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n", 87 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching); 88 man->default_caching);
88 spin_lock(&glob->lru_lock); 89 if (mem_type != TTM_PL_SYSTEM) {
89 drm_mm_debug_table(&man->manager, TTM_PFX); 90 spin_lock(&bdev->glob->lru_lock);
90 spin_unlock(&glob->lru_lock); 91 drm_mm_debug_table(&man->manager, TTM_PFX);
92 spin_unlock(&bdev->glob->lru_lock);
93 }
91} 94}
92 95
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, 96static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement) 97 struct ttm_placement *placement)
95{ 98{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret, mem_type; 99 int i, ret, mem_type;
100 100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n", 101 printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10, 102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20); 103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) { 104 for (i = 0; i < placement->num_placement; i++) {
@@ -106,10 +106,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
106 &mem_type); 106 &mem_type);
107 if (ret) 107 if (ret)
108 return; 108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n", 109 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type); 110 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man); 111 ttm_mem_type_debug(bo->bdev, mem_type);
113 } 112 }
114} 113}
115 114
@@ -465,6 +464,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
465 spin_unlock(&bo->lock); 464 spin_unlock(&bo->lock);
466 465
467 spin_lock(&glob->lru_lock); 466 spin_lock(&glob->lru_lock);
467 put_count = ttm_bo_del_from_lru(bo);
468
468 ret = ttm_bo_reserve_locked(bo, false, false, false, 0); 469 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
469 BUG_ON(ret); 470 BUG_ON(ret);
470 if (bo->ttm) 471 if (bo->ttm)
@@ -472,20 +473,19 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
472 473
473 if (!list_empty(&bo->ddestroy)) { 474 if (!list_empty(&bo->ddestroy)) {
474 list_del_init(&bo->ddestroy); 475 list_del_init(&bo->ddestroy);
475 kref_put(&bo->list_kref, ttm_bo_ref_bug); 476 ++put_count;
476 } 477 }
477 if (bo->mem.mm_node) { 478 if (bo->mem.mm_node) {
478 bo->mem.mm_node->private = NULL; 479 bo->mem.mm_node->private = NULL;
479 drm_mm_put_block(bo->mem.mm_node); 480 drm_mm_put_block(bo->mem.mm_node);
480 bo->mem.mm_node = NULL; 481 bo->mem.mm_node = NULL;
481 } 482 }
482 put_count = ttm_bo_del_from_lru(bo);
483 spin_unlock(&glob->lru_lock); 483 spin_unlock(&glob->lru_lock);
484 484
485 atomic_set(&bo->reserved, 0); 485 atomic_set(&bo->reserved, 0);
486 486
487 while (put_count--) 487 while (put_count--)
488 kref_put(&bo->list_kref, ttm_bo_release_list); 488 kref_put(&bo->list_kref, ttm_bo_ref_bug);
489 489
490 return 0; 490 return 0;
491 } 491 }
@@ -684,19 +684,45 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
684 struct ttm_buffer_object *bo; 684 struct ttm_buffer_object *bo;
685 int ret, put_count = 0; 685 int ret, put_count = 0;
686 686
687retry:
687 spin_lock(&glob->lru_lock); 688 spin_lock(&glob->lru_lock);
689 if (list_empty(&man->lru)) {
690 spin_unlock(&glob->lru_lock);
691 return -EBUSY;
692 }
693
688 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 694 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
689 kref_get(&bo->list_kref); 695 kref_get(&bo->list_kref);
690 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0); 696
691 if (likely(ret == 0)) 697 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
692 put_count = ttm_bo_del_from_lru(bo); 698
699 if (unlikely(ret == -EBUSY)) {
700 spin_unlock(&glob->lru_lock);
701 if (likely(!no_wait))
702 ret = ttm_bo_wait_unreserved(bo, interruptible);
703
704 kref_put(&bo->list_kref, ttm_bo_release_list);
705
706 /**
707 * We *need* to retry after releasing the lru lock.
708 */
709
710 if (unlikely(ret != 0))
711 return ret;
712 goto retry;
713 }
714
715 put_count = ttm_bo_del_from_lru(bo);
693 spin_unlock(&glob->lru_lock); 716 spin_unlock(&glob->lru_lock);
694 if (unlikely(ret != 0)) 717
695 return ret; 718 BUG_ON(ret != 0);
719
696 while (put_count--) 720 while (put_count--)
697 kref_put(&bo->list_kref, ttm_bo_ref_bug); 721 kref_put(&bo->list_kref, ttm_bo_ref_bug);
722
698 ret = ttm_bo_evict(bo, interruptible, no_wait); 723 ret = ttm_bo_evict(bo, interruptible, no_wait);
699 ttm_bo_unreserve(bo); 724 ttm_bo_unreserve(bo);
725
700 kref_put(&bo->list_kref, ttm_bo_release_list); 726 kref_put(&bo->list_kref, ttm_bo_release_list);
701 return ret; 727 return ret;
702} 728}
@@ -849,7 +875,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
849 int i, ret; 875 int i, ret;
850 876
851 mem->mm_node = NULL; 877 mem->mm_node = NULL;
852 for (i = 0; i <= placement->num_placement; ++i) { 878 for (i = 0; i < placement->num_placement; ++i) {
853 ret = ttm_mem_type_from_flags(placement->placement[i], 879 ret = ttm_mem_type_from_flags(placement->placement[i],
854 &mem_type); 880 &mem_type);
855 if (ret) 881 if (ret)
@@ -900,8 +926,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
900 if (!type_found) 926 if (!type_found)
901 return -EINVAL; 927 return -EINVAL;
902 928
903 for (i = 0; i <= placement->num_busy_placement; ++i) { 929 for (i = 0; i < placement->num_busy_placement; ++i) {
904 ret = ttm_mem_type_from_flags(placement->placement[i], 930 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
905 &mem_type); 931 &mem_type);
906 if (ret) 932 if (ret)
907 return ret; 933 return ret;
@@ -911,7 +937,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
911 if (!ttm_bo_mt_compatible(man, 937 if (!ttm_bo_mt_compatible(man,
912 bo->type == ttm_bo_type_user, 938 bo->type == ttm_bo_type_user,
913 mem_type, 939 mem_type,
914 placement->placement[i], 940 placement->busy_placement[i],
915 &cur_flags)) 941 &cur_flags))
916 continue; 942 continue;
917 943
@@ -921,7 +947,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
921 * Use the access and other non-mapping-related flag bits from 947 * Use the access and other non-mapping-related flag bits from
922 * the memory placement flags to the current flags 948 * the memory placement flags to the current flags
923 */ 949 */
924 ttm_flag_masked(&cur_flags, placement->placement[i], 950 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
925 ~TTM_PL_MASK_MEMTYPE); 951 ~TTM_PL_MASK_MEMTYPE);
926 952
927 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 953 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
@@ -1115,6 +1141,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1115 bo->glob = bdev->glob; 1141 bo->glob = bdev->glob;
1116 bo->type = type; 1142 bo->type = type;
1117 bo->num_pages = num_pages; 1143 bo->num_pages = num_pages;
1144 bo->mem.size = num_pages << PAGE_SHIFT;
1118 bo->mem.mem_type = TTM_PL_SYSTEM; 1145 bo->mem.mem_type = TTM_PL_SYSTEM;
1119 bo->mem.num_pages = bo->num_pages; 1146 bo->mem.num_pages = bo->num_pages;
1120 bo->mem.mm_node = NULL; 1147 bo->mem.mm_node = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 609a85a4d855..668dbe8b8dd3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -320,7 +320,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
320 return -EFAULT; 320 return -EFAULT;
321 321
322 driver = bo->bdev->driver; 322 driver = bo->bdev->driver;
323 if (unlikely(driver->verify_access)) { 323 if (unlikely(!driver->verify_access)) {
324 ret = -EPERM; 324 ret = -EPERM;
325 goto out_unref; 325 goto out_unref;
326 } 326 }
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index bc2f51843005..7a1b210401e0 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -58,7 +58,7 @@ static struct drm_driver driver = {
58 .owner = THIS_MODULE, 58 .owner = THIS_MODULE,
59 .open = drm_open, 59 .open = drm_open,
60 .release = drm_release, 60 .release = drm_release,
61 .ioctl = drm_ioctl, 61 .unlocked_ioctl = drm_ioctl,
62 .mmap = drm_mmap, 62 .mmap = drm_mmap,
63 .poll = drm_poll, 63 .poll = drm_poll,
64 .fasync = drm_fasync, 64 .fasync = drm_fasync,
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 000000000000..f20b8bcbef39
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
1config DRM_VMWGFX
2 tristate "DRM driver for VMware Virtual GPU"
3 depends on DRM && PCI
4 select FB_DEFERRED_IO
5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA
7 select FB_CFB_IMAGEBLIT
8 select DRM_TTM
9 help
10 KMS-enabled DRM driver for SVGA2 virtual hardware.
11
12 If unsure, say n. The compiled module will be
13 called vmwgfx.ko
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 000000000000..1a3cb6816d1c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@
1
2ccflags-y := -Iinclude/drm
3
4vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o
8
9obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
new file mode 100644
index 000000000000..77cb45331000
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -0,0 +1,1793 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga3d_reg.h --
28 *
29 * SVGA 3D hardware definitions
30 */
31
32#ifndef _SVGA3D_REG_H_
33#define _SVGA3D_REG_H_
34
35#include "svga_reg.h"
36
37
38/*
39 * 3D Hardware Version
40 *
41 * The hardware version is stored in the SVGA_FIFO_3D_HWVERSION fifo
42 * register. It is set by the host and read by the guest. This lets
43 * us make new guest drivers which are backwards-compatible with old
44 * SVGA hardware revisions. It does not let us support old guest
45 * drivers. Good enough for now.
46 *
47 */
48
49#define SVGA3D_MAKE_HWVERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
50#define SVGA3D_MAJOR_HWVERSION(version) ((version) >> 16)
51#define SVGA3D_MINOR_HWVERSION(version) ((version) & 0xFF)
52
53typedef enum {
54 SVGA3D_HWVERSION_WS5_RC1 = SVGA3D_MAKE_HWVERSION(0, 1),
55 SVGA3D_HWVERSION_WS5_RC2 = SVGA3D_MAKE_HWVERSION(0, 2),
56 SVGA3D_HWVERSION_WS51_RC1 = SVGA3D_MAKE_HWVERSION(0, 3),
57 SVGA3D_HWVERSION_WS6_B1 = SVGA3D_MAKE_HWVERSION(1, 1),
58 SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
59 SVGA3D_HWVERSION_WS65_B1 = SVGA3D_MAKE_HWVERSION(2, 0),
60 SVGA3D_HWVERSION_CURRENT = SVGA3D_HWVERSION_WS65_B1,
61} SVGA3dHardwareVersion;
62
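
   For illustration only: because SVGA3D_MAKE_HWVERSION packs the major
   number into the high 16 bits, packed versions compare correctly as plain
   integers. A minimal guest-side gate (the helper name is hypothetical)
   might look like:

      static bool svga3d_hwversion_ok(uint32 hwversion)
      {
         /* Accept any host protocol at least as new as the WS6 beta. */
         return hwversion >= SVGA3D_HWVERSION_WS6_B1;
      }
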
63/*
64 * Generic Types
65 */
66
67typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
68#define SVGA3D_NUM_CLIPPLANES 6
69#define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS 8
70
71
72/*
73 * Surface formats.
74 *
75 * If you modify this list, be sure to keep GLUtil.c in sync. It
76 * includes the internal format definition of each surface in
77 * GLUtil_ConvertSurfaceFormat, and it contains a table of
78 * human-readable names in GLUtil_GetFormatName.
79 */
80
81typedef enum SVGA3dSurfaceFormat {
82 SVGA3D_FORMAT_INVALID = 0,
83
84 SVGA3D_X8R8G8B8 = 1,
85 SVGA3D_A8R8G8B8 = 2,
86
87 SVGA3D_R5G6B5 = 3,
88 SVGA3D_X1R5G5B5 = 4,
89 SVGA3D_A1R5G5B5 = 5,
90 SVGA3D_A4R4G4B4 = 6,
91
92 SVGA3D_Z_D32 = 7,
93 SVGA3D_Z_D16 = 8,
94 SVGA3D_Z_D24S8 = 9,
95 SVGA3D_Z_D15S1 = 10,
96
97 SVGA3D_LUMINANCE8 = 11,
98 SVGA3D_LUMINANCE4_ALPHA4 = 12,
99 SVGA3D_LUMINANCE16 = 13,
100 SVGA3D_LUMINANCE8_ALPHA8 = 14,
101
102 SVGA3D_DXT1 = 15,
103 SVGA3D_DXT2 = 16,
104 SVGA3D_DXT3 = 17,
105 SVGA3D_DXT4 = 18,
106 SVGA3D_DXT5 = 19,
107
108 SVGA3D_BUMPU8V8 = 20,
109 SVGA3D_BUMPL6V5U5 = 21,
110 SVGA3D_BUMPX8L8V8U8 = 22,
111 SVGA3D_BUMPL8V8U8 = 23,
112
113 SVGA3D_ARGB_S10E5 = 24, /* 16-bit floating-point ARGB */
114 SVGA3D_ARGB_S23E8 = 25, /* 32-bit floating-point ARGB */
115
116 SVGA3D_A2R10G10B10 = 26,
117
118 /* signed formats */
119 SVGA3D_V8U8 = 27,
120 SVGA3D_Q8W8V8U8 = 28,
121 SVGA3D_CxV8U8 = 29,
122
123 /* mixed formats */
124 SVGA3D_X8L8V8U8 = 30,
125 SVGA3D_A2W10V10U10 = 31,
126
127 SVGA3D_ALPHA8 = 32,
128
129 /* Single- and dual-component floating point formats */
130 SVGA3D_R_S10E5 = 33,
131 SVGA3D_R_S23E8 = 34,
132 SVGA3D_RG_S10E5 = 35,
133 SVGA3D_RG_S23E8 = 36,
134
135 /*
136 * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
137 * the most efficient format to use when creating new surfaces
138 * expressly for index or vertex data.
139 */
140 SVGA3D_BUFFER = 37,
141
142 SVGA3D_Z_D24X8 = 38,
143
144 SVGA3D_V16U16 = 39,
145
146 SVGA3D_G16R16 = 40,
147 SVGA3D_A16B16G16R16 = 41,
148
149 /* Packed Video formats */
150 SVGA3D_UYVY = 42,
151 SVGA3D_YUY2 = 43,
152
153 SVGA3D_FORMAT_MAX
154} SVGA3dSurfaceFormat;
155
156typedef uint32 SVGA3dColor; /* a, r, g, b */
157
158/*
159 * These match the D3DFORMAT_OP definitions used by Direct3D. We need
160 * them so that we can query the host for what the supported surface
161 * operations are (when we're using the D3D backend, in particular),
162 * and so we can send those operations to the guest.
163 */
164typedef enum {
165 SVGA3DFORMAT_OP_TEXTURE = 0x00000001,
166 SVGA3DFORMAT_OP_VOLUMETEXTURE = 0x00000002,
167 SVGA3DFORMAT_OP_CUBETEXTURE = 0x00000004,
168 SVGA3DFORMAT_OP_OFFSCREEN_RENDERTARGET = 0x00000008,
169 SVGA3DFORMAT_OP_SAME_FORMAT_RENDERTARGET = 0x00000010,
170 SVGA3DFORMAT_OP_ZSTENCIL = 0x00000040,
171 SVGA3DFORMAT_OP_ZSTENCIL_WITH_ARBITRARY_COLOR_DEPTH = 0x00000080,
172
173/*
174 * This format can be used as a render target if the current display mode
175 * is the same depth if the alpha channel is ignored. e.g. if the device
176 * can render to A8R8G8B8 when the display mode is X8R8G8B8, then the
177 * format op list entry for A8R8G8B8 should have this cap.
178 */
179 SVGA3DFORMAT_OP_SAME_FORMAT_UP_TO_ALPHA_RENDERTARGET = 0x00000100,
180
181/*
182 * This format contains DirectDraw support (including Flip). This flag
183 * should not to be set on alpha formats.
184 */
185 SVGA3DFORMAT_OP_DISPLAYMODE = 0x00000400,
186
187/*
188 * The rasterizer can support some level of Direct3D support in this format
189 * and implies that the driver can create a Context in this mode (for some
190 * render target format). When this flag is set, the SVGA3DFORMAT_OP_DISPLAYMODE
191 * flag must also be set.
192 */
193 SVGA3DFORMAT_OP_3DACCELERATION = 0x00000800,
194
195/*
196 * This is set for a private format when the driver has put the bpp in
197 * the structure.
198 */
199 SVGA3DFORMAT_OP_PIXELSIZE = 0x00001000,
200
201/*
202 * Indicates that this format can be converted to any RGB format for which
203 * SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB is specified
204 */
205 SVGA3DFORMAT_OP_CONVERT_TO_ARGB = 0x00002000,
206
207/*
208 * Indicates that this format can be used to create offscreen plain surfaces.
209 */
210 SVGA3DFORMAT_OP_OFFSCREENPLAIN = 0x00004000,
211
212/*
213 * Indicated that this format can be read as an SRGB texture (meaning that the
214 * sampler will linearize the looked up data)
215 */
216 SVGA3DFORMAT_OP_SRGBREAD = 0x00008000,
217
218/*
219 * Indicates that this format can be used in the bumpmap instructions
220 */
221 SVGA3DFORMAT_OP_BUMPMAP = 0x00010000,
222
223/*
224 * Indicates that this format can be sampled by the displacement map sampler
225 */
226 SVGA3DFORMAT_OP_DMAP = 0x00020000,
227
228/*
229 * Indicates that this format cannot be used with texture filtering
230 */
231 SVGA3DFORMAT_OP_NOFILTER = 0x00040000,
232
233/*
234 * Indicates that format conversions are supported to this RGB format if
235 * SVGA3DFORMAT_OP_CONVERT_TO_ARGB is specified in the source format.
236 */
237 SVGA3DFORMAT_OP_MEMBEROFGROUP_ARGB = 0x00080000,
238
239/*
240 * Indicated that this format can be written as an SRGB target (meaning that the
241 * pixel pipe will DE-linearize data on output to format)
242 */
243 SVGA3DFORMAT_OP_SRGBWRITE = 0x00100000,
244
245/*
246 * Indicates that this format cannot be used with alpha blending
247 */
248 SVGA3DFORMAT_OP_NOALPHABLEND = 0x00200000,
249
250/*
251 * Indicates that the device can auto-generate sublevels for resources
252 * of this format
253 */
254 SVGA3DFORMAT_OP_AUTOGENMIPMAP = 0x00400000,
255
256/*
257 * Indicates that this format can be used by vertex texture sampler
258 */
259 SVGA3DFORMAT_OP_VERTEXTEXTURE = 0x00800000,
260
261/*
262 * Indicates that this format supports neither texture coordinate wrap
263 * modes, nor mipmapping
264 */
265 SVGA3DFORMAT_OP_NOTEXCOORDWRAPNORMIP = 0x01000000
266} SVGA3dFormatOp;
267
268/*
269 * This structure is a conversion of SVGA3DFORMAT_OP_*.
270 * Entries must be located at the same bit positions.
271 */
272typedef union {
273 uint32 value;
274 struct {
275 uint32 texture : 1;
276 uint32 volumeTexture : 1;
277 uint32 cubeTexture : 1;
278 uint32 offscreenRenderTarget : 1;
279 uint32 sameFormatRenderTarget : 1;
280 uint32 unknown1 : 1;
281 uint32 zStencil : 1;
282 uint32 zStencilArbitraryDepth : 1;
283 uint32 sameFormatUpToAlpha : 1;
284 uint32 unknown2 : 1;
285 uint32 displayMode : 1;
286 uint32 acceleration3d : 1;
287 uint32 pixelSize : 1;
288 uint32 convertToARGB : 1;
289 uint32 offscreenPlain : 1;
290 uint32 sRGBRead : 1;
291 uint32 bumpMap : 1;
292 uint32 dmap : 1;
293 uint32 noFilter : 1;
294 uint32 memberOfGroupARGB : 1;
295 uint32 sRGBWrite : 1;
296 uint32 noAlphaBlend : 1;
297 uint32 autoGenMipMap : 1;
298 uint32 vertexTexture : 1;
299 uint32 noTexCoordWrapNorMip : 1;
300 };
301} SVGA3dSurfaceFormatCaps;
302
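
   A sketch of how the union above is meant to be used: a raw
   SVGA3DFORMAT_OP_* mask can be viewed through the bitfields, since both
   share bit positions (the helper name and the mask's origin are
   hypothetical):

      static bool svga3d_format_is_filterable_texture(uint32 format_ops)
      {
         SVGA3dSurfaceFormatCaps caps;

         caps.value = format_ops;   /* e.g. an op mask queried from the host */
         return caps.texture && !caps.noFilter;
      }
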
303/*
304 * SVGA_3D_CMD_SETRENDERSTATE Types. All value types
305 * must fit in a uint32.
306 */
307
308typedef enum {
309 SVGA3D_RS_INVALID = 0,
310 SVGA3D_RS_ZENABLE = 1, /* SVGA3dBool */
311 SVGA3D_RS_ZWRITEENABLE = 2, /* SVGA3dBool */
312 SVGA3D_RS_ALPHATESTENABLE = 3, /* SVGA3dBool */
313 SVGA3D_RS_DITHERENABLE = 4, /* SVGA3dBool */
314 SVGA3D_RS_BLENDENABLE = 5, /* SVGA3dBool */
315 SVGA3D_RS_FOGENABLE = 6, /* SVGA3dBool */
316 SVGA3D_RS_SPECULARENABLE = 7, /* SVGA3dBool */
317 SVGA3D_RS_STENCILENABLE = 8, /* SVGA3dBool */
318 SVGA3D_RS_LIGHTINGENABLE = 9, /* SVGA3dBool */
319 SVGA3D_RS_NORMALIZENORMALS = 10, /* SVGA3dBool */
320 SVGA3D_RS_POINTSPRITEENABLE = 11, /* SVGA3dBool */
321 SVGA3D_RS_POINTSCALEENABLE = 12, /* SVGA3dBool */
322 SVGA3D_RS_STENCILREF = 13, /* uint32 */
323 SVGA3D_RS_STENCILMASK = 14, /* uint32 */
324 SVGA3D_RS_STENCILWRITEMASK = 15, /* uint32 */
325 SVGA3D_RS_FOGSTART = 16, /* float */
326 SVGA3D_RS_FOGEND = 17, /* float */
327 SVGA3D_RS_FOGDENSITY = 18, /* float */
328 SVGA3D_RS_POINTSIZE = 19, /* float */
329 SVGA3D_RS_POINTSIZEMIN = 20, /* float */
330 SVGA3D_RS_POINTSIZEMAX = 21, /* float */
331 SVGA3D_RS_POINTSCALE_A = 22, /* float */
332 SVGA3D_RS_POINTSCALE_B = 23, /* float */
333 SVGA3D_RS_POINTSCALE_C = 24, /* float */
334 SVGA3D_RS_FOGCOLOR = 25, /* SVGA3dColor */
335 SVGA3D_RS_AMBIENT = 26, /* SVGA3dColor */
336 SVGA3D_RS_CLIPPLANEENABLE = 27, /* SVGA3dClipPlanes */
337 SVGA3D_RS_FOGMODE = 28, /* SVGA3dFogMode */
338 SVGA3D_RS_FILLMODE = 29, /* SVGA3dFillMode */
339 SVGA3D_RS_SHADEMODE = 30, /* SVGA3dShadeMode */
340 SVGA3D_RS_LINEPATTERN = 31, /* SVGA3dLinePattern */
341 SVGA3D_RS_SRCBLEND = 32, /* SVGA3dBlendOp */
342 SVGA3D_RS_DSTBLEND = 33, /* SVGA3dBlendOp */
343 SVGA3D_RS_BLENDEQUATION = 34, /* SVGA3dBlendEquation */
344 SVGA3D_RS_CULLMODE = 35, /* SVGA3dFace */
345 SVGA3D_RS_ZFUNC = 36, /* SVGA3dCmpFunc */
346 SVGA3D_RS_ALPHAFUNC = 37, /* SVGA3dCmpFunc */
347 SVGA3D_RS_STENCILFUNC = 38, /* SVGA3dCmpFunc */
348 SVGA3D_RS_STENCILFAIL = 39, /* SVGA3dStencilOp */
349 SVGA3D_RS_STENCILZFAIL = 40, /* SVGA3dStencilOp */
350 SVGA3D_RS_STENCILPASS = 41, /* SVGA3dStencilOp */
351 SVGA3D_RS_ALPHAREF = 42, /* float (0.0 .. 1.0) */
352 SVGA3D_RS_FRONTWINDING = 43, /* SVGA3dFrontWinding */
353 SVGA3D_RS_COORDINATETYPE = 44, /* SVGA3dCoordinateType */
354 SVGA3D_RS_ZBIAS = 45, /* float */
355 SVGA3D_RS_RANGEFOGENABLE = 46, /* SVGA3dBool */
356 SVGA3D_RS_COLORWRITEENABLE = 47, /* SVGA3dColorMask */
357 SVGA3D_RS_VERTEXMATERIALENABLE = 48, /* SVGA3dBool */
358 SVGA3D_RS_DIFFUSEMATERIALSOURCE = 49, /* SVGA3dVertexMaterial */
359 SVGA3D_RS_SPECULARMATERIALSOURCE = 50, /* SVGA3dVertexMaterial */
360 SVGA3D_RS_AMBIENTMATERIALSOURCE = 51, /* SVGA3dVertexMaterial */
361 SVGA3D_RS_EMISSIVEMATERIALSOURCE = 52, /* SVGA3dVertexMaterial */
362 SVGA3D_RS_TEXTUREFACTOR = 53, /* SVGA3dColor */
363 SVGA3D_RS_LOCALVIEWER = 54, /* SVGA3dBool */
364 SVGA3D_RS_SCISSORTESTENABLE = 55, /* SVGA3dBool */
365 SVGA3D_RS_BLENDCOLOR = 56, /* SVGA3dColor */
366 SVGA3D_RS_STENCILENABLE2SIDED = 57, /* SVGA3dBool */
367 SVGA3D_RS_CCWSTENCILFUNC = 58, /* SVGA3dCmpFunc */
368 SVGA3D_RS_CCWSTENCILFAIL = 59, /* SVGA3dStencilOp */
369 SVGA3D_RS_CCWSTENCILZFAIL = 60, /* SVGA3dStencilOp */
370 SVGA3D_RS_CCWSTENCILPASS = 61, /* SVGA3dStencilOp */
371 SVGA3D_RS_VERTEXBLEND = 62, /* SVGA3dVertexBlendFlags */
372 SVGA3D_RS_SLOPESCALEDEPTHBIAS = 63, /* float */
373 SVGA3D_RS_DEPTHBIAS = 64, /* float */
374
375
376 /*
377 * Output Gamma Level
378 *
379 * Output gamma affects the gamma curve of colors that are output from the
380 * rendering pipeline. A value of 1.0 specifies a linear color space. If the
381 * value is <= 0.0, gamma correction is ignored and linear color space is
382 * used.
383 */
384
385 SVGA3D_RS_OUTPUTGAMMA = 65, /* float */
386 SVGA3D_RS_ZVISIBLE = 66, /* SVGA3dBool */
387 SVGA3D_RS_LASTPIXEL = 67, /* SVGA3dBool */
388 SVGA3D_RS_CLIPPING = 68, /* SVGA3dBool */
389 SVGA3D_RS_WRAP0 = 69, /* SVGA3dWrapFlags */
390 SVGA3D_RS_WRAP1 = 70, /* SVGA3dWrapFlags */
391 SVGA3D_RS_WRAP2 = 71, /* SVGA3dWrapFlags */
392 SVGA3D_RS_WRAP3 = 72, /* SVGA3dWrapFlags */
393 SVGA3D_RS_WRAP4 = 73, /* SVGA3dWrapFlags */
394 SVGA3D_RS_WRAP5 = 74, /* SVGA3dWrapFlags */
395 SVGA3D_RS_WRAP6 = 75, /* SVGA3dWrapFlags */
396 SVGA3D_RS_WRAP7 = 76, /* SVGA3dWrapFlags */
397 SVGA3D_RS_WRAP8 = 77, /* SVGA3dWrapFlags */
398 SVGA3D_RS_WRAP9 = 78, /* SVGA3dWrapFlags */
399 SVGA3D_RS_WRAP10 = 79, /* SVGA3dWrapFlags */
400 SVGA3D_RS_WRAP11 = 80, /* SVGA3dWrapFlags */
401 SVGA3D_RS_WRAP12 = 81, /* SVGA3dWrapFlags */
402 SVGA3D_RS_WRAP13 = 82, /* SVGA3dWrapFlags */
403 SVGA3D_RS_WRAP14 = 83, /* SVGA3dWrapFlags */
404 SVGA3D_RS_WRAP15 = 84, /* SVGA3dWrapFlags */
405 SVGA3D_RS_MULTISAMPLEANTIALIAS = 85, /* SVGA3dBool */
406 SVGA3D_RS_MULTISAMPLEMASK = 86, /* uint32 */
407 SVGA3D_RS_INDEXEDVERTEXBLENDENABLE = 87, /* SVGA3dBool */
408 SVGA3D_RS_TWEENFACTOR = 88, /* float */
409 SVGA3D_RS_ANTIALIASEDLINEENABLE = 89, /* SVGA3dBool */
410 SVGA3D_RS_COLORWRITEENABLE1 = 90, /* SVGA3dColorMask */
411 SVGA3D_RS_COLORWRITEENABLE2 = 91, /* SVGA3dColorMask */
412 SVGA3D_RS_COLORWRITEENABLE3 = 92, /* SVGA3dColorMask */
413 SVGA3D_RS_SEPARATEALPHABLENDENABLE = 93, /* SVGA3dBool */
414 SVGA3D_RS_SRCBLENDALPHA = 94, /* SVGA3dBlendOp */
415 SVGA3D_RS_DSTBLENDALPHA = 95, /* SVGA3dBlendOp */
416 SVGA3D_RS_BLENDEQUATIONALPHA = 96, /* SVGA3dBlendEquation */
417 SVGA3D_RS_MAX
418} SVGA3dRenderStateName;
419
420typedef enum {
421 SVGA3D_VERTEXMATERIAL_NONE = 0, /* Use the value in the current material */
422 SVGA3D_VERTEXMATERIAL_DIFFUSE = 1, /* Use the value in the diffuse component */
423 SVGA3D_VERTEXMATERIAL_SPECULAR = 2, /* Use the value in the specular component */
424} SVGA3dVertexMaterial;
425
426typedef enum {
427 SVGA3D_FILLMODE_INVALID = 0,
428 SVGA3D_FILLMODE_POINT = 1,
429 SVGA3D_FILLMODE_LINE = 2,
430 SVGA3D_FILLMODE_FILL = 3,
431 SVGA3D_FILLMODE_MAX
432} SVGA3dFillModeType;
433
434
435typedef
436union {
437 struct {
438 uint16 mode; /* SVGA3dFillModeType */
439 uint16 face; /* SVGA3dFace */
440 };
441 uint32 uintValue;
442} SVGA3dFillMode;
443
444typedef enum {
445 SVGA3D_SHADEMODE_INVALID = 0,
446 SVGA3D_SHADEMODE_FLAT = 1,
447 SVGA3D_SHADEMODE_SMOOTH = 2,
448 SVGA3D_SHADEMODE_PHONG = 3, /* Not supported */
449 SVGA3D_SHADEMODE_MAX
450} SVGA3dShadeMode;
451
452typedef
453union {
454 struct {
455 uint16 repeat;
456 uint16 pattern;
457 };
458 uint32 uintValue;
459} SVGA3dLinePattern;
460
461typedef enum {
462 SVGA3D_BLENDOP_INVALID = 0,
463 SVGA3D_BLENDOP_ZERO = 1,
464 SVGA3D_BLENDOP_ONE = 2,
465 SVGA3D_BLENDOP_SRCCOLOR = 3,
466 SVGA3D_BLENDOP_INVSRCCOLOR = 4,
467 SVGA3D_BLENDOP_SRCALPHA = 5,
468 SVGA3D_BLENDOP_INVSRCALPHA = 6,
469 SVGA3D_BLENDOP_DESTALPHA = 7,
470 SVGA3D_BLENDOP_INVDESTALPHA = 8,
471 SVGA3D_BLENDOP_DESTCOLOR = 9,
472 SVGA3D_BLENDOP_INVDESTCOLOR = 10,
473 SVGA3D_BLENDOP_SRCALPHASAT = 11,
474 SVGA3D_BLENDOP_BLENDFACTOR = 12,
475 SVGA3D_BLENDOP_INVBLENDFACTOR = 13,
476 SVGA3D_BLENDOP_MAX
477} SVGA3dBlendOp;
478
479typedef enum {
480 SVGA3D_BLENDEQ_INVALID = 0,
481 SVGA3D_BLENDEQ_ADD = 1,
482 SVGA3D_BLENDEQ_SUBTRACT = 2,
483 SVGA3D_BLENDEQ_REVSUBTRACT = 3,
484 SVGA3D_BLENDEQ_MINIMUM = 4,
485 SVGA3D_BLENDEQ_MAXIMUM = 5,
486 SVGA3D_BLENDEQ_MAX
487} SVGA3dBlendEquation;
488
489typedef enum {
490 SVGA3D_FRONTWINDING_INVALID = 0,
491 SVGA3D_FRONTWINDING_CW = 1,
492 SVGA3D_FRONTWINDING_CCW = 2,
493 SVGA3D_FRONTWINDING_MAX
494} SVGA3dFrontWinding;
495
496typedef enum {
497 SVGA3D_FACE_INVALID = 0,
498 SVGA3D_FACE_NONE = 1,
499 SVGA3D_FACE_FRONT = 2,
500 SVGA3D_FACE_BACK = 3,
501 SVGA3D_FACE_FRONT_BACK = 4,
502 SVGA3D_FACE_MAX
503} SVGA3dFace;
504
505/*
506 * The order and the values should not be changed
507 */
508
509typedef enum {
510 SVGA3D_CMP_INVALID = 0,
511 SVGA3D_CMP_NEVER = 1,
512 SVGA3D_CMP_LESS = 2,
513 SVGA3D_CMP_EQUAL = 3,
514 SVGA3D_CMP_LESSEQUAL = 4,
515 SVGA3D_CMP_GREATER = 5,
516 SVGA3D_CMP_NOTEQUAL = 6,
517 SVGA3D_CMP_GREATEREQUAL = 7,
518 SVGA3D_CMP_ALWAYS = 8,
519 SVGA3D_CMP_MAX
520} SVGA3dCmpFunc;
521
522/*
523 * SVGA3D_FOGFUNC_* specifies the fog equation, or PER_VERTEX which allows
524 * the fog factor to be specified in the alpha component of the specular
525 * (a.k.a. secondary) vertex color.
526 */
527typedef enum {
528 SVGA3D_FOGFUNC_INVALID = 0,
529 SVGA3D_FOGFUNC_EXP = 1,
530 SVGA3D_FOGFUNC_EXP2 = 2,
531 SVGA3D_FOGFUNC_LINEAR = 3,
532 SVGA3D_FOGFUNC_PER_VERTEX = 4
533} SVGA3dFogFunction;
534
535/*
536 * SVGA3D_FOGTYPE_* specifies if fog factors are computed on a per-vertex
537 * or per-pixel basis.
538 */
539typedef enum {
540 SVGA3D_FOGTYPE_INVALID = 0,
541 SVGA3D_FOGTYPE_VERTEX = 1,
542 SVGA3D_FOGTYPE_PIXEL = 2,
543 SVGA3D_FOGTYPE_MAX = 3
544} SVGA3dFogType;
545
546/*
547 * SVGA3D_FOGBASE_* selects depth or range-based fog. Depth-based fog is
548 * computed using the eye Z value of each pixel (or vertex), whereas range-
549 * based fog is computed using the actual distance (range) to the eye.
550 */
551typedef enum {
552 SVGA3D_FOGBASE_INVALID = 0,
553 SVGA3D_FOGBASE_DEPTHBASED = 1,
554 SVGA3D_FOGBASE_RANGEBASED = 2,
555 SVGA3D_FOGBASE_MAX = 3
556} SVGA3dFogBase;
557
558typedef enum {
559 SVGA3D_STENCILOP_INVALID = 0,
560 SVGA3D_STENCILOP_KEEP = 1,
561 SVGA3D_STENCILOP_ZERO = 2,
562 SVGA3D_STENCILOP_REPLACE = 3,
563 SVGA3D_STENCILOP_INCRSAT = 4,
564 SVGA3D_STENCILOP_DECRSAT = 5,
565 SVGA3D_STENCILOP_INVERT = 6,
566 SVGA3D_STENCILOP_INCR = 7,
567 SVGA3D_STENCILOP_DECR = 8,
568 SVGA3D_STENCILOP_MAX
569} SVGA3dStencilOp;
570
571typedef enum {
572 SVGA3D_CLIPPLANE_0 = (1 << 0),
573 SVGA3D_CLIPPLANE_1 = (1 << 1),
574 SVGA3D_CLIPPLANE_2 = (1 << 2),
575 SVGA3D_CLIPPLANE_3 = (1 << 3),
576 SVGA3D_CLIPPLANE_4 = (1 << 4),
577 SVGA3D_CLIPPLANE_5 = (1 << 5),
578} SVGA3dClipPlanes;
579
580typedef enum {
581 SVGA3D_CLEAR_COLOR = 0x1,
582 SVGA3D_CLEAR_DEPTH = 0x2,
583 SVGA3D_CLEAR_STENCIL = 0x4
584} SVGA3dClearFlag;
585
586typedef enum {
587 SVGA3D_RT_DEPTH = 0,
588 SVGA3D_RT_STENCIL = 1,
589 SVGA3D_RT_COLOR0 = 2,
590 SVGA3D_RT_COLOR1 = 3,
591 SVGA3D_RT_COLOR2 = 4,
592 SVGA3D_RT_COLOR3 = 5,
593 SVGA3D_RT_COLOR4 = 6,
594 SVGA3D_RT_COLOR5 = 7,
595 SVGA3D_RT_COLOR6 = 8,
596 SVGA3D_RT_COLOR7 = 9,
597 SVGA3D_RT_MAX,
598 SVGA3D_RT_INVALID = ((uint32)-1),
599} SVGA3dRenderTargetType;
600
601#define SVGA3D_MAX_RT_COLOR (SVGA3D_RT_COLOR7 - SVGA3D_RT_COLOR0 + 1)
602
603typedef
604union {
605 struct {
606 uint32 red : 1;
607 uint32 green : 1;
608 uint32 blue : 1;
609 uint32 alpha : 1;
610 };
611 uint32 uintValue;
612} SVGA3dColorMask;
613
614typedef enum {
615 SVGA3D_VBLEND_DISABLE = 0,
616 SVGA3D_VBLEND_1WEIGHT = 1,
617 SVGA3D_VBLEND_2WEIGHT = 2,
618 SVGA3D_VBLEND_3WEIGHT = 3,
619} SVGA3dVertexBlendFlags;
620
621typedef enum {
622 SVGA3D_WRAPCOORD_0 = 1 << 0,
623 SVGA3D_WRAPCOORD_1 = 1 << 1,
624 SVGA3D_WRAPCOORD_2 = 1 << 2,
625 SVGA3D_WRAPCOORD_3 = 1 << 3,
626 SVGA3D_WRAPCOORD_ALL = 0xF,
627} SVGA3dWrapFlags;
628
629/*
630 * SVGA_3D_CMD_TEXTURESTATE Types. All value types
631 * must fit in a uint32.
632 */
633
634typedef enum {
635 SVGA3D_TS_INVALID = 0,
636 SVGA3D_TS_BIND_TEXTURE = 1, /* SVGA3dSurfaceId */
637 SVGA3D_TS_COLOROP = 2, /* SVGA3dTextureCombiner */
638 SVGA3D_TS_COLORARG1 = 3, /* SVGA3dTextureArgData */
639 SVGA3D_TS_COLORARG2 = 4, /* SVGA3dTextureArgData */
640 SVGA3D_TS_ALPHAOP = 5, /* SVGA3dTextureCombiner */
641 SVGA3D_TS_ALPHAARG1 = 6, /* SVGA3dTextureArgData */
642 SVGA3D_TS_ALPHAARG2 = 7, /* SVGA3dTextureArgData */
643 SVGA3D_TS_ADDRESSU = 8, /* SVGA3dTextureAddress */
644 SVGA3D_TS_ADDRESSV = 9, /* SVGA3dTextureAddress */
645 SVGA3D_TS_MIPFILTER = 10, /* SVGA3dTextureFilter */
646 SVGA3D_TS_MAGFILTER = 11, /* SVGA3dTextureFilter */
647 SVGA3D_TS_MINFILTER = 12, /* SVGA3dTextureFilter */
648 SVGA3D_TS_BORDERCOLOR = 13, /* SVGA3dColor */
649 SVGA3D_TS_TEXCOORDINDEX = 14, /* uint32 */
650 SVGA3D_TS_TEXTURETRANSFORMFLAGS = 15, /* SVGA3dTexTransformFlags */
651 SVGA3D_TS_TEXCOORDGEN = 16, /* SVGA3dTextureCoordGen */
652 SVGA3D_TS_BUMPENVMAT00 = 17, /* float */
653 SVGA3D_TS_BUMPENVMAT01 = 18, /* float */
654 SVGA3D_TS_BUMPENVMAT10 = 19, /* float */
655 SVGA3D_TS_BUMPENVMAT11 = 20, /* float */
656 SVGA3D_TS_TEXTURE_MIPMAP_LEVEL = 21, /* uint32 */
657 SVGA3D_TS_TEXTURE_LOD_BIAS = 22, /* float */
658 SVGA3D_TS_TEXTURE_ANISOTROPIC_LEVEL = 23, /* uint32 */
659 SVGA3D_TS_ADDRESSW = 24, /* SVGA3dTextureAddress */
660
661
662 /*
663 * Sampler Gamma Level
664 *
665 * Sampler gamma affects the color of samples taken from the sampler. A
666 * value of 1.0 will produce linear samples. If the value is <= 0.0 the
667 * gamma value is ignored and a linear space is used.
668 */
669
670 SVGA3D_TS_GAMMA = 25, /* float */
671 SVGA3D_TS_BUMPENVLSCALE = 26, /* float */
672 SVGA3D_TS_BUMPENVLOFFSET = 27, /* float */
673 SVGA3D_TS_COLORARG0 = 28, /* SVGA3dTextureArgData */
674 SVGA3D_TS_ALPHAARG0 = 29, /* SVGA3dTextureArgData */
675 SVGA3D_TS_MAX
676} SVGA3dTextureStateName;
677
678typedef enum {
679 SVGA3D_TC_INVALID = 0,
680 SVGA3D_TC_DISABLE = 1,
681 SVGA3D_TC_SELECTARG1 = 2,
682 SVGA3D_TC_SELECTARG2 = 3,
683 SVGA3D_TC_MODULATE = 4,
684 SVGA3D_TC_ADD = 5,
685 SVGA3D_TC_ADDSIGNED = 6,
686 SVGA3D_TC_SUBTRACT = 7,
687 SVGA3D_TC_BLENDTEXTUREALPHA = 8,
688 SVGA3D_TC_BLENDDIFFUSEALPHA = 9,
689 SVGA3D_TC_BLENDCURRENTALPHA = 10,
690 SVGA3D_TC_BLENDFACTORALPHA = 11,
691 SVGA3D_TC_MODULATE2X = 12,
692 SVGA3D_TC_MODULATE4X = 13,
693 SVGA3D_TC_DSDT = 14,
694 SVGA3D_TC_DOTPRODUCT3 = 15,
695 SVGA3D_TC_BLENDTEXTUREALPHAPM = 16,
696 SVGA3D_TC_ADDSIGNED2X = 17,
697 SVGA3D_TC_ADDSMOOTH = 18,
698 SVGA3D_TC_PREMODULATE = 19,
699 SVGA3D_TC_MODULATEALPHA_ADDCOLOR = 20,
700 SVGA3D_TC_MODULATECOLOR_ADDALPHA = 21,
701 SVGA3D_TC_MODULATEINVALPHA_ADDCOLOR = 22,
702 SVGA3D_TC_MODULATEINVCOLOR_ADDALPHA = 23,
703 SVGA3D_TC_BUMPENVMAPLUMINANCE = 24,
704 SVGA3D_TC_MULTIPLYADD = 25,
705 SVGA3D_TC_LERP = 26,
706 SVGA3D_TC_MAX
707} SVGA3dTextureCombiner;
708
709#define SVGA3D_TC_CAP_BIT(svga3d_tc_op) (svga3d_tc_op ? (1 << (svga3d_tc_op - 1)) : 0)
710
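   A sketch of the intended use of SVGA3D_TC_CAP_BIT (the source of the
   capability mask is hypothetical):

      static bool svga3d_tc_supported(uint32 tc_caps, SVGA3dTextureCombiner op)
      {
         /* Bit (op - 1) of the mask advertises support for 'op'. */
         return (tc_caps & SVGA3D_TC_CAP_BIT(op)) != 0;
      }
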
711typedef enum {
712 SVGA3D_TEX_ADDRESS_INVALID = 0,
713 SVGA3D_TEX_ADDRESS_WRAP = 1,
714 SVGA3D_TEX_ADDRESS_MIRROR = 2,
715 SVGA3D_TEX_ADDRESS_CLAMP = 3,
716 SVGA3D_TEX_ADDRESS_BORDER = 4,
717 SVGA3D_TEX_ADDRESS_MIRRORONCE = 5,
718 SVGA3D_TEX_ADDRESS_EDGE = 6,
719 SVGA3D_TEX_ADDRESS_MAX
720} SVGA3dTextureAddress;
721
722/*
723 * SVGA3D_TEX_FILTER_NONE as the minification filter means mipmapping is
724 * disabled, and the rasterizer should use the magnification filter instead.
725 */
726typedef enum {
727 SVGA3D_TEX_FILTER_NONE = 0,
728 SVGA3D_TEX_FILTER_NEAREST = 1,
729 SVGA3D_TEX_FILTER_LINEAR = 2,
730 SVGA3D_TEX_FILTER_ANISOTROPIC = 3,
731 SVGA3D_TEX_FILTER_FLATCUBIC = 4, // Deprecated, not implemented
732 SVGA3D_TEX_FILTER_GAUSSIANCUBIC = 5, // Deprecated, not implemented
733 SVGA3D_TEX_FILTER_PYRAMIDALQUAD = 6, // Not currently implemented
734 SVGA3D_TEX_FILTER_GAUSSIANQUAD = 7, // Not currently implemented
735 SVGA3D_TEX_FILTER_MAX
736} SVGA3dTextureFilter;
737
738typedef enum {
739 SVGA3D_TEX_TRANSFORM_OFF = 0,
740 SVGA3D_TEX_TRANSFORM_S = (1 << 0),
741 SVGA3D_TEX_TRANSFORM_T = (1 << 1),
742 SVGA3D_TEX_TRANSFORM_R = (1 << 2),
743 SVGA3D_TEX_TRANSFORM_Q = (1 << 3),
744 SVGA3D_TEX_PROJECTED = (1 << 15),
745} SVGA3dTexTransformFlags;
746
747typedef enum {
748 SVGA3D_TEXCOORD_GEN_OFF = 0,
749 SVGA3D_TEXCOORD_GEN_EYE_POSITION = 1,
750 SVGA3D_TEXCOORD_GEN_EYE_NORMAL = 2,
751 SVGA3D_TEXCOORD_GEN_REFLECTIONVECTOR = 3,
752 SVGA3D_TEXCOORD_GEN_SPHERE = 4,
753 SVGA3D_TEXCOORD_GEN_MAX
754} SVGA3dTextureCoordGen;
755
756/*
757 * Texture argument constants for texture combiner
758 */
759typedef enum {
760 SVGA3D_TA_INVALID = 0,
761 SVGA3D_TA_CONSTANT = 1,
762 SVGA3D_TA_PREVIOUS = 2,
763 SVGA3D_TA_DIFFUSE = 3,
764 SVGA3D_TA_TEXTURE = 4,
765 SVGA3D_TA_SPECULAR = 5,
766 SVGA3D_TA_MAX
767} SVGA3dTextureArgData;
768
769#define SVGA3D_TM_MASK_LEN 4
770
771/* Modifiers for texture argument constants defined above. */
772typedef enum {
773 SVGA3D_TM_NONE = 0,
774 SVGA3D_TM_ALPHA = (1 << SVGA3D_TM_MASK_LEN),
775 SVGA3D_TM_ONE_MINUS = (2 << SVGA3D_TM_MASK_LEN),
776} SVGA3dTextureArgModifier;
777
778#define SVGA3D_INVALID_ID ((uint32)-1)
779#define SVGA3D_MAX_CLIP_PLANES 6
780
781/*
782 * This is the limit to the number of fixed-function texture
783 * transforms and texture coordinates we can support. It does *not*
784 * correspond to the number of texture image units (samplers) we
785 * support!
786 */
787#define SVGA3D_MAX_TEXTURE_COORDS 8
788
789/*
790 * Vertex declarations
791 *
792 * Notes:
793 *
794 * SVGA3D_DECLUSAGE_POSITIONT is for pre-transformed vertices. If you
795 * draw with any POSITIONT vertex arrays, the programmable vertex
796 * pipeline will be implicitly disabled. Drawing will take place as if
797 * no vertex shader was bound.
798 */
799
800typedef enum {
801 SVGA3D_DECLUSAGE_POSITION = 0,
802 SVGA3D_DECLUSAGE_BLENDWEIGHT, // 1
803 SVGA3D_DECLUSAGE_BLENDINDICES, // 2
804 SVGA3D_DECLUSAGE_NORMAL, // 3
805 SVGA3D_DECLUSAGE_PSIZE, // 4
806 SVGA3D_DECLUSAGE_TEXCOORD, // 5
807 SVGA3D_DECLUSAGE_TANGENT, // 6
808 SVGA3D_DECLUSAGE_BINORMAL, // 7
809 SVGA3D_DECLUSAGE_TESSFACTOR, // 8
810 SVGA3D_DECLUSAGE_POSITIONT, // 9
811 SVGA3D_DECLUSAGE_COLOR, // 10
812 SVGA3D_DECLUSAGE_FOG, // 11
813 SVGA3D_DECLUSAGE_DEPTH, // 12
814 SVGA3D_DECLUSAGE_SAMPLE, // 13
815 SVGA3D_DECLUSAGE_MAX
816} SVGA3dDeclUsage;
817
818typedef enum {
819 SVGA3D_DECLMETHOD_DEFAULT = 0,
820 SVGA3D_DECLMETHOD_PARTIALU,
821 SVGA3D_DECLMETHOD_PARTIALV,
822 SVGA3D_DECLMETHOD_CROSSUV, // Normal
823 SVGA3D_DECLMETHOD_UV,
824 SVGA3D_DECLMETHOD_LOOKUP, // Lookup a displacement map
825 SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
826} SVGA3dDeclMethod;
827
828typedef enum {
829 SVGA3D_DECLTYPE_FLOAT1 = 0,
830 SVGA3D_DECLTYPE_FLOAT2 = 1,
831 SVGA3D_DECLTYPE_FLOAT3 = 2,
832 SVGA3D_DECLTYPE_FLOAT4 = 3,
833 SVGA3D_DECLTYPE_D3DCOLOR = 4,
834 SVGA3D_DECLTYPE_UBYTE4 = 5,
835 SVGA3D_DECLTYPE_SHORT2 = 6,
836 SVGA3D_DECLTYPE_SHORT4 = 7,
837 SVGA3D_DECLTYPE_UBYTE4N = 8,
838 SVGA3D_DECLTYPE_SHORT2N = 9,
839 SVGA3D_DECLTYPE_SHORT4N = 10,
840 SVGA3D_DECLTYPE_USHORT2N = 11,
841 SVGA3D_DECLTYPE_USHORT4N = 12,
842 SVGA3D_DECLTYPE_UDEC3 = 13,
843 SVGA3D_DECLTYPE_DEC3N = 14,
844 SVGA3D_DECLTYPE_FLOAT16_2 = 15,
845 SVGA3D_DECLTYPE_FLOAT16_4 = 16,
846 SVGA3D_DECLTYPE_MAX,
847} SVGA3dDeclType;
848
849/*
850 * This structure is used for the divisor for geometry instancing;
851 * it's a direct translation of the Direct3D equivalent.
852 */
853typedef union {
854 struct {
855 /*
856 * For index data, this number represents the number of instances to draw.
857 * For instance data, this number represents the number of
858 * instances/vertex in this stream
859 */
860 uint32 count : 30;
861
862 /*
863 * This is 1 if this is supposed to be the data that is repeated for
864 * every instance.
865 */
866 uint32 indexedData : 1;
867
868 /*
869 * This is 1 if this is supposed to be the per-instance data.
870 */
871 uint32 instanceData : 1;
872 };
873
874 uint32 value;
875} SVGA3dVertexDivisor;
876
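   A sketch of filling the divisor for a per-instance stream, following the
   field descriptions above (the helper name is hypothetical):

      static uint32 svga3d_per_instance_divisor(uint32 instance_count)
      {
         SVGA3dVertexDivisor div;

         div.value = 0;
         div.count = instance_count;  /* instances per element of this stream */
         div.instanceData = 1;        /* stream holds per-instance data */
         return div.value;
      }
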
877typedef enum {
878 SVGA3D_PRIMITIVE_INVALID = 0,
879 SVGA3D_PRIMITIVE_TRIANGLELIST = 1,
880 SVGA3D_PRIMITIVE_POINTLIST = 2,
881 SVGA3D_PRIMITIVE_LINELIST = 3,
882 SVGA3D_PRIMITIVE_LINESTRIP = 4,
883 SVGA3D_PRIMITIVE_TRIANGLESTRIP = 5,
884 SVGA3D_PRIMITIVE_TRIANGLEFAN = 6,
885 SVGA3D_PRIMITIVE_MAX
886} SVGA3dPrimitiveType;
887
888typedef enum {
889 SVGA3D_COORDINATE_INVALID = 0,
890 SVGA3D_COORDINATE_LEFTHANDED = 1,
891 SVGA3D_COORDINATE_RIGHTHANDED = 2,
892 SVGA3D_COORDINATE_MAX
893} SVGA3dCoordinateType;
894
895typedef enum {
896 SVGA3D_TRANSFORM_INVALID = 0,
897 SVGA3D_TRANSFORM_WORLD = 1,
898 SVGA3D_TRANSFORM_VIEW = 2,
899 SVGA3D_TRANSFORM_PROJECTION = 3,
900 SVGA3D_TRANSFORM_TEXTURE0 = 4,
901 SVGA3D_TRANSFORM_TEXTURE1 = 5,
902 SVGA3D_TRANSFORM_TEXTURE2 = 6,
903 SVGA3D_TRANSFORM_TEXTURE3 = 7,
904 SVGA3D_TRANSFORM_TEXTURE4 = 8,
905 SVGA3D_TRANSFORM_TEXTURE5 = 9,
906 SVGA3D_TRANSFORM_TEXTURE6 = 10,
907 SVGA3D_TRANSFORM_TEXTURE7 = 11,
908 SVGA3D_TRANSFORM_WORLD1 = 12,
909 SVGA3D_TRANSFORM_WORLD2 = 13,
910 SVGA3D_TRANSFORM_WORLD3 = 14,
911 SVGA3D_TRANSFORM_MAX
912} SVGA3dTransformType;
913
914typedef enum {
915 SVGA3D_LIGHTTYPE_INVALID = 0,
916 SVGA3D_LIGHTTYPE_POINT = 1,
917 SVGA3D_LIGHTTYPE_SPOT1 = 2, /* 1-cone, in degrees */
918 SVGA3D_LIGHTTYPE_SPOT2 = 3, /* 2-cone, in radians */
919 SVGA3D_LIGHTTYPE_DIRECTIONAL = 4,
920 SVGA3D_LIGHTTYPE_MAX
921} SVGA3dLightType;
922
923typedef enum {
924 SVGA3D_CUBEFACE_POSX = 0,
925 SVGA3D_CUBEFACE_NEGX = 1,
926 SVGA3D_CUBEFACE_POSY = 2,
927 SVGA3D_CUBEFACE_NEGY = 3,
928 SVGA3D_CUBEFACE_POSZ = 4,
929 SVGA3D_CUBEFACE_NEGZ = 5,
930} SVGA3dCubeFace;
931
932typedef enum {
933 SVGA3D_SHADERTYPE_COMPILED_DX8 = 0,
934 SVGA3D_SHADERTYPE_VS = 1,
935 SVGA3D_SHADERTYPE_PS = 2,
936 SVGA3D_SHADERTYPE_MAX
937} SVGA3dShaderType;
938
939typedef enum {
940 SVGA3D_CONST_TYPE_FLOAT = 0,
941 SVGA3D_CONST_TYPE_INT = 1,
942 SVGA3D_CONST_TYPE_BOOL = 2,
943} SVGA3dShaderConstType;
944
945#define SVGA3D_MAX_SURFACE_FACES 6
946
947typedef enum {
948 SVGA3D_STRETCH_BLT_POINT = 0,
949 SVGA3D_STRETCH_BLT_LINEAR = 1,
950 SVGA3D_STRETCH_BLT_MAX
951} SVGA3dStretchBltMode;
952
953typedef enum {
954 SVGA3D_QUERYTYPE_OCCLUSION = 0,
955 SVGA3D_QUERYTYPE_MAX
956} SVGA3dQueryType;
957
958typedef enum {
959 SVGA3D_QUERYSTATE_PENDING = 0, /* Waiting on the host (set by guest) */
960 SVGA3D_QUERYSTATE_SUCCEEDED = 1, /* Completed successfully (set by host) */
961 SVGA3D_QUERYSTATE_FAILED = 2, /* Completed unsuccessfully (set by host) */
962 SVGA3D_QUERYSTATE_NEW = 3, /* Never submitted (For guest use only) */
963} SVGA3dQueryState;
964
965typedef enum {
966 SVGA3D_WRITE_HOST_VRAM = 1,
967 SVGA3D_READ_HOST_VRAM = 2,
968} SVGA3dTransferType;
969
970/*
971 * The maximum number of vertex arrays we're guaranteed to support in
972 * SVGA_3D_CMD_DRAWPRIMITIVES.
973 */
974#define SVGA3D_MAX_VERTEX_ARRAYS 32
975
976/*
977 * Identifiers for commands in the command FIFO.
978 *
979 * IDs between 1000 and 1039 (inclusive) were used by obsolete versions of
980 * the SVGA3D protocol and remain reserved; they should not be used in the
981 * future.
982 *
983 * IDs between 1040 and 1999 (inclusive) are available for use by the
984 * current SVGA3D protocol.
985 *
986 * FIFO clients other than SVGA3D should stay below 1000, or at 2000
987 * and up.
988 */
989
990#define SVGA_3D_CMD_LEGACY_BASE 1000
991#define SVGA_3D_CMD_BASE 1040
992
993#define SVGA_3D_CMD_SURFACE_DEFINE SVGA_3D_CMD_BASE + 0
994#define SVGA_3D_CMD_SURFACE_DESTROY SVGA_3D_CMD_BASE + 1
995#define SVGA_3D_CMD_SURFACE_COPY SVGA_3D_CMD_BASE + 2
996#define SVGA_3D_CMD_SURFACE_STRETCHBLT SVGA_3D_CMD_BASE + 3
997#define SVGA_3D_CMD_SURFACE_DMA SVGA_3D_CMD_BASE + 4
998#define SVGA_3D_CMD_CONTEXT_DEFINE SVGA_3D_CMD_BASE + 5
999#define SVGA_3D_CMD_CONTEXT_DESTROY SVGA_3D_CMD_BASE + 6
1000#define SVGA_3D_CMD_SETTRANSFORM SVGA_3D_CMD_BASE + 7
1001#define SVGA_3D_CMD_SETZRANGE SVGA_3D_CMD_BASE + 8
1002#define SVGA_3D_CMD_SETRENDERSTATE SVGA_3D_CMD_BASE + 9
1003#define SVGA_3D_CMD_SETRENDERTARGET SVGA_3D_CMD_BASE + 10
1004#define SVGA_3D_CMD_SETTEXTURESTATE SVGA_3D_CMD_BASE + 11
1005#define SVGA_3D_CMD_SETMATERIAL SVGA_3D_CMD_BASE + 12
1006#define SVGA_3D_CMD_SETLIGHTDATA SVGA_3D_CMD_BASE + 13
1007#define SVGA_3D_CMD_SETLIGHTENABLED SVGA_3D_CMD_BASE + 14
1008#define SVGA_3D_CMD_SETVIEWPORT SVGA_3D_CMD_BASE + 15
1009#define SVGA_3D_CMD_SETCLIPPLANE SVGA_3D_CMD_BASE + 16
1010#define SVGA_3D_CMD_CLEAR SVGA_3D_CMD_BASE + 17
1011#define SVGA_3D_CMD_PRESENT SVGA_3D_CMD_BASE + 18 // Deprecated
1012#define SVGA_3D_CMD_SHADER_DEFINE SVGA_3D_CMD_BASE + 19
1013#define SVGA_3D_CMD_SHADER_DESTROY SVGA_3D_CMD_BASE + 20
1014#define SVGA_3D_CMD_SET_SHADER SVGA_3D_CMD_BASE + 21
1015#define SVGA_3D_CMD_SET_SHADER_CONST SVGA_3D_CMD_BASE + 22
1016#define SVGA_3D_CMD_DRAW_PRIMITIVES SVGA_3D_CMD_BASE + 23
1017#define SVGA_3D_CMD_SETSCISSORRECT SVGA_3D_CMD_BASE + 24
1018#define SVGA_3D_CMD_BEGIN_QUERY SVGA_3D_CMD_BASE + 25
1019#define SVGA_3D_CMD_END_QUERY SVGA_3D_CMD_BASE + 26
1020#define SVGA_3D_CMD_WAIT_FOR_QUERY SVGA_3D_CMD_BASE + 27
1021#define SVGA_3D_CMD_PRESENT_READBACK SVGA_3D_CMD_BASE + 28 // Deprecated
1022#define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
1023#define SVGA_3D_CMD_MAX SVGA_3D_CMD_BASE + 30
1024
1025#define SVGA_3D_CMD_FUTURE_MAX 2000
1026
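   A sketch of validating a FIFO command id against the ranges described
   above (legacy ids 1000-1039 are reserved and fall outside the accepted
   range; the helper name is hypothetical):

      static bool svga3d_cmd_id_valid(uint32 id)
      {
         return id >= SVGA_3D_CMD_BASE && id < SVGA_3D_CMD_MAX;
      }
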
1027/*
1028 * Common substructures used in multiple FIFO commands:
1029 */
1030
1031typedef struct {
1032 union {
1033 struct {
1034 uint16 function; // SVGA3dFogFunction
1035 uint8 type; // SVGA3dFogType
1036 uint8 base; // SVGA3dFogBase
1037 };
1038 uint32 uintValue;
1039 };
1040} SVGA3dFogMode;
1041
1042/*
1043 * Uniquely identify one image (a 1D/2D/3D array) from a surface. This
1044 * is a surface ID as well as face/mipmap indices.
1045 */
1046
1047typedef
1048struct SVGA3dSurfaceImageId {
1049 uint32 sid;
1050 uint32 face;
1051 uint32 mipmap;
1052} SVGA3dSurfaceImageId;
1053
1054typedef
1055struct SVGA3dGuestImage {
1056 SVGAGuestPtr ptr;
1057
1058 /*
1059 * A note on interpretation of pitch: This value of pitch is the
1060 * number of bytes between vertically adjacent image
1061 * blocks. Normally this is the number of bytes between the first
1062 * pixel of two adjacent scanlines. With compressed textures,
1063 * however, this may represent the number of bytes between
1064 * compression blocks rather than between rows of pixels.
1065 *
1066 * XXX: Compressed textures currently must be tightly packed in guest memory.
1067 *
1068 * If the image is 1-dimensional, pitch is ignored.
1069 *
1070 * If 'pitch' is zero, the SVGA3D device calculates a pitch value
1071 * assuming each row of blocks is tightly packed.
1072 */
1073 uint32 pitch;
1074} SVGA3dGuestImage;
1075
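   For an uncompressed, tightly packed image, the pitch described above
   reduces to width times bytes per pixel; passing zero would let the device
   compute the same value. A hypothetical helper:

      static uint32 svga3d_packed_pitch(uint32 width_pixels, uint32 bytes_per_pixel)
      {
         return width_pixels * bytes_per_pixel;
      }
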
1076
1077/*
1078 * FIFO command format definitions:
1079 */
1080
1081/*
1082 * The data size header following cmdNum for every 3d command
1083 */
1084typedef
1085struct {
1086 uint32 id;
1087 uint32 size;
1088} SVGA3dCmdHeader;
1089
1090/*
1091 * A surface is a hierarchy of host VRAM surfaces: 1D, 2D, or 3D, with
1092 * optional mipmaps and cube faces.
1093 */
1094
1095typedef
1096struct {
1097 uint32 width;
1098 uint32 height;
1099 uint32 depth;
1100} SVGA3dSize;
1101
1102typedef enum {
1103 SVGA3D_SURFACE_CUBEMAP = (1 << 0),
1104 SVGA3D_SURFACE_HINT_STATIC = (1 << 1),
1105 SVGA3D_SURFACE_HINT_DYNAMIC = (1 << 2),
1106 SVGA3D_SURFACE_HINT_INDEXBUFFER = (1 << 3),
1107 SVGA3D_SURFACE_HINT_VERTEXBUFFER = (1 << 4),
1108 SVGA3D_SURFACE_HINT_TEXTURE = (1 << 5),
1109 SVGA3D_SURFACE_HINT_RENDERTARGET = (1 << 6),
1110 SVGA3D_SURFACE_HINT_DEPTHSTENCIL = (1 << 7),
1111 SVGA3D_SURFACE_HINT_WRITEONLY = (1 << 8),
1112} SVGA3dSurfaceFlags;
1113
1114typedef
1115struct {
1116 uint32 numMipLevels;
1117} SVGA3dSurfaceFace;
1118
1119typedef
1120struct {
1121 uint32 sid;
1122 SVGA3dSurfaceFlags surfaceFlags;
1123 SVGA3dSurfaceFormat format;
1124 SVGA3dSurfaceFace face[SVGA3D_MAX_SURFACE_FACES];
1125 /*
1126 * Followed by an SVGA3dSize structure for each mip level in each face.
1127 *
1128 * A note on surface sizes: Sizes are always specified in pixels,
1129 * even if the true surface size is not a multiple of the minimum
1130 * block size of the surface's format. For example, a 3x3x1 DXT1
1131 * compressed texture would actually be stored as a 4x4x1 image in
1132 * memory.
1133 */
1134} SVGA3dCmdDefineSurface; /* SVGA_3D_CMD_SURFACE_DEFINE */
1135
1136typedef
1137struct {
1138 uint32 sid;
1139} SVGA3dCmdDestroySurface; /* SVGA_3D_CMD_SURFACE_DESTROY */
1140
1141typedef
1142struct {
1143 uint32 cid;
1144} SVGA3dCmdDefineContext; /* SVGA_3D_CMD_CONTEXT_DEFINE */
1145
1146typedef
1147struct {
1148 uint32 cid;
1149} SVGA3dCmdDestroyContext; /* SVGA_3D_CMD_CONTEXT_DESTROY */
1150
1151typedef
1152struct {
1153 uint32 cid;
1154 SVGA3dClearFlag clearFlag;
1155 uint32 color;
1156 float depth;
1157 uint32 stencil;
1158 /* Followed by variable number of SVGA3dRect structures */
1159} SVGA3dCmdClear; /* SVGA_3D_CMD_CLEAR */
1160
1161typedef
1162struct SVGA3dCopyRect {
1163 uint32 x;
1164 uint32 y;
1165 uint32 w;
1166 uint32 h;
1167 uint32 srcx;
1168 uint32 srcy;
1169} SVGA3dCopyRect;
1170
1171typedef
1172struct SVGA3dCopyBox {
1173 uint32 x;
1174 uint32 y;
1175 uint32 z;
1176 uint32 w;
1177 uint32 h;
1178 uint32 d;
1179 uint32 srcx;
1180 uint32 srcy;
1181 uint32 srcz;
1182} SVGA3dCopyBox;
1183
1184typedef
1185struct {
1186 uint32 x;
1187 uint32 y;
1188 uint32 w;
1189 uint32 h;
1190} SVGA3dRect;
1191
1192typedef
1193struct {
1194 uint32 x;
1195 uint32 y;
1196 uint32 z;
1197 uint32 w;
1198 uint32 h;
1199 uint32 d;
1200} SVGA3dBox;
1201
1202typedef
1203struct {
1204 uint32 x;
1205 uint32 y;
1206 uint32 z;
1207} SVGA3dPoint;
1208
1209typedef
1210struct {
1211 SVGA3dLightType type;
1212 SVGA3dBool inWorldSpace;
1213 float diffuse[4];
1214 float specular[4];
1215 float ambient[4];
1216 float position[4];
1217 float direction[4];
1218 float range;
1219 float falloff;
1220 float attenuation0;
1221 float attenuation1;
1222 float attenuation2;
1223 float theta;
1224 float phi;
1225} SVGA3dLightData;
1226
1227typedef
1228struct {
1229 uint32 sid;
1230 /* Followed by variable number of SVGA3dCopyRect structures */
1231} SVGA3dCmdPresent; /* SVGA_3D_CMD_PRESENT */
1232
1233typedef
1234struct {
1235 SVGA3dRenderStateName state;
1236 union {
1237 uint32 uintValue;
1238 float floatValue;
1239 };
1240} SVGA3dRenderState;
1241
1242typedef
1243struct {
1244 uint32 cid;
1245 /* Followed by variable number of SVGA3dRenderState structures */
1246} SVGA3dCmdSetRenderState; /* SVGA_3D_CMD_SETRENDERSTATE */
1247
1248typedef
1249struct {
1250 uint32 cid;
1251 SVGA3dRenderTargetType type;
1252 SVGA3dSurfaceImageId target;
1253} SVGA3dCmdSetRenderTarget; /* SVGA_3D_CMD_SETRENDERTARGET */
1254
1255typedef
1256struct {
1257 SVGA3dSurfaceImageId src;
1258 SVGA3dSurfaceImageId dest;
1259 /* Followed by variable number of SVGA3dCopyBox structures */
1260} SVGA3dCmdSurfaceCopy; /* SVGA_3D_CMD_SURFACE_COPY */
1261
1262typedef
1263struct {
1264 SVGA3dSurfaceImageId src;
1265 SVGA3dSurfaceImageId dest;
1266 SVGA3dBox boxSrc;
1267 SVGA3dBox boxDest;
1268 SVGA3dStretchBltMode mode;
1269} SVGA3dCmdSurfaceStretchBlt; /* SVGA_3D_CMD_SURFACE_STRETCHBLT */
1270
1271typedef
1272struct {
1273 /*
1274 * If the discard flag is present in a surface DMA operation, the host may
1275 * discard the contents of the current mipmap level and face of the target
1276 * surface before applying the surface DMA contents.
1277 */
1278 uint32 discard : 1;
1279
1280 /*
1281 * If the unsynchronized flag is present, the host may perform this upload
1282 * without syncing to pending reads on this surface.
1283 */
1284 uint32 unsynchronized : 1;
1285
1286 /*
1287 * Guests *MUST* set the reserved bits to 0 before submitting the command
1288 * suffix as future flags may occupy these bits.
1289 */
1290 uint32 reserved : 30;
1291} SVGA3dSurfaceDMAFlags;
1292
1293typedef
1294struct {
1295 SVGA3dGuestImage guest;
1296 SVGA3dSurfaceImageId host;
1297 SVGA3dTransferType transfer;
1298 /*
1299 * Followed by variable number of SVGA3dCopyBox structures. For consistency
1300 * in all clipping logic and coordinate translation, we define the
1301 * "source" in each copyBox as the guest image and the
1302 * "destination" as the host image, regardless of transfer
1303 * direction.
1304 *
1305 * For efficiency, the SVGA3D device is free to copy more data than
1306 * specified. For example, it may round copy boxes outwards such
1307 * that they lie on particular alignment boundaries.
1308 */
1309} SVGA3dCmdSurfaceDMA; /* SVGA_3D_CMD_SURFACE_DMA */
1310
1311/*
1312 * SVGA3dCmdSurfaceDMASuffix --
1313 *
1314 * This is a command suffix that will appear after a SurfaceDMA command in
1315 * the FIFO. It contains some extra information that hosts may use to
1316 * optimize performance or protect the guest. This suffix exists to preserve
1317 * backwards compatibility while also allowing for new functionality to be
1318 * implemented.
1319 */
1320
1321typedef
1322struct {
1323 uint32 suffixSize;
1324
1325 /*
1326 * The maximum offset is used to determine the maximum offset from the
1327 * guestPtr base address that will be accessed or written to during this
1328 * surfaceDMA. If the suffix is supported, the host will respect this
1329 * boundary while performing surface DMAs.
1330 *
1331 * Defaults to MAX_UINT32
1332 */
1333 uint32 maximumOffset;
1334
1335 /*
1336 * A set of flags that describes optimizations that the host may perform
1337 * while performing this surface DMA operation. The guest should never rely
1338 * on behaviour that is different when these flags are set for correctness.
1339 *
1340 * Defaults to 0
1341 */
1342 SVGA3dSurfaceDMAFlags flags;
1343} SVGA3dCmdSurfaceDMASuffix;
1344
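   A sketch of initializing the suffix to its documented defaults
   (MAX_UINT32 bound, no optimization flags); the helper is hypothetical:

      static void svga3d_dma_suffix_init(SVGA3dCmdSurfaceDMASuffix *sfx)
      {
         sfx->suffixSize = sizeof(*sfx);
         sfx->maximumOffset = 0xffffffffU;  /* default: no tighter bound */
         sfx->flags.discard = 0;
         sfx->flags.unsynchronized = 0;
         sfx->flags.reserved = 0;           /* reserved bits must be zero */
      }
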
1345/*
1346 * SVGA_3D_CMD_DRAW_PRIMITIVES --
1347 *
1348 * This command is the SVGA3D device's generic drawing entry point.
1349 * It can draw multiple ranges of primitives, optionally using an
1350 * index buffer, using an arbitrary collection of vertex buffers.
1351 *
1352 * Each SVGA3dVertexDecl defines a distinct vertex array to bind
1353 * during this draw call. The declarations specify which surface
1354 * the vertex data lives in, what that vertex data is used for,
1355 * and how to interpret it.
1356 *
1357 * Each SVGA3dPrimitiveRange defines a collection of primitives
1358 * to render using the same vertex arrays. An index buffer is
1359 * optional.
1360 */
1361
1362typedef
1363struct {
1364 /*
1365 * A range hint is an optional specification for the range of indices
1366 * in an SVGA3dArray that will be used. If 'last' is zero, it is assumed
1367 * that the entire array will be used.
1368 *
1369 * These are only hints. The SVGA3D device may use them for
1370 * performance optimization if possible, but it's also allowed to
1371 * ignore these values.
1372 */
1373 uint32 first;
1374 uint32 last;
1375} SVGA3dArrayRangeHint;
1376
1377typedef
1378struct {
1379 /*
1380 * Define the origin and shape of a vertex or index array. Both
1381 * 'offset' and 'stride' are in bytes. The provided surface will be
1382 * reinterpreted as a flat array of bytes in the same format used
1383 * by surface DMA operations. To avoid unnecessary conversions, the
1384 * surface should be created with the SVGA3D_BUFFER format.
1385 *
1386 * Index 0 in the array starts 'offset' bytes into the surface.
1387 * Index 1 begins at byte 'offset + stride', etc. Array indices may
1388 * not be negative.
1389 */
1390 uint32 surfaceId;
1391 uint32 offset;
1392 uint32 stride;
1393} SVGA3dArray;
1394
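   Per the layout above, element i of an SVGA3dArray starts at a fixed byte
   offset; a hypothetical helper:

      static uint32 svga3d_array_byte_offset(const SVGA3dArray *a, uint32 i)
      {
         /* Index 0 at 'offset', index 1 at 'offset + stride', and so on. */
         return a->offset + i * a->stride;
      }
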
1395typedef
1396struct {
1397 /*
1398 * Describe a vertex array's data type, and define how it is to be
1399 * used by the fixed function pipeline or the vertex shader. It
1400 * isn't useful to have two VertexDecls with the same
1401 * VertexArrayIdentity in one draw call.
1402 */
1403 SVGA3dDeclType type;
1404 SVGA3dDeclMethod method;
1405 SVGA3dDeclUsage usage;
1406 uint32 usageIndex;
1407} SVGA3dVertexArrayIdentity;
1408
1409typedef
1410struct {
1411 SVGA3dVertexArrayIdentity identity;
1412 SVGA3dArray array;
1413 SVGA3dArrayRangeHint rangeHint;
1414} SVGA3dVertexDecl;
1415
1416typedef
1417struct {
1418 /*
1419 * Define a group of primitives to render, from sequential indices.
1420 *
1421 * The value of 'primType' and 'primitiveCount' imply the
1422 * total number of vertices that will be rendered.
1423 */
1424 SVGA3dPrimitiveType primType;
1425 uint32 primitiveCount;
1426
1427 /*
1428 * Optional index buffer. If indexArray.surfaceId is
1429 * SVGA3D_INVALID_ID, we render without an index buffer. Rendering
1430 * without an index buffer is identical to rendering with an index
1431 * buffer containing the sequence [0, 1, 2, 3, ...].
1432 *
1433 * If an index buffer is in use, indexWidth specifies the width in
1434 * bytes of each index value. It must be less than or equal to
1435 * indexArray.stride.
1436 *
1437 * (Currently, the SVGA3D device requires index buffers to be tightly
1438 * packed. In other words, indexWidth == indexArray.stride)
1439 */
1440 SVGA3dArray indexArray;
1441 uint32 indexWidth;
1442
1443 /*
1444 * Optional index bias. This number is added to all indices from
1445 * indexArray before they are used as vertex array indices. This
1446 * can be used in multiple ways:
1447 *
1448 * - When not using an indexArray, this bias can be used to
1449 * specify where in the vertex arrays to begin rendering.
1450 *
1451 * - A positive number here is equivalent to increasing the
1452 * offset in each vertex array.
1453 *
1454 * - A negative number can be used to render using a small
1455 * vertex array and an index buffer that contains large
1456 * values. This may be used by some applications that
1457 * crop a vertex buffer without modifying their index
1458 * buffer.
1459 *
1460 * Note that rendering with a negative bias value may be slower and
1461 * use more memory than rendering with a positive or zero bias.
1462 */
1463 int32 indexBias;
1464} SVGA3dPrimitiveRange;
1465
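   The comment above notes that primType and primitiveCount imply the vertex
   count; the standard mapping, sketched with a hypothetical helper:

      static uint32 svga3d_vertex_count(SVGA3dPrimitiveType type, uint32 prims)
      {
         switch (type) {
         case SVGA3D_PRIMITIVE_POINTLIST:     return prims;
         case SVGA3D_PRIMITIVE_LINELIST:      return 2 * prims;
         case SVGA3D_PRIMITIVE_LINESTRIP:     return prims + 1;
         case SVGA3D_PRIMITIVE_TRIANGLELIST:  return 3 * prims;
         case SVGA3D_PRIMITIVE_TRIANGLESTRIP:
         case SVGA3D_PRIMITIVE_TRIANGLEFAAN:  return prims + 2;
         default:                             return 0;
         }
      }
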
1466typedef
1467struct {
1468 uint32 cid;
1469 uint32 numVertexDecls;
1470 uint32 numRanges;
1471
1472 /*
1473 * There are two variable size arrays after the
1474 * SVGA3dCmdDrawPrimitives structure. In order,
1475 * they are:
1476 *
1477 * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
1478 * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
1479 * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
1480 * the frequency divisor for the corresponding vertex decl)
1481 */
1482} SVGA3dCmdDrawPrimitives; /* SVGA_3D_CMD_DRAWPRIMITIVES */
1483
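   The body size of a DRAW_PRIMITIVES command follows from the layout
   enumerated above; a sketch (the include_divisors flag is an assumption
   about how a caller would opt in to the optional third array):

      static uint32 svga3d_draw_body_size(uint32 decls, uint32 ranges,
                                          bool include_divisors)
      {
         uint32 size = sizeof(SVGA3dCmdDrawPrimitives);

         size += decls * sizeof(SVGA3dVertexDecl);
         size += ranges * sizeof(SVGA3dPrimitiveRange);
         if (include_divisors)
            size += decls * sizeof(SVGA3dVertexDivisor);
         return size;
      }
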
1484typedef
1485struct {
1486 uint32 stage;
1487 SVGA3dTextureStateName name;
1488 union {
1489 uint32 value;
1490 float floatValue;
1491 };
1492} SVGA3dTextureState;
1493
1494typedef
1495struct {
1496 uint32 cid;
1497 /* Followed by variable number of SVGA3dTextureState structures */
1498} SVGA3dCmdSetTextureState; /* SVGA_3D_CMD_SETTEXTURESTATE */
1499
1500typedef
1501struct {
1502 uint32 cid;
1503 SVGA3dTransformType type;
1504 float matrix[16];
1505} SVGA3dCmdSetTransform; /* SVGA_3D_CMD_SETTRANSFORM */
1506
1507typedef
1508struct {
1509 float min;
1510 float max;
1511} SVGA3dZRange;
1512
1513typedef
1514struct {
1515 uint32 cid;
1516 SVGA3dZRange zRange;
1517} SVGA3dCmdSetZRange; /* SVGA_3D_CMD_SETZRANGE */
1518
1519typedef
1520struct {
1521 float diffuse[4];
1522 float ambient[4];
1523 float specular[4];
1524 float emissive[4];
1525 float shininess;
1526} SVGA3dMaterial;
1527
1528typedef
1529struct {
1530 uint32 cid;
1531 SVGA3dFace face;
1532 SVGA3dMaterial material;
1533} SVGA3dCmdSetMaterial; /* SVGA_3D_CMD_SETMATERIAL */
1534
1535typedef
1536struct {
1537 uint32 cid;
1538 uint32 index;
1539 SVGA3dLightData data;
1540} SVGA3dCmdSetLightData; /* SVGA_3D_CMD_SETLIGHTDATA */
1541
1542typedef
1543struct {
1544 uint32 cid;
1545 uint32 index;
1546 uint32 enabled;
1547} SVGA3dCmdSetLightEnabled; /* SVGA_3D_CMD_SETLIGHTENABLED */
1548
1549typedef
1550struct {
1551 uint32 cid;
1552 SVGA3dRect rect;
1553} SVGA3dCmdSetViewport; /* SVGA_3D_CMD_SETVIEWPORT */
1554
1555typedef
1556struct {
1557 uint32 cid;
1558 SVGA3dRect rect;
1559} SVGA3dCmdSetScissorRect; /* SVGA_3D_CMD_SETSCISSORRECT */
1560
1561typedef
1562struct {
1563 uint32 cid;
1564 uint32 index;
1565 float plane[4];
1566} SVGA3dCmdSetClipPlane; /* SVGA_3D_CMD_SETCLIPPLANE */
1567
1568typedef
1569struct {
1570 uint32 cid;
1571 uint32 shid;
1572 SVGA3dShaderType type;
1573 /* Followed by variable number of DWORDs for shader bytecode */
1574} SVGA3dCmdDefineShader; /* SVGA_3D_CMD_SHADER_DEFINE */
1575
1576typedef
1577struct {
1578 uint32 cid;
1579 uint32 shid;
1580 SVGA3dShaderType type;
1581} SVGA3dCmdDestroyShader; /* SVGA_3D_CMD_SHADER_DESTROY */
1582
1583typedef
1584struct {
1585 uint32 cid;
1586 uint32 reg; /* register number */
1587 SVGA3dShaderType type;
1588 SVGA3dShaderConstType ctype;
1589 uint32 values[4];
1590} SVGA3dCmdSetShaderConst; /* SVGA_3D_CMD_SET_SHADER_CONST */
1591
1592typedef
1593struct {
1594 uint32 cid;
1595 SVGA3dShaderType type;
1596 uint32 shid;
1597} SVGA3dCmdSetShader; /* SVGA_3D_CMD_SET_SHADER */
1598
1599typedef
1600struct {
1601 uint32 cid;
1602 SVGA3dQueryType type;
1603} SVGA3dCmdBeginQuery; /* SVGA_3D_CMD_BEGIN_QUERY */
1604
1605typedef
1606struct {
1607 uint32 cid;
1608 SVGA3dQueryType type;
1609 SVGAGuestPtr guestResult; /* Points to an SVGA3dQueryResult structure */
1610} SVGA3dCmdEndQuery; /* SVGA_3D_CMD_END_QUERY */
1611
1612typedef
1613struct {
1614 uint32 cid; /* Same parameters passed to END_QUERY */
1615 SVGA3dQueryType type;
1616 SVGAGuestPtr guestResult;
1617} SVGA3dCmdWaitForQuery; /* SVGA_3D_CMD_WAIT_FOR_QUERY */
1618
1619typedef
1620struct {
1621 uint32 totalSize; /* Set by guest before query is ended. */
1622 SVGA3dQueryState state; /* Set by host or guest. See SVGA3dQueryState. */
1623 union { /* Set by host on exit from PENDING state */
1624 uint32 result32;
1625 };
1626} SVGA3dQueryResult;
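
/*
 * Illustrative sketch (not part of the original header): a guest-side
 * poll of a query result. 'result' is the structure named by
 * SVGA3dCmdEndQuery::guestResult; SVGA3D_QUERYSTATE_PENDING comes from
 * the SVGA3dQueryState enum defined earlier in this file.
 */
static inline Bool
svga3d_query_done(volatile SVGA3dQueryResult *result, uint32 *value)
{
   if (result->state == SVGA3D_QUERYSTATE_PENDING)
      return 0;                  /* Host has not completed the query */
   *value = result->result32;    /* Valid once state leaves PENDING */
   return 1;
}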
1627
1628/*
1629 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN --
1630 *
1631 * This is a blit from an SVGA3D surface to a Screen Object. Just
1632 * like GMR-to-screen blits, this blit may be directed at a
1633 * specific screen or to the virtual coordinate space.
1634 *
1635 * The blit copies from a rectangular region of an SVGA3D surface
1636 * image to a rectangular region of a screen or screens.
1637 *
1638 * This command takes an optional variable-length list of clipping
1639 * rectangles after the body of the command. If no rectangles are
1640 * specified, there is no clipping region. The entire destRect is
1641 * drawn to. If one or more rectangles are included, they describe
1642 * a clipping region. The clip rectangle coordinates are measured
1643 * relative to the top-left corner of destRect.
1644 *
1645 * This clipping region serves multiple purposes:
1646 *
1647 * - It can be used to perform an irregularly shaped blit more
1648 * efficiently than by issuing many separate blit commands.
1649 *
1650 * - It is equivalent to allowing blits with non-integer
1651 * source coordinates. You could blit just one half-pixel
1652 * of a source, for example, by specifying a larger
1653 * destination rectangle than you need, then removing
1654 * part of it using a clip rectangle.
1655 *
1656 * Availability:
1657 * SVGA_FIFO_CAP_SCREEN_OBJECT
1658 *
1659 * Limitations:
1660 *
1661 * - Currently, no backend supports blits from a mipmap or face
1662 * other than the first one.
1663 */
1664
1665typedef
1666struct {
1667 SVGA3dSurfaceImageId srcImage;
1668 SVGASignedRect srcRect;
1669 uint32 destScreenId; /* Screen ID or SVGA_ID_INVALID for virt. coords */
1670 SVGASignedRect destRect; /* Supports scaling if src/dest differ in size */
1671 /* Clipping: zero or more SVGASignedRects follow */
1672} SVGA3dCmdBlitSurfaceToScreen; /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
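
/*
 * Illustrative sketch (not part of the original header): clipping a
 * blit to the left half of destRect, per the half-pixel technique
 * described above. Appending the clip rectangle to the command in
 * FIFO memory is plumbing omitted here.
 */
static inline void
svga3d_clip_left_half(const SVGA3dCmdBlitSurfaceToScreen *cmd,
                      SVGASignedRect *clip)
{
   /* Clip coordinates are relative to destRect's top-left corner */
   clip->left = 0;
   clip->top = 0;
   clip->right = (cmd->destRect.right - cmd->destRect.left) / 2;
   clip->bottom = cmd->destRect.bottom - cmd->destRect.top;
}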
1673
1674
1675/*
1676 * Capability query index.
1677 *
1678 * Notes:
1679 *
1680 * 1. SVGA3D_DEVCAP_MAX_TEXTURES reflects the maximum number of
1681 * fixed-function texture units available. Each of these units
1682 * works in both FFP and Shader modes, and supports texture
1683 * transforms and texture coordinates. The host may have additional
1684 * texture image units that are only usable with shaders.
1685 *
1686 * 2. The BUFFER_FORMAT capabilities are deprecated, and they always
1687 * return TRUE. Even on physical hardware that does not support
1688 * these formats natively, the SVGA3D device will provide an emulation
1689 * which should be invisible to the guest OS.
1690 *
1691 * In general, the SVGA3D device should support any operation on
1692 * any surface format; it may simply perform some of these
1693 * operations in software, depending on the capabilities of the
1694 * available physical hardware.
1695 *
1696 * XXX: In the future, we will add capabilities that describe in
1697 * detail what formats are supported in hardware for what kinds
1698 * of operations.
1699 */
1700
1701typedef enum {
1702 SVGA3D_DEVCAP_3D = 0,
1703 SVGA3D_DEVCAP_MAX_LIGHTS = 1,
1704 SVGA3D_DEVCAP_MAX_TEXTURES = 2, /* See note (1) */
1705 SVGA3D_DEVCAP_MAX_CLIP_PLANES = 3,
1706 SVGA3D_DEVCAP_VERTEX_SHADER_VERSION = 4,
1707 SVGA3D_DEVCAP_VERTEX_SHADER = 5,
1708 SVGA3D_DEVCAP_FRAGMENT_SHADER_VERSION = 6,
1709 SVGA3D_DEVCAP_FRAGMENT_SHADER = 7,
1710 SVGA3D_DEVCAP_MAX_RENDER_TARGETS = 8,
1711 SVGA3D_DEVCAP_S23E8_TEXTURES = 9,
1712 SVGA3D_DEVCAP_S10E5_TEXTURES = 10,
1713 SVGA3D_DEVCAP_MAX_FIXED_VERTEXBLEND = 11,
1714 SVGA3D_DEVCAP_D16_BUFFER_FORMAT = 12, /* See note (2) */
1715 SVGA3D_DEVCAP_D24S8_BUFFER_FORMAT = 13, /* See note (2) */
1716 SVGA3D_DEVCAP_D24X8_BUFFER_FORMAT = 14, /* See note (2) */
1717 SVGA3D_DEVCAP_QUERY_TYPES = 15,
1718 SVGA3D_DEVCAP_TEXTURE_GRADIENT_SAMPLING = 16,
1719 SVGA3D_DEVCAP_MAX_POINT_SIZE = 17,
1720 SVGA3D_DEVCAP_MAX_SHADER_TEXTURES = 18,
1721 SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH = 19,
1722 SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT = 20,
1723 SVGA3D_DEVCAP_MAX_VOLUME_EXTENT = 21,
1724 SVGA3D_DEVCAP_MAX_TEXTURE_REPEAT = 22,
1725 SVGA3D_DEVCAP_MAX_TEXTURE_ASPECT_RATIO = 23,
1726 SVGA3D_DEVCAP_MAX_TEXTURE_ANISOTROPY = 24,
1727 SVGA3D_DEVCAP_MAX_PRIMITIVE_COUNT = 25,
1728 SVGA3D_DEVCAP_MAX_VERTEX_INDEX = 26,
1729 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_INSTRUCTIONS = 27,
1730 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_INSTRUCTIONS = 28,
1731 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEMPS = 29,
1732 SVGA3D_DEVCAP_MAX_FRAGMENT_SHADER_TEMPS = 30,
1733 SVGA3D_DEVCAP_TEXTURE_OPS = 31,
1734 SVGA3D_DEVCAP_SURFACEFMT_X8R8G8B8 = 32,
1735 SVGA3D_DEVCAP_SURFACEFMT_A8R8G8B8 = 33,
1736 SVGA3D_DEVCAP_SURFACEFMT_A2R10G10B10 = 34,
1737 SVGA3D_DEVCAP_SURFACEFMT_X1R5G5B5 = 35,
1738 SVGA3D_DEVCAP_SURFACEFMT_A1R5G5B5 = 36,
1739 SVGA3D_DEVCAP_SURFACEFMT_A4R4G4B4 = 37,
1740 SVGA3D_DEVCAP_SURFACEFMT_R5G6B5 = 38,
1741 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE16 = 39,
1742 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8_ALPHA8 = 40,
1743 SVGA3D_DEVCAP_SURFACEFMT_ALPHA8 = 41,
1744 SVGA3D_DEVCAP_SURFACEFMT_LUMINANCE8 = 42,
1745 SVGA3D_DEVCAP_SURFACEFMT_Z_D16 = 43,
1746 SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8 = 44,
1747 SVGA3D_DEVCAP_SURFACEFMT_Z_D24X8 = 45,
1748 SVGA3D_DEVCAP_SURFACEFMT_DXT1 = 46,
1749 SVGA3D_DEVCAP_SURFACEFMT_DXT2 = 47,
1750 SVGA3D_DEVCAP_SURFACEFMT_DXT3 = 48,
1751 SVGA3D_DEVCAP_SURFACEFMT_DXT4 = 49,
1752 SVGA3D_DEVCAP_SURFACEFMT_DXT5 = 50,
1753 SVGA3D_DEVCAP_SURFACEFMT_BUMPX8L8V8U8 = 51,
1754 SVGA3D_DEVCAP_SURFACEFMT_A2W10V10U10 = 52,
1755 SVGA3D_DEVCAP_SURFACEFMT_BUMPU8V8 = 53,
1756 SVGA3D_DEVCAP_SURFACEFMT_Q8W8V8U8 = 54,
1757 SVGA3D_DEVCAP_SURFACEFMT_CxV8U8 = 55,
1758 SVGA3D_DEVCAP_SURFACEFMT_R_S10E5 = 56,
1759 SVGA3D_DEVCAP_SURFACEFMT_R_S23E8 = 57,
1760 SVGA3D_DEVCAP_SURFACEFMT_RG_S10E5 = 58,
1761 SVGA3D_DEVCAP_SURFACEFMT_RG_S23E8 = 59,
1762 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S10E5 = 60,
1763 SVGA3D_DEVCAP_SURFACEFMT_ARGB_S23E8 = 61,
1764 SVGA3D_DEVCAP_MAX_VERTEX_SHADER_TEXTURES = 63,
1765
1766 /*
1767 * Note that MAX_SIMULTANEOUS_RENDER_TARGETS is a maximum count of color
1768 * render targets. This does not include the depth or stencil targets.
1769 */
1770 SVGA3D_DEVCAP_MAX_SIMULTANEOUS_RENDER_TARGETS = 64,
1771
1772 SVGA3D_DEVCAP_SURFACEFMT_V16U16 = 65,
1773 SVGA3D_DEVCAP_SURFACEFMT_G16R16 = 66,
1774 SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16 = 67,
1775 SVGA3D_DEVCAP_SURFACEFMT_UYVY = 68,
1776 SVGA3D_DEVCAP_SURFACEFMT_YUY2 = 69,
1777
1778 /*
1779 * Don't add new caps into the previous section; the values in this
1780 * enumeration must not change. You can put new values right before
1781 * SVGA3D_DEVCAP_MAX.
1782 */
1783 SVGA3D_DEVCAP_MAX /* This must be the last index. */
1784} SVGA3dDevCapIndex;
1785
1786typedef union {
1787 Bool b;
1788 uint32 u;
1789 int32 i;
1790 float f;
1791} SVGA3dDevCapResult;
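
/*
 * Illustrative sketch (not part of the original header): interpreting
 * devcap results. Which union member is meaningful depends on the
 * index; the two shown here follow the notes above, and obtaining the
 * raw values (e.g. from the FIFO 3D caps block) is out of scope.
 */
static inline Bool
svga3d_cap_is_3d(SVGA3dDevCapResult result)
{
   return result.b;      /* SVGA3D_DEVCAP_3D is a boolean cap */
}

static inline float
svga3d_cap_max_point_size(SVGA3dDevCapResult result)
{
   return result.f;      /* SVGA3D_DEVCAP_MAX_POINT_SIZE is a float */
}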
1792
1793#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_escape.h b/drivers/gpu/drm/vmwgfx/svga_escape.h
new file mode 100644
index 000000000000..7b85e9b8c854
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_escape.h
@@ -0,0 +1,89 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_escape.h --
28 *
29 * Definitions for our own (vendor-specific) SVGA Escape commands.
30 */
31
32#ifndef _SVGA_ESCAPE_H_
33#define _SVGA_ESCAPE_H_
34
35
36/*
37 * Namespace IDs for the escape command
38 */
39
40#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
41#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
42
43
44/*
45 * Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
46 * the first DWORD of escape data (after the nsID and size). As a
47 * guideline we're using the high word and low word as a major and
48 * minor command number, respectively.
49 *
50 * Major command number allocation:
51 *
52 * 0000: Reserved
53 * 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
54 * 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
55 * 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
56 */
57
58#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
59
60
61/*
62 * SVGA Hint commands.
63 *
64 * These escapes let the SVGA driver provide optional information to
65 * the host about the state of the guest or guest applications. The
66 * host can use these hints to make user interface or performance
67 * decisions.
68 *
69 * Notes:
70 *
71 * - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
72 * that use the SVGA Screen Object extension. Instead of sending
73 * this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
74 * Screen Object.
75 */
76
77#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
78#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 // Deprecated
79
80typedef
81struct {
82 uint32 command;
83 uint32 fullscreen;
84 struct {
85 int32 x, y;
86 } monitorPosition;
87} SVGAEscapeHintFullscreen;
88
89#endif /* _SVGA_ESCAPE_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga_overlay.h b/drivers/gpu/drm/vmwgfx/svga_overlay.h
new file mode 100644
index 000000000000..f753d73c14b4
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_overlay.h
@@ -0,0 +1,201 @@
1/**********************************************************
2 * Copyright 2007-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_overlay.h --
28 *
29 * Definitions for video-overlay support.
30 */
31
32#ifndef _SVGA_OVERLAY_H_
33#define _SVGA_OVERLAY_H_
34
35#include "svga_reg.h"
36
37/*
38 * Video formats we support
39 */
40
41#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
42#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
43#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
44
45typedef enum {
46 SVGA_OVERLAY_FORMAT_INVALID = 0,
47 SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
48 SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
49 SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
50} SVGAOverlayFormat;
51
52#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
53
54#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
55
56#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
57 /* FIFO escape layout:
58 * Type, Stream Id, (Register Id, Value) pairs */
59
60#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
61 /* FIFO escape layout:
62 * Type, Stream Id */
63
64typedef
65struct SVGAEscapeVideoSetRegs {
66 struct {
67 uint32 cmdType;
68 uint32 streamId;
69 } header;
70
71 // May include zero or more items.
72 struct {
73 uint32 registerId;
74 uint32 value;
75 } items[1];
76} SVGAEscapeVideoSetRegs;
77
78typedef
79struct SVGAEscapeVideoFlush {
80 uint32 cmdType;
81 uint32 streamId;
82} SVGAEscapeVideoFlush;
83
84
85/*
86 * Struct definitions for the video overlay commands built on
87 * SVGAFifoCmdEscape.
88 */
89typedef
90struct {
91 uint32 command;
92 uint32 overlay;
93} SVGAFifoEscapeCmdVideoBase;
94
95typedef
96struct {
97 SVGAFifoEscapeCmdVideoBase videoCmd;
98} SVGAFifoEscapeCmdVideoFlush;
99
100typedef
101struct {
102 SVGAFifoEscapeCmdVideoBase videoCmd;
103 struct {
104 uint32 regId;
105 uint32 value;
106 } items[1];
107} SVGAFifoEscapeCmdVideoSetRegs;
108
109typedef
110struct {
111 SVGAFifoEscapeCmdVideoBase videoCmd;
112 struct {
113 uint32 regId;
114 uint32 value;
115 } items[SVGA_VIDEO_NUM_REGS];
116} SVGAFifoEscapeCmdVideoSetAllRegs;
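
/*
 * Illustrative sketch (not part of the original header): filling in a
 * one-register SVGAEscapeVideoSetRegs payload. Wrapping it in an
 * SVGAFifoCmdEscape with nsid SVGA_ESCAPE_NSID_VMWARE (see
 * svga_escape.h) is assumed to be done by the caller.
 */
static inline void
svga_video_set_one_reg(SVGAEscapeVideoSetRegs *cmd, uint32 streamId,
                       uint32 registerId, uint32 value)
{
   cmd->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
   cmd->header.streamId = streamId;
   cmd->items[0].registerId = registerId;
   cmd->items[0].value = value;
}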
117
118
119/*
120 *----------------------------------------------------------------------
121 *
122 * VMwareVideoGetAttributes --
123 *
124 * Computes the size, pitches and offsets for YUV frames.
125 *
126 * Results:
127 * TRUE on success; FALSE on failure.
128 *
129 * Side effects:
130 * Pitches and offsets for the given YUV frame are put in 'pitches'
131 * and 'offsets', respectively; both are optional.
132 *
133 *----------------------------------------------------------------------
134 */
135
136static inline bool
137VMwareVideoGetAttributes(const SVGAOverlayFormat format, // IN
138 uint32 *width, // IN / OUT
139 uint32 *height, // IN / OUT
140 uint32 *size, // OUT
141 uint32 *pitches, // OUT (optional)
142 uint32 *offsets) // OUT (optional)
143{
144 int tmp;
145
146 *width = (*width + 1) & ~1;
147
148 if (offsets) {
149 offsets[0] = 0;
150 }
151
152 switch (format) {
153 case VMWARE_FOURCC_YV12:
154 *height = (*height + 1) & ~1;
155 *size = (*width + 3) & ~3;
156
157 if (pitches) {
158 pitches[0] = *size;
159 }
160
161 *size *= *height;
162
163 if (offsets) {
164 offsets[1] = *size;
165 }
166
167 tmp = ((*width >> 1) + 3) & ~3;
168
169 if (pitches) {
170 pitches[1] = pitches[2] = tmp;
171 }
172
173 tmp *= (*height >> 1);
174 *size += tmp;
175
176 if (offsets) {
177 offsets[2] = *size;
178 }
179
180 *size += tmp;
181 break;
182
183 case VMWARE_FOURCC_YUY2:
184 case VMWARE_FOURCC_UYVY:
185 *size = *width * 2;
186
187 if (pitches) {
188 pitches[0] = *size;
189 }
190
191 *size *= *height;
192 break;
193
194 default:
195 return false;
196 }
197
198 return true;
199}
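
/*
 * Illustrative usage sketch (not part of the original header):
 * computing the buffer size and plane layout of a 640x480 YV12 frame;
 * the dimensions are example values only.
 */
static inline uint32
svga_yv12_example_size(void)
{
   uint32 width = 640, height = 480, size;
   uint32 pitches[3], offsets[3];

   if (!VMwareVideoGetAttributes(SVGA_OVERLAY_FORMAT_YV12, &width,
                                 &height, &size, pitches, offsets))
      return 0;
   /* offsets[1] and offsets[2] locate the two chroma planes */
   return size;
}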
200
201#endif // _SVGA_OVERLAY_H_
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
new file mode 100644
index 000000000000..1b96c2ec07dd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -0,0 +1,1346 @@
1/**********************************************************
2 * Copyright 1998-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26/*
27 * svga_reg.h --
28 *
29 * Virtual hardware definitions for the VMware SVGA II device.
30 */
31
32#ifndef _SVGA_REG_H_
33#define _SVGA_REG_H_
34
35/*
36 * PCI device IDs.
37 */
38#define PCI_VENDOR_ID_VMWARE 0x15AD
39#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
40
41/*
42 * Legal values for the SVGA_REG_CURSOR_ON register in old-fashioned
43 * cursor bypass mode. This is still supported, but no new guest
44 * drivers should use it.
45 */
46#define SVGA_CURSOR_ON_HIDE 0x0 /* Must be 0 to maintain backward compatibility */
47#define SVGA_CURSOR_ON_SHOW 0x1 /* Must be 1 to maintain backward compatibility */
48#define SVGA_CURSOR_ON_REMOVE_FROM_FB 0x2 /* Remove the cursor from the framebuffer because we need to see what's under it */
49#define SVGA_CURSOR_ON_RESTORE_TO_FB 0x3 /* Put the cursor back in the framebuffer so the user can see it */
50
51/*
52 * The maximum framebuffer size that can be traced, e.g. for guests in VESA mode.
53 * The changeMap in the monitor is proportional to this number. Therefore, we'd
54 * like to keep it as small as possible to reduce monitor overhead (using
55 * SVGA_VRAM_MAX_SIZE for this increases the size of the shared area by over
56 * 4k!).
57 *
58 * NB: For compatibility reasons, this value must be greater than 0xff0000.
59 * See bug 335072.
60 */
61#define SVGA_FB_MAX_TRACEABLE_SIZE 0x1000000
62
63#define SVGA_MAX_PSEUDOCOLOR_DEPTH 8
64#define SVGA_MAX_PSEUDOCOLORS (1 << SVGA_MAX_PSEUDOCOLOR_DEPTH)
65#define SVGA_NUM_PALETTE_REGS (3 * SVGA_MAX_PSEUDOCOLORS)
66
67#define SVGA_MAGIC 0x900000UL
68#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
69
70/* Version 2 let the address of the frame buffer be unsigned on Win32 */
71#define SVGA_VERSION_2 2
72#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
73
74/* Version 1 has new registers starting with SVGA_REG_CAPABILITIES so
75 PALETTE_BASE has moved */
76#define SVGA_VERSION_1 1
77#define SVGA_ID_1 SVGA_MAKE_ID(SVGA_VERSION_1)
78
79/* Version 0 is the initial version */
80#define SVGA_VERSION_0 0
81#define SVGA_ID_0 SVGA_MAKE_ID(SVGA_VERSION_0)
82
83/* "Invalid" value for all SVGA IDs. (Version ID, screen object ID, surface ID...) */
84#define SVGA_ID_INVALID 0xFFFFFFFF
85
86/* Port offsets, relative to BAR0 */
87#define SVGA_INDEX_PORT 0x0
88#define SVGA_VALUE_PORT 0x1
89#define SVGA_BIOS_PORT 0x2
90#define SVGA_IRQSTATUS_PORT 0x8
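
/*
 * Illustrative sketch (not part of the original header): accessing an
 * SVGA register through the index/value port pair. 'io_base' is the
 * I/O base from BAR0 and outl()/inl() are the usual port accessors
 * (<asm/io.h>); the caller is assumed to serialize index/value pairs.
 */
static inline uint32
svga_read_reg(unsigned long io_base, uint32 index)
{
   outl(index, io_base + SVGA_INDEX_PORT);
   return inl(io_base + SVGA_VALUE_PORT);
}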
91
92/*
93 * Interrupt source flags for IRQSTATUS_PORT and IRQMASK.
94 *
95 * Interrupts are only supported when the
96 * SVGA_CAP_IRQMASK capability is present.
97 */
98#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
99#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
100#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
101
102/*
103 * Registers
104 */
105
106enum {
107 SVGA_REG_ID = 0,
108 SVGA_REG_ENABLE = 1,
109 SVGA_REG_WIDTH = 2,
110 SVGA_REG_HEIGHT = 3,
111 SVGA_REG_MAX_WIDTH = 4,
112 SVGA_REG_MAX_HEIGHT = 5,
113 SVGA_REG_DEPTH = 6,
114 SVGA_REG_BITS_PER_PIXEL = 7, /* Current bpp in the guest */
115 SVGA_REG_PSEUDOCOLOR = 8,
116 SVGA_REG_RED_MASK = 9,
117 SVGA_REG_GREEN_MASK = 10,
118 SVGA_REG_BLUE_MASK = 11,
119 SVGA_REG_BYTES_PER_LINE = 12,
120 SVGA_REG_FB_START = 13, /* (Deprecated) */
121 SVGA_REG_FB_OFFSET = 14,
122 SVGA_REG_VRAM_SIZE = 15,
123 SVGA_REG_FB_SIZE = 16,
124
125 /* ID 0 implementation only had the above registers, then the palette */
126
127 SVGA_REG_CAPABILITIES = 17,
128 SVGA_REG_MEM_START = 18, /* (Deprecated) */
129 SVGA_REG_MEM_SIZE = 19,
130 SVGA_REG_CONFIG_DONE = 20, /* Set when memory area configured */
131 SVGA_REG_SYNC = 21, /* See "FIFO Synchronization Registers" */
132 SVGA_REG_BUSY = 22, /* See "FIFO Synchronization Registers" */
133 SVGA_REG_GUEST_ID = 23, /* Set guest OS identifier */
134 SVGA_REG_CURSOR_ID = 24, /* (Deprecated) */
135 SVGA_REG_CURSOR_X = 25, /* (Deprecated) */
136 SVGA_REG_CURSOR_Y = 26, /* (Deprecated) */
137 SVGA_REG_CURSOR_ON = 27, /* (Deprecated) */
138 SVGA_REG_HOST_BITS_PER_PIXEL = 28, /* (Deprecated) */
139 SVGA_REG_SCRATCH_SIZE = 29, /* Number of scratch registers */
140 SVGA_REG_MEM_REGS = 30, /* Number of FIFO registers */
141 SVGA_REG_NUM_DISPLAYS = 31, /* (Deprecated) */
142 SVGA_REG_PITCHLOCK = 32, /* Fixed pitch for all modes */
143 SVGA_REG_IRQMASK = 33, /* Interrupt mask */
144
145 /* Legacy multi-monitor support */
146 SVGA_REG_NUM_GUEST_DISPLAYS = 34,/* Number of guest displays in X/Y direction */
147 SVGA_REG_DISPLAY_ID = 35, /* Display ID for the following display attributes */
148 SVGA_REG_DISPLAY_IS_PRIMARY = 36,/* Whether this is a primary display */
149 SVGA_REG_DISPLAY_POSITION_X = 37,/* The display position x */
150 SVGA_REG_DISPLAY_POSITION_Y = 38,/* The display position y */
151 SVGA_REG_DISPLAY_WIDTH = 39, /* The display's width */
152 SVGA_REG_DISPLAY_HEIGHT = 40, /* The display's height */
153
154 /* See "Guest memory regions" below. */
155 SVGA_REG_GMR_ID = 41,
156 SVGA_REG_GMR_DESCRIPTOR = 42,
157 SVGA_REG_GMR_MAX_IDS = 43,
158 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH = 44,
159
160 SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
161 SVGA_REG_TOP = 46, /* Must be 1 more than the last register */
162
163 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
164 /* Next 768 (== 256*3) registers exist for colormap */
165
166 SVGA_SCRATCH_BASE = SVGA_PALETTE_BASE + SVGA_NUM_PALETTE_REGS
167 /* Base of scratch registers */
168 /* Next reg[SVGA_REG_SCRATCH_SIZE] registers exist for scratch usage:
169 First 4 are reserved for VESA BIOS Extension; any remaining are for
170 the use of the current SVGA driver. */
171};
172
173
174/*
175 * Guest memory regions (GMRs):
176 *
177 * This is a new memory mapping feature available in SVGA devices
178 * which have the SVGA_CAP_GMR bit set. Previously, there were two
179 * fixed memory regions available with which to share data between the
180 * device and the driver: the FIFO ('MEM') and the framebuffer. GMRs
181 * are our name for an extensible way of providing arbitrary DMA
182 * buffers for use between the driver and the SVGA device. They are a
183 * new alternative to framebuffer memory, usable for both 2D and 3D
184 * graphics operations.
185 *
186 * Since GMR mapping must be done synchronously with guest CPU
187 * execution, we use a new pair of SVGA registers:
188 *
189 * SVGA_REG_GMR_ID --
190 *
191 * Read/write.
192 * This register holds the 32-bit ID (a small positive integer)
193 * of a GMR to create, delete, or redefine. Writing this register
194 * has no side-effects.
195 *
196 * SVGA_REG_GMR_DESCRIPTOR --
197 *
198 * Write-only.
199 * Writing this register will create, delete, or redefine the GMR
200 * specified by the above ID register. If this register is zero,
201 * the GMR is deleted. Any pointers into this GMR (including those
202 * currently being processed by FIFO commands) will be
203 * synchronously invalidated.
204 *
205 * If this register is nonzero, it must be the physical page
206 * number (PPN) of a data structure which describes the physical
207 * layout of the memory region this GMR should describe. The
208 * descriptor structure will be read synchronously by the SVGA
209 * device when this register is written. The descriptor need not
210 * remain allocated for the lifetime of the GMR.
211 *
212 * The guest driver should write SVGA_REG_GMR_ID first, then
213 * SVGA_REG_GMR_DESCRIPTOR.
214 *
215 * SVGA_REG_GMR_MAX_IDS --
216 *
217 * Read-only.
218 * The SVGA device may choose to support a maximum number of
219 * user-defined GMR IDs. This register holds the number of supported
220 * IDs. (The maximum supported ID plus 1)
221 *
222 * SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH --
223 *
224 * Read-only.
225 * The SVGA device may choose to put a limit on the total number
226 * of SVGAGuestMemDescriptor structures it will read when defining
227 * a single GMR.
228 *
229 * The descriptor structure is an array of SVGAGuestMemDescriptor
230 * structures. Each structure may do one of three things:
231 *
232 * - Terminate the GMR descriptor list.
233 * (ppn==0, numPages==0)
234 *
235 * - Add a PPN or range of PPNs to the GMR's virtual address space.
236 * (ppn != 0, numPages != 0)
237 *
238 * - Provide the PPN of the next SVGAGuestMemDescriptor, in order to
239 * support multi-page GMR descriptor tables without forcing the
240 * driver to allocate physically contiguous memory.
241 * (ppn != 0, numPages == 0)
242 *
243 * Note that each physical page of SVGAGuestMemDescriptor structures
244 * can describe at least 2MB of guest memory. If the driver needs to
245 * use more than one page of descriptor structures, it must use one of
246 * its SVGAGuestMemDescriptors to point to an additional page. The
247 * device will never automatically cross a page boundary.
248 *
249 * Once the driver has described a GMR, it is immediately available
250 * for use via any FIFO command that uses an SVGAGuestPtr structure.
251 * These pointers include a GMR identifier plus an offset into that
252 * GMR.
253 *
254 * The driver must check the SVGA_CAP_GMR bit before using the GMR
255 * registers.
256 */
257
258/*
259 * Special GMR IDs, allowing SVGAGuestPtrs to point to framebuffer
260 * memory as well. In the future, these IDs could even be used to
261 * allow legacy memory regions to be redefined by the guest as GMRs.
262 *
263 * Using the guest framebuffer (GFB) at BAR1 for general purpose DMA
264 * is being phased out. Please try to use user-defined GMRs whenever
265 * possible.
266 */
267#define SVGA_GMR_NULL ((uint32) -1)
268#define SVGA_GMR_FRAMEBUFFER ((uint32) -2) // Guest Framebuffer (GFB)
269
270typedef
271struct SVGAGuestMemDescriptor {
272 uint32 ppn;
273 uint32 numPages;
274} SVGAGuestMemDescriptor;
275
276typedef
277struct SVGAGuestPtr {
278 uint32 gmrId;
279 uint32 offset;
280} SVGAGuestPtr;
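
/*
 * Illustrative sketch (not part of the original header): defining a
 * GMR from an array of single-page PPNs, following the register
 * protocol described above. 'desc' is the kernel mapping of the
 * (physically contiguous) descriptor page whose PPN is 'desc_ppn';
 * one such page holds enough descriptors for at least 2MB of guest
 * memory. A real driver would coalesce contiguous pages into ranges
 * and check SVGA_CAP_GMR first.
 */
static inline void
svga_define_gmr(unsigned long io_base, uint32 gmr_id,
                SVGAGuestMemDescriptor *desc, uint32 desc_ppn,
                const uint32 *pages, uint32 num_pages)
{
   uint32 i;

   for (i = 0; i < num_pages; i++) {
      desc[i].ppn = pages[i];
      desc[i].numPages = 1;
   }
   desc[num_pages].ppn = 0;        /* Terminator: ppn==0, numPages==0 */
   desc[num_pages].numPages = 0;

   outl(SVGA_REG_GMR_ID, io_base + SVGA_INDEX_PORT);   /* ID first... */
   outl(gmr_id, io_base + SVGA_VALUE_PORT);
   outl(SVGA_REG_GMR_DESCRIPTOR, io_base + SVGA_INDEX_PORT);
   outl(desc_ppn, io_base + SVGA_VALUE_PORT);  /* ...then the descriptor */
}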
281
282
283/*
284 * SVGAGMRImageFormat --
285 *
286 * This is a packed representation of the source 2D image format
287 * for a GMR-to-screen blit. Currently it is defined as an encoding
288 * of the screen's color depth and bits-per-pixel, however, 16 bits
289 * are reserved for future use to identify other encodings (such as
290 * RGBA or higher-precision images).
291 *
292 * Currently supported formats:
293 *
294 * bpp depth Format Name
295 * --- ----- -----------
296 * 32 24 32-bit BGRX
297 * 24 24 24-bit BGR
298 * 16 16 RGB 5-6-5
299 * 16 15 RGB 5-5-5
300 *
301 */
302
303typedef
304struct SVGAGMRImageFormat {
305 union {
306 struct {
307 uint32 bitsPerPixel : 8;
308 uint32 colorDepth : 8;
309 uint32 reserved : 16; // Must be zero
310 };
311
312 uint32 value;
313 };
314} SVGAGMRImageFormat;
315
316/*
317 * SVGAColorBGRX --
318 *
319 * A 24-bit color format (BGRX), which does not depend on the
320 * format of the legacy guest framebuffer (GFB) or the current
321 * GMRFB state.
322 */
323
324typedef
325struct SVGAColorBGRX {
326 union {
327 struct {
328 uint32 b : 8;
329 uint32 g : 8;
330 uint32 r : 8;
331 uint32 x : 8; // Unused
332 };
333
334 uint32 value;
335 };
336} SVGAColorBGRX;
337
338
339/*
340 * SVGASignedRect --
341 * SVGASignedPoint --
342 *
343 * Signed rectangle and point primitives. These are used by the new
344 * 2D primitives for drawing to Screen Objects, which can occupy a
345 * signed virtual coordinate space.
346 *
347 * SVGASignedRect specifies a half-open interval: the (left, top)
348 * pixel is part of the rectangle, but the (right, bottom) pixel is
349 * not.
350 */
351
352typedef
353struct SVGASignedRect {
354 int32 left;
355 int32 top;
356 int32 right;
357 int32 bottom;
358} SVGASignedRect;
359
360typedef
361struct SVGASignedPoint {
362 int32 x;
363 int32 y;
364} SVGASignedPoint;
365
366
367/*
368 * Capabilities
369 *
370 * Note the holes in the bitfield. Missing bits have been deprecated,
371 * and must not be reused. Those capabilities will never be reported
372 * by new versions of the SVGA device.
373 */
374
375#define SVGA_CAP_NONE 0x00000000
376#define SVGA_CAP_RECT_COPY 0x00000002
377#define SVGA_CAP_CURSOR 0x00000020
378#define SVGA_CAP_CURSOR_BYPASS 0x00000040 // Legacy (Use Cursor Bypass 3 instead)
379#define SVGA_CAP_CURSOR_BYPASS_2 0x00000080 // Legacy (Use Cursor Bypass 3 instead)
380#define SVGA_CAP_8BIT_EMULATION 0x00000100
381#define SVGA_CAP_ALPHA_CURSOR 0x00000200
382#define SVGA_CAP_3D 0x00004000
383#define SVGA_CAP_EXTENDED_FIFO 0x00008000
384#define SVGA_CAP_MULTIMON 0x00010000 // Legacy multi-monitor support
385#define SVGA_CAP_PITCHLOCK 0x00020000
386#define SVGA_CAP_IRQMASK 0x00040000
387#define SVGA_CAP_DISPLAY_TOPOLOGY 0x00080000 // Legacy multi-monitor support
388#define SVGA_CAP_GMR 0x00100000
389#define SVGA_CAP_TRACES 0x00200000
390
391
392/*
393 * FIFO register indices.
394 *
395 * The FIFO is a chunk of device memory mapped into guest physmem. It
396 * is always treated as 32-bit words.
397 *
398 * The guest driver gets to decide how to partition it between
399 * - FIFO registers (there are always at least 4, specifying where the
400 * following data area is and how much data it contains; there may be
401 * more registers following these, depending on the FIFO protocol
402 * version in use)
403 * - FIFO data, written by the guest and slurped out by the VMX.
404 * These indices are 32-bit word offsets into the FIFO.
405 */
406
407enum {
408 /*
409 * Block 1 (basic registers): The originally defined FIFO registers.
410 * These exist and are valid for all versions of the FIFO protocol.
411 */
412
413 SVGA_FIFO_MIN = 0,
414 SVGA_FIFO_MAX, /* The distance from MIN to MAX must be at least 10K */
415 SVGA_FIFO_NEXT_CMD,
416 SVGA_FIFO_STOP,
417
418 /*
419 * Block 2 (extended registers): Mandatory registers for the extended
420 * FIFO. These exist if the SVGA caps register includes
421 * SVGA_CAP_EXTENDED_FIFO; some of them are valid only if their
422 * associated capability bit is enabled.
423 *
424 * Note that when originally defined, SVGA_CAP_EXTENDED_FIFO implied
425 * support only for (FIFO registers) CAPABILITIES, FLAGS, and FENCE.
426 * This means that the guest has to test individually (in most cases
427 * using FIFO caps) for the presence of registers after this; the VMX
428 * can define "extended FIFO" to mean whatever it wants, and currently
429 * won't enable it unless there's room for that set and much more.
430 */
431
432 SVGA_FIFO_CAPABILITIES = 4,
433 SVGA_FIFO_FLAGS,
434 // Valid with SVGA_FIFO_CAP_FENCE:
435 SVGA_FIFO_FENCE,
436
437 /*
438 * Block 3a (optional extended registers): Additional registers for the
439 * extended FIFO, whose presence isn't actually implied by
440 * SVGA_CAP_EXTENDED_FIFO; these exist if SVGA_FIFO_MIN is high enough to
441 * leave room for them.
442 *
443 * The VMX currently considers the registers in block 3a mandatory
444 * for the extended FIFO.
445 */
446
447 // Valid if exists (i.e. if extended FIFO enabled):
448 SVGA_FIFO_3D_HWVERSION, /* See SVGA3dHardwareVersion in svga3d_reg.h */
449 // Valid with SVGA_FIFO_CAP_PITCHLOCK:
450 SVGA_FIFO_PITCHLOCK,
451
452 // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
453 SVGA_FIFO_CURSOR_ON, /* Cursor bypass 3 show/hide register */
454 SVGA_FIFO_CURSOR_X, /* Cursor bypass 3 x register */
455 SVGA_FIFO_CURSOR_Y, /* Cursor bypass 3 y register */
456 SVGA_FIFO_CURSOR_COUNT, /* Incremented when any of the other 3 change */
457 SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
458
459 // Valid with SVGA_FIFO_CAP_RESERVE:
460 SVGA_FIFO_RESERVED, /* Bytes past NEXT_CMD with real contents */
461
462 /*
463 * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
464 *
465 * By default this is SVGA_ID_INVALID, to indicate that the cursor
466 * coordinates are specified relative to the virtual root. If this
467 * is set to a specific screen ID, cursor position is reinterpreted
468 * as a signed offset relative to that screen's origin. This is the
469 * only way to place the cursor on a non-rooted screen.
470 */
471 SVGA_FIFO_CURSOR_SCREEN_ID,
472
473 /*
474 * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
475 * registers, but this must be done carefully and with judicious use of
476 * capability bits, since comparisons based on SVGA_FIFO_MIN aren't
477 * enough to tell you whether the register exists: we've shipped drivers
478 * and products that used SVGA_FIFO_3D_CAPS but didn't know about some of
479 * the earlier ones. The actual order of introduction was:
480 * - PITCHLOCK
481 * - 3D_CAPS
482 * - CURSOR_* (cursor bypass 3)
483 * - RESERVED
484 * So, code that wants to know whether it can use any of the
485 * aforementioned registers, or anything else added after PITCHLOCK and
486 * before 3D_CAPS, needs to reason about something other than
487 * SVGA_FIFO_MIN.
488 */
489
490 /*
491 * 3D caps block space; valid with 3D hardware version >=
492 * SVGA3D_HWVERSION_WS6_B1.
493 */
494 SVGA_FIFO_3D_CAPS = 32,
495 SVGA_FIFO_3D_CAPS_LAST = 32 + 255,
496
497 /*
498 * End of VMX's current definition of "extended-FIFO registers".
499 * Registers before here are always enabled/disabled as a block; either
500 * the extended FIFO is enabled and includes all preceding registers, or
501 * it's disabled entirely.
502 *
503 * Block 3b (truly optional extended registers): Additional registers for
504 * the extended FIFO, which the VMX already knows how to enable and
505 * disable with correct granularity.
506 *
507 * Registers after here exist if and only if the guest SVGA driver
508 * sets SVGA_FIFO_MIN high enough to leave room for them.
509 */
510
511 // Valid if register exists:
512 SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
513 SVGA_FIFO_FENCE_GOAL, /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
514 SVGA_FIFO_BUSY, /* See "FIFO Synchronization Registers" */
515
516 /*
517 * Always keep this last. This defines the maximum number of
518 * registers we know about. At power-on, this value is placed in
519 * the SVGA_REG_MEM_REGS register, and we expect the guest driver
520 * to allocate this much space in FIFO memory for registers.
521 */
522 SVGA_FIFO_NUM_REGS
523};
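
/*
 * Illustrative sketch (not part of the original header): writing one
 * 32-bit word of command data into the FIFO data area, with
 * wraparound. 'fifo' is the mapped FIFO memory viewed as 32-bit
 * words; the values stored in MIN/MAX/NEXT_CMD are byte offsets.
 * Checking for a full FIFO against SVGA_FIFO_STOP and waking the
 * host are omitted here.
 */
static inline void
svga_fifo_put_word(volatile uint32 *fifo, uint32 word)
{
   uint32 next = fifo[SVGA_FIFO_NEXT_CMD];

   fifo[next / sizeof(uint32)] = word;
   next += sizeof(uint32);
   if (next >= fifo[SVGA_FIFO_MAX])   /* Wrap back to the data start */
      next = fifo[SVGA_FIFO_MIN];
   fifo[SVGA_FIFO_NEXT_CMD] = next;
}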
524
525
526/*
527 * Definition of registers included in extended FIFO support.
528 *
529 * The guest SVGA driver gets to allocate the FIFO between registers
530 * and data. It must always allocate at least 4 registers, but old
531 * drivers stopped there.
532 *
533 * The VMX will enable extended FIFO support if and only if the guest
534 * left enough room for all registers defined as part of the mandatory
535 * set for the extended FIFO.
536 *
537 * Note that the guest drivers typically allocate the FIFO only at
538 * initialization time, not at mode switches, so it's likely that the
539 * number of FIFO registers won't change without a reboot.
540 *
541 * All registers less than this value are guaranteed to be present if
542 * svgaUser->fifo.extended is set. Any later registers must be tested
543 * individually for compatibility at each use (in the VMX).
544 *
545 * This value is used only by the VMX, so it can change without
546 * affecting driver compatibility; keep it that way?
547 */
548#define SVGA_FIFO_EXTENDED_MANDATORY_REGS (SVGA_FIFO_3D_CAPS_LAST + 1)
549
550
551/*
552 * FIFO Synchronization Registers
553 *
554 * This explains the relationship between the various FIFO
555 * sync-related registers in IOSpace and in FIFO space.
556 *
557 * SVGA_REG_SYNC --
558 *
559 * The SYNC register can be used in two different ways by the guest:
560 *
561 * 1. If the guest wishes to fully sync (drain) the FIFO,
562 * it will write once to SYNC then poll on the BUSY
563 * register. The FIFO is sync'ed once BUSY is zero.
564 *
565 * 2. If the guest wants to asynchronously wake up the host,
566 * it will write once to SYNC without polling on BUSY.
567 * Ideally it will do this after some new commands have
568 * been placed in the FIFO, and after reading a zero
569 * from SVGA_FIFO_BUSY.
570 *
571 * (1) is the original behaviour that SYNC was designed to
572 * support. Originally, a write to SYNC would implicitly
573 * trigger a read from BUSY. This causes us to synchronously
574 * process the FIFO.
575 *
576 * This behaviour has since been changed so that writing SYNC
577 * will *not* implicitly cause a read from BUSY. Instead, it
578 * makes a channel call which asynchronously wakes up the MKS
579 * thread.
580 *
581 * New guests can use this new behaviour to implement (2)
582 * efficiently. This lets guests get the host's attention
583 * without waiting for the MKS to poll, which gives us much
584 * better CPU utilization on SMP hosts and on UP hosts while
585 * we're blocked on the host GPU.
586 *
587 * Old guests shouldn't notice the behaviour change. SYNC was
588 * never guaranteed to process the entire FIFO, since it was
589 * bounded to a particular number of CPU cycles. Old guests will
590 * still loop on the BUSY register until the FIFO is empty.
591 *
592 * Writing to SYNC currently has the following side-effects:
593 *
594 * - Sets SVGA_REG_BUSY to TRUE (in the monitor)
595 * - Asynchronously wakes up the MKS thread for FIFO processing
596 * - The value written to SYNC is recorded as a "reason", for
597 * stats purposes.
598 *
599 * If SVGA_FIFO_BUSY is available, drivers are advised to only
600 * write to SYNC if SVGA_FIFO_BUSY is FALSE. Drivers should set
601 * SVGA_FIFO_BUSY to TRUE after writing to SYNC. The MKS will
602 * eventually set SVGA_FIFO_BUSY on its own, but this approach
603 * lets the driver avoid sending multiple asynchronous wakeup
604 * messages to the MKS thread.
605 *
606 * SVGA_REG_BUSY --
607 *
608 * This register is set to TRUE when SVGA_REG_SYNC is written,
609 * and it reads as FALSE when the FIFO has been completely
610 * drained.
611 *
612 * Every read from this register causes us to synchronously
613 * process FIFO commands. There is no guarantee as to how many
614 * commands each read will process.
615 *
616 * CPU time spent processing FIFO commands will be billed to
617 * the guest.
618 *
619 * New drivers should avoid using this register unless they
620 * need to guarantee that the FIFO is completely drained. It
621 * is overkill for performing a sync-to-fence. Older drivers
622 * will use this register for any type of synchronization.
623 *
624 * SVGA_FIFO_BUSY --
625 *
626 * This register is a fast way for the guest driver to check
627 * whether the FIFO is already being processed. It reads and
628 * writes at normal RAM speeds, with no monitor intervention.
629 *
630 * If this register reads as TRUE, the host is guaranteeing that
631 * any new commands written into the FIFO will be noticed before
632 * the MKS goes back to sleep.
633 *
634 * If this register reads as FALSE, no such guarantee can be
635 * made.
636 *
637 * The guest should use this register to quickly determine
638 * whether or not it needs to wake up the host. If the guest
639 * just wrote a command or group of commands that it would like
640 * the host to begin processing, it should:
641 *
642 * 1. Read SVGA_FIFO_BUSY. If it reads as TRUE, no further
643 * action is necessary.
644 *
645 * 2. Write TRUE to SVGA_FIFO_BUSY. This informs future guest
646 * code that we've already sent a SYNC to the host and we
647 * don't need to send a duplicate.
648 *
649 * 3. Write a reason to SVGA_REG_SYNC. This will send an
650 * asynchronous wakeup to the MKS thread.
651 */
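
/*
 * Illustrative sketch (not part of the original header): the wakeup
 * sequence from steps 1-3 above. 'fifo' is the mapped FIFO memory,
 * 'io_base' the register I/O base from BAR0; the "reason" value
 * written to SYNC is arbitrary.
 */
static inline void
svga_fifo_kick(volatile uint32 *fifo, unsigned long io_base)
{
   if (fifo[SVGA_FIFO_BUSY])
      return;                    /* Step 1: host is already awake */
   fifo[SVGA_FIFO_BUSY] = 1;     /* Step 2: record that SYNC was sent */
   outl(SVGA_REG_SYNC, io_base + SVGA_INDEX_PORT);
   outl(1 /* reason */, io_base + SVGA_VALUE_PORT);   /* Step 3 */
}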
652
653
654/*
655 * FIFO Capabilities
656 *
657 * Fence -- Fence register and command are supported
658 * Accel Front -- Front buffer only commands are supported
659 * Pitch Lock -- Pitch lock register is supported
660 * Video -- SVGA Video overlay units are supported
661 * Escape -- Escape command is supported
662 *
663 * XXX: Add longer descriptions for each capability, including a list
664 * of the new features that each capability provides.
665 *
666 * SVGA_FIFO_CAP_SCREEN_OBJECT --
667 *
668 * Provides dynamic multi-screen rendering, for improved Unity and
669 * multi-monitor modes. With Screen Object, the guest can
670 * dynamically create and destroy 'screens', which can represent
671 * Unity windows or virtual monitors. Screen Object also provides
672 * strong guarantees that DMA operations happen only when
673 * guest-initiated. Screen Object deprecates the BAR1 guest
674 * framebuffer (GFB) and all commands that work only with the GFB.
675 *
676 * New registers:
677 * FIFO_CURSOR_SCREEN_ID, VIDEO_DATA_GMRID, VIDEO_DST_SCREEN_ID
678 *
679 * New 2D commands:
680 * DEFINE_SCREEN, DESTROY_SCREEN, DEFINE_GMRFB, BLIT_GMRFB_TO_SCREEN,
681 * BLIT_SCREEN_TO_GMRFB, ANNOTATION_FILL, ANNOTATION_COPY
682 *
683 * New 3D commands:
684 * BLIT_SURFACE_TO_SCREEN
685 *
686 * New guarantees:
687 *
688 * - The host will not read or write guest memory, including the GFB,
689 * except when explicitly initiated by a DMA command.
690 *
691 * - All DMA, including legacy DMA like UPDATE and PRESENT_READBACK,
692 * is guaranteed to complete before any subsequent FENCEs.
693 *
694 * - All legacy commands which affect a Screen (UPDATE, PRESENT,
695 * PRESENT_READBACK) as well as new Screen blit commands will
696 * all behave consistently as blits, and memory will be read
697 * or written in FIFO order.
698 *
699 * For example, if you PRESENT from one SVGA3D surface to multiple
700 * places on the screen, the data copied will always be from the
701 * SVGA3D surface at the time the PRESENT was issued in the FIFO.
702 * This was not necessarily true on devices without Screen Object.
703 *
704 * This means that on devices that support Screen Object, the
705 * PRESENT_READBACK command should not be necessary unless you
706 * actually want to read back the results of 3D rendering into
707 * system memory. (And for that, the BLIT_SCREEN_TO_GMRFB
708 * command provides a strict superset of functionality.)
709 *
710 * - When a screen is resized, either using Screen Object commands or
711 * legacy multimon registers, its contents are preserved.
712 */
713
714#define SVGA_FIFO_CAP_NONE 0
715#define SVGA_FIFO_CAP_FENCE (1<<0)
716#define SVGA_FIFO_CAP_ACCELFRONT (1<<1)
717#define SVGA_FIFO_CAP_PITCHLOCK (1<<2)
718#define SVGA_FIFO_CAP_VIDEO (1<<3)
719#define SVGA_FIFO_CAP_CURSOR_BYPASS_3 (1<<4)
720#define SVGA_FIFO_CAP_ESCAPE (1<<5)
721#define SVGA_FIFO_CAP_RESERVE (1<<6)
722#define SVGA_FIFO_CAP_SCREEN_OBJECT (1<<7)
723
724
725/*
726 * FIFO Flags
727 *
728 * Accel Front -- Driver should use front buffer only commands
729 */
730
731#define SVGA_FIFO_FLAG_NONE 0
732#define SVGA_FIFO_FLAG_ACCELFRONT (1<<0)
733#define SVGA_FIFO_FLAG_RESERVED (1<<31) // Internal use only
734
735/*
736 * FIFO reservation sentinel value
737 */
738
739#define SVGA_FIFO_RESERVED_UNKNOWN 0xffffffff
740
741
742/*
743 * Video overlay support
744 */
745
746#define SVGA_NUM_OVERLAY_UNITS 32
747
748
749/*
750 * Video capabilities that the guest is currently using
751 */
752
753#define SVGA_VIDEO_FLAG_COLORKEY 0x0001
754
755
756/*
757 * Offsets for the video overlay registers
758 */
759
760enum {
761 SVGA_VIDEO_ENABLED = 0,
762 SVGA_VIDEO_FLAGS,
763 SVGA_VIDEO_DATA_OFFSET,
764 SVGA_VIDEO_FORMAT,
765 SVGA_VIDEO_COLORKEY,
766 SVGA_VIDEO_SIZE, // Deprecated
767 SVGA_VIDEO_WIDTH,
768 SVGA_VIDEO_HEIGHT,
769 SVGA_VIDEO_SRC_X,
770 SVGA_VIDEO_SRC_Y,
771 SVGA_VIDEO_SRC_WIDTH,
772 SVGA_VIDEO_SRC_HEIGHT,
773 SVGA_VIDEO_DST_X, // Signed int32
774 SVGA_VIDEO_DST_Y, // Signed int32
775 SVGA_VIDEO_DST_WIDTH,
776 SVGA_VIDEO_DST_HEIGHT,
777 SVGA_VIDEO_PITCH_1,
778 SVGA_VIDEO_PITCH_2,
779 SVGA_VIDEO_PITCH_3,
780 SVGA_VIDEO_DATA_GMRID, // Optional, defaults to SVGA_GMR_FRAMEBUFFER
781 SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
782 SVGA_VIDEO_NUM_REGS
783};
784
785
786/*
787 * SVGA Overlay Units
788 *
789 * width and height relate to the entire source video frame.
790 * srcX, srcY, srcWidth and srcHeight represent the subset of the source
791 * video frame to be displayed.
792 */
793
794typedef struct SVGAOverlayUnit {
795 uint32 enabled;
796 uint32 flags;
797 uint32 dataOffset;
798 uint32 format;
799 uint32 colorKey;
800 uint32 size;
801 uint32 width;
802 uint32 height;
803 uint32 srcX;
804 uint32 srcY;
805 uint32 srcWidth;
806 uint32 srcHeight;
807 int32 dstX;
808 int32 dstY;
809 uint32 dstWidth;
810 uint32 dstHeight;
811 uint32 pitches[3];
812 uint32 dataGMRId;
813 uint32 dstScreenId;
814} SVGAOverlayUnit;
815
816
817/*
818 * SVGAScreenObject --
819 *
820 * This is a new way to represent a guest's multi-monitor screen or
821 * Unity window. Screen objects are only supported if the
822 * SVGA_FIFO_CAP_SCREEN_OBJECT capability bit is set.
823 *
824 * If Screen Objects are supported, they can be used to fully
825 * replace the functionality provided by the framebuffer registers
826 * (SVGA_REG_WIDTH, HEIGHT, etc.) and by SVGA_CAP_DISPLAY_TOPOLOGY.
827 *
828 * The screen object is a struct with guaranteed binary
829 * compatibility. New flags can be added, and the struct may grow,
830 * but existing fields must retain their meaning.
831 *
832 */
833
834#define SVGA_SCREEN_HAS_ROOT (1 << 0) // Screen is present in the virtual coord space
835#define SVGA_SCREEN_IS_PRIMARY (1 << 1) // Guest considers this screen to be 'primary'
836#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) // Guest is running a fullscreen app here
837
838typedef
839struct SVGAScreenObject {
840 uint32 structSize; // sizeof(SVGAScreenObject)
841 uint32 id;
842 uint32 flags;
843 struct {
844 uint32 width;
845 uint32 height;
846 } size;
847 struct {
848 int32 x;
849 int32 y;
850 } root; // Only used if SVGA_SCREEN_HAS_ROOT is set.
851} SVGAScreenObject;
852
853
854/*
855 * Commands in the command FIFO:
856 *
857 * Command IDs defined below are used for the traditional 2D FIFO
858 * communication (not all commands are available for all versions of the
859 * SVGA FIFO protocol).
860 *
861 * Note the holes in the command ID numbers: These commands have been
862 * deprecated, and the old IDs must not be reused.
863 *
864 * Command IDs from 1000 to 1999 are reserved for use by the SVGA3D
865 * protocol.
866 *
867 * Each command's parameters are described by the comments and
868 * structs below.
869 */
870
871typedef enum {
872 SVGA_CMD_INVALID_CMD = 0,
873 SVGA_CMD_UPDATE = 1,
874 SVGA_CMD_RECT_COPY = 3,
875 SVGA_CMD_DEFINE_CURSOR = 19,
876 SVGA_CMD_DEFINE_ALPHA_CURSOR = 22,
877 SVGA_CMD_UPDATE_VERBOSE = 25,
878 SVGA_CMD_FRONT_ROP_FILL = 29,
879 SVGA_CMD_FENCE = 30,
880 SVGA_CMD_ESCAPE = 33,
881 SVGA_CMD_DEFINE_SCREEN = 34,
882 SVGA_CMD_DESTROY_SCREEN = 35,
883 SVGA_CMD_DEFINE_GMRFB = 36,
884 SVGA_CMD_BLIT_GMRFB_TO_SCREEN = 37,
885 SVGA_CMD_BLIT_SCREEN_TO_GMRFB = 38,
886 SVGA_CMD_ANNOTATION_FILL = 39,
887 SVGA_CMD_ANNOTATION_COPY = 40,
888 SVGA_CMD_MAX
889} SVGAFifoCmdId;
890
891#define SVGA_CMD_MAX_ARGS 64
892
893
894/*
895 * SVGA_CMD_UPDATE --
896 *
897 * This is a DMA transfer which copies from the Guest Framebuffer
898 * (GFB) at BAR1 + SVGA_REG_FB_OFFSET to any screens which
899 * intersect with the provided virtual rectangle.
900 *
901 * This command does not support using arbitrary guest memory as a
902 * data source; it only works with the pre-defined GFB memory.
903 * This command also does not support signed virtual coordinates.
904 * If you have defined screens (using SVGA_CMD_DEFINE_SCREEN) with
905 * negative root x/y coordinates, the negative portion of those
906 * screens will not be reachable by this command.
907 *
908 * This command is not necessary when using framebuffer
909 * traces. Traces are automatically enabled if the SVGA FIFO is
910 * disabled, and you may explicitly enable/disable traces using
911 * SVGA_REG_TRACES. With traces enabled, any write to the GFB will
912 * automatically act as if a subsequent SVGA_CMD_UPDATE was issued.
913 *
914 * Traces and SVGA_CMD_UPDATE are the only supported ways to render
915 * pseudocolor screen updates. The newer Screen Object commands
916 * only support true color formats.
917 *
918 * Availability:
919 * Always available.
920 */
921
922typedef
923struct {
924 uint32 x;
925 uint32 y;
926 uint32 width;
927 uint32 height;
928} SVGAFifoCmdUpdate;
929
930
931/*
932 * SVGA_CMD_RECT_COPY --
933 *
934 * Perform a rectangular DMA transfer from one area of the GFB to
935 * another, and copy the result to any screens which intersect it.
936 *
937 * Availability:
938 * SVGA_CAP_RECT_COPY
939 */
940
941typedef
942struct {
943 uint32 srcX;
944 uint32 srcY;
945 uint32 destX;
946 uint32 destY;
947 uint32 width;
948 uint32 height;
949} SVGAFifoCmdRectCopy;
950
951
952/*
953 * SVGA_CMD_DEFINE_CURSOR --
954 *
955 * Provide a new cursor image, as an AND/XOR mask.
956 *
957 * The recommended way to position the cursor overlay is by using
958 * the SVGA_FIFO_CURSOR_* registers, supported by the
959 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
960 *
961 * Availability:
962 * SVGA_CAP_CURSOR
963 */
964
965typedef
966struct {
967 uint32 id; // Reserved, must be zero.
968 uint32 hotspotX;
969 uint32 hotspotY;
970 uint32 width;
971 uint32 height;
972 uint32 andMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
973 uint32 xorMaskDepth; // Value must be 1 or equal to BITS_PER_PIXEL
974 /*
975 * Followed by scanline data for AND mask, then XOR mask.
976 * Each scanline is padded to a 32-bit boundary.
977 */
978} SVGAFifoCmdDefineCursor;
979
980
981/*
982 * SVGA_CMD_DEFINE_ALPHA_CURSOR --
983 *
984 * Provide a new cursor image, in 32-bit BGRA format.
985 *
986 * The recommended way to position the cursor overlay is by using
987 * the SVGA_FIFO_CURSOR_* registers, supported by the
988 * SVGA_FIFO_CAP_CURSOR_BYPASS_3 capability.
989 *
990 * Availability:
991 * SVGA_CAP_ALPHA_CURSOR
992 */
993
994typedef
995struct {
996 uint32 id; // Reserved, must be zero.
997 uint32 hotspotX;
998 uint32 hotspotY;
999 uint32 width;
1000 uint32 height;
1001 /* Followed by scanline data */
1002} SVGAFifoCmdDefineAlphaCursor;
1003
1004
1005/*
1006 * SVGA_CMD_UPDATE_VERBOSE --
1007 *
1008 * Just like SVGA_CMD_UPDATE, but also provide a per-rectangle
1009 * 'reason' value, an opaque cookie which is used by internal
1010 * debugging tools. Third party drivers should not use this
1011 * command.
1012 *
1013 * Availability:
1014 * SVGA_CAP_EXTENDED_FIFO
1015 */
1016
1017typedef
1018struct {
1019 uint32 x;
1020 uint32 y;
1021 uint32 width;
1022 uint32 height;
1023 uint32 reason;
1024} SVGAFifoCmdUpdateVerbose;
1025
1026
1027/*
1028 * SVGA_CMD_FRONT_ROP_FILL --
1029 *
1030 * This is a hint which tells the SVGA device that the driver has
1031 * just filled a rectangular region of the GFB with a solid
1032 * color. Instead of reading these pixels from the GFB, the device
1033 * can assume that they all equal 'color'. This is primarily used
1034 * for remote desktop protocols.
1035 *
1036 * Availability:
1037 * SVGA_FIFO_CAP_ACCELFRONT
1038 */
1039
1040#define SVGA_ROP_COPY 0x03
1041
1042typedef
1043struct {
1044 uint32 color; // In the same format as the GFB
1045 uint32 x;
1046 uint32 y;
1047 uint32 width;
1048 uint32 height;
1049 uint32 rop; // Must be SVGA_ROP_COPY
1050} SVGAFifoCmdFrontRopFill;
1051
1052
1053/*
1054 * SVGA_CMD_FENCE --
1055 *
1056 * Insert a synchronization fence. When the SVGA device reaches
1057 * this command, it will copy the 'fence' value into the
1058 * SVGA_FIFO_FENCE register. It will also compare the fence against
1059 * SVGA_FIFO_FENCE_GOAL. If the fence matches the goal and the
1060 * SVGA_IRQFLAG_FENCE_GOAL interrupt is enabled, the device will
1061 * raise this interrupt.
1062 *
1063 * Availability:
1064 * SVGA_FIFO_FENCE for this command,
1065 * SVGA_CAP_IRQMASK for SVGA_FIFO_FENCE_GOAL.
1066 */
1067
1068typedef
1069struct {
1070 uint32 fence;
1071} SVGAFifoCmdFence;
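
/*
 * Illustrative sketch (not part of the original header): inserting a
 * fence and spinning until the device reaches it, using the
 * svga_fifo_put_word() helper sketched above and assuming
 * SVGA_FIFO_CAP_FENCE. A real driver would sleep on
 * SVGA_IRQFLAG_ANY_FENCE rather than busy-wait, and would handle
 * 32-bit fence wraparound.
 */
static inline void
svga_insert_and_wait_fence(volatile uint32 *fifo, uint32 fence)
{
   svga_fifo_put_word(fifo, SVGA_CMD_FENCE);
   svga_fifo_put_word(fifo, fence);
   while (fifo[SVGA_FIFO_FENCE] != fence)
      ; /* Host copies 'fence' into SVGA_FIFO_FENCE when reached */
}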
1072
1073
1074/*
1075 * SVGA_CMD_ESCAPE --
1076 *
1077 * Send an extended or vendor-specific variable length command.
1078 * This is used for video overlay, third party plugins, and
1079 * internal debugging tools. See svga_escape.h
1080 *
1081 * Availability:
1082 * SVGA_FIFO_CAP_ESCAPE
1083 */
1084
1085typedef
1086struct {
1087 uint32 nsid;
1088 uint32 size;
1089 /* followed by 'size' bytes of data */
1090} SVGAFifoCmdEscape;
1091
1092
1093/*
1094 * SVGA_CMD_DEFINE_SCREEN --
1095 *
1096 * Define or redefine an SVGAScreenObject. See the description of
1097 * SVGAScreenObject above. The video driver is responsible for
1098 * generating new screen IDs. They should be small positive
1099 * integers. The virtual device will have an implementation
1100 * specific upper limit on the number of screen IDs
1101 * supported. Drivers are responsible for recycling IDs. The first
1102 * valid ID is zero.
1103 *
1104 * - Interaction with other registers:
1105 *
1106 * For backwards compatibility, when the GFB mode registers (WIDTH,
1107 * HEIGHT, PITCHLOCK, BITS_PER_PIXEL) are modified, the SVGA device
1108 * deletes all screens other than screen #0, and redefines screen
1109 * #0 according to the specified mode. Drivers that use
1110 * SVGA_CMD_DEFINE_SCREEN should destroy or redefine screen #0.
1111 *
1112 * If you use screen objects, do not use the legacy multi-mon
1113 * registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
1114 *
1115 * Availability:
1116 * SVGA_FIFO_CAP_SCREEN_OBJECT
1117 */
1118
1119typedef
1120struct {
1121 SVGAScreenObject screen; // Variable-length according to version
1122} SVGAFifoCmdDefineScreen;
1123
1124
1125/*
1126 * SVGA_CMD_DESTROY_SCREEN --
1127 *
1128 * Destroy an SVGAScreenObject. Its ID is immediately available for
1129 * re-use.
1130 *
1131 * Availability:
1132 * SVGA_FIFO_CAP_SCREEN_OBJECT
1133 */
1134
1135typedef
1136struct {
1137 uint32 screenId;
1138} SVGAFifoCmdDestroyScreen;
1139
1140
1141/*
1142 * SVGA_CMD_DEFINE_GMRFB --
1143 *
1144 * This command sets a piece of SVGA device state called the
1145 * Guest Memory Region Framebuffer, or GMRFB. The GMRFB is a
1146 * piece of light-weight state which identifies the location and
1147 * format of an image in guest memory or in BAR1. The GMRFB has
1148 * an arbitrary size, and it doesn't need to match the geometry
1149 * of the GFB or any screen object.
1150 *
1151 * The GMRFB can be redefined as often as you like. You could
1152 * always use the same GMRFB, you could redefine it before
1153 * rendering from a different guest screen, or you could even
1154 * redefine it before every blit.
1155 *
1156 * There are multiple ways to use this command. The simplest way is
1157 * to use it to move the framebuffer either to elsewhere in the GFB
1158 * (BAR1) memory region, or to a user-defined GMR. This lets a
1159 * driver use a framebuffer allocated entirely out of normal system
1160 * memory, which we encourage.
1161 *
1162 * Another way to use this command is to set up a ring buffer of
1163 * updates in GFB memory. If a driver wants to ensure that no
1164 * frames are skipped by the SVGA device, it is important that the
1165 * driver not modify the source data for a blit until the device is
1166 * done processing the command. One efficient way to accomplish
1167 * this is to use a ring of small DMA buffers. Each buffer is used
1168 * for one blit, then we move on to the next buffer in the
1169 * ring. The FENCE mechanism is used to protect each buffer from
1170 * re-use until the device is finished with that buffer's
1171 * corresponding blit.
1172 *
1173 * This command does not affect the meaning of SVGA_CMD_UPDATE.
1174 * UPDATEs always occur from the legacy GFB memory area. This
1175 * command has no support for pseudocolor GMRFBs. Currently only
1176 * true-color 15, 16, and 24-bit depths are supported. Future
1177 * devices may expose capabilities for additional framebuffer
1178 * formats.
1179 *
1180 * The default GMRFB value is undefined. Drivers must always send
1181 * this command at least once before performing any blit from the
1182 * GMRFB.
1183 *
1184 * Availability:
1185 * SVGA_FIFO_CAP_SCREEN_OBJECT
1186 */
1187
1188typedef
1189struct {
1190 SVGAGuestPtr ptr;
1191 uint32 bytesPerLine;
1192 SVGAGMRImageFormat format;
1193} SVGAFifoCmdDefineGMRFB;
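
/*
 * Illustrative sketch (not part of the original header): pointing the
 * GMRFB at a 32bpp, 24-depth image at offset zero of a user-defined
 * GMR. The gmrId and width parameters are example inputs only.
 */
static inline void
svga_gmrfb_init_32bpp(SVGAFifoCmdDefineGMRFB *cmd, uint32 gmrId,
                      uint32 width)
{
   cmd->ptr.gmrId = gmrId;
   cmd->ptr.offset = 0;
   cmd->bytesPerLine = width * 4;   /* 32 bits per pixel */
   cmd->format.bitsPerPixel = 32;   /* See the SVGAGMRImageFormat table */
   cmd->format.colorDepth = 24;
   cmd->format.reserved = 0;
}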
1194
1195
1196/*
1197 * SVGA_CMD_BLIT_GMRFB_TO_SCREEN --
1198 *
1199 * This is a guest-to-host blit. It performs a DMA operation to
1200 * copy a rectangular region of pixels from the current GMRFB to
1201 * one or more Screen Objects.
1202 *
1203 * The destination coordinate may be specified relative to a
1204 * screen's origin (if a screen ID is specified) or relative to the
1205 * virtual coordinate system's origin (if the screen ID is
1206 * SVGA_ID_INVALID). The actual destination may span zero or more
1207 * screens, in the case of a virtual destination rect or a rect
1208 * which extends off the edge of the specified screen.
1209 *
1210 * This command writes to the screen's "base layer": the underlying
1211 * framebuffer which exists below any cursor or video overlays. No
1212 * action is necessary to explicitly hide or update any overlays
1213 * which exist on top of the updated region.
1214 *
1215 * The SVGA device is guaranteed to finish reading from the GMRFB
1216 * by the time any subsequent FENCE commands are reached.
1217 *
1218 * This command consumes an annotation. See the
1219 * SVGA_CMD_ANNOTATION_* commands for details.
1220 *
1221 * Availability:
1222 * SVGA_FIFO_CAP_SCREEN_OBJECT
1223 */
1224
1225typedef
1226struct {
1227 SVGASignedPoint srcOrigin;
1228 SVGASignedRect destRect;
1229 uint32 destScreenId;
1230} SVGAFifoCmdBlitGMRFBToScreen;
1231
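
/*
 * A minimal sketch of a screen-relative blit: copy a w x h rectangle
 * from (x, y) in the current GMRFB to the same position on one screen.
 * Passing SVGA_ID_INVALID as the screen id would instead make destRect
 * a virtual-coordinate rectangle that may span several screens.
 */
static int vmw_blit_to_screen_sketch(struct vmw_private *dev_priv,
				     uint32 screen_id,
				     int32 x, int32 y, int32 w, int32 h)
{
	struct {
		uint32 cmd;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *cmds;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (cmds == NULL)
		return -ENOMEM;

	cmds->cmd = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
	cmds->body.srcOrigin.x = x;
	cmds->body.srcOrigin.y = y;
	cmds->body.destRect.left = x;
	cmds->body.destRect.top = y;
	cmds->body.destRect.right = x + w;
	cmds->body.destRect.bottom = y + h;
	cmds->body.destScreenId = screen_id;

	vmw_fifo_commit(dev_priv, sizeof(*cmds));
	return 0;
}
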
1232
1233/*
1234 * SVGA_CMD_BLIT_SCREEN_TO_GMRFB --
1235 *
1236 * This is a host-to-guest blit. It performs a DMA operation to
1237 * copy a rectangular region of pixels from a single Screen Object
1238 * back to the current GMRFB.
1239 *
1240 * Usage note: This command should be used rarely. It will
1241 * typically be inefficient, but it is necessary for some types of
1242 * synchronization between 3D (GPU) and 2D (CPU) rendering into
1243 * overlapping areas of a screen.
1244 *
1245 * The source coordinate is specified relative to a screen's
1246 * origin. The provided screen ID must be valid. If any parameters
1247 * are invalid, the resulting pixel values are undefined.
1248 *
1249 * This command reads the screen's "base layer". Overlays like
1250 * video and cursor are not included, but any data which was sent
1251 * using a blit-to-screen primitive will be available, no matter
1252 * whether the data's original source was the GMRFB or the 3D
1253 * acceleration hardware.
1254 *
1255 * Note that our guest-to-host blits and host-to-guest blits aren't
1256 * symmetric in their current implementation. While the parameters
 1257 * are identical, host-to-guest blits are far more limited.
1258 * They do not support clipping: If the source parameters don't
1259 * fully fit within a screen, the blit fails. They must originate
1260 * from exactly one screen. Virtual coordinates are not directly
1261 * supported.
1262 *
1263 * Host-to-guest blits do support the same set of GMRFB formats
1264 * offered by guest-to-host blits.
1265 *
1266 * The SVGA device is guaranteed to finish writing to the GMRFB by
1267 * the time any subsequent FENCE commands are reached.
1268 *
1269 * Availability:
1270 * SVGA_FIFO_CAP_SCREEN_OBJECT
1271 */
1272
1273typedef
1274struct {
1275 SVGASignedPoint destOrigin;
1276 SVGASignedRect srcRect;
1277 uint32 srcScreenId;
1278} SVGAFifoCmdBlitScreenToGMRFB;
1279
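
/*
 * A minimal sketch of a synchronized readback: blit one screen into
 * the current GMRFB, then fence and wait so the guest does not touch
 * the GMRFB memory before the device has finished writing it. Uses
 * the vmw_fifo_send_fence()/vmw_wait_fence() helpers declared in
 * vmwgfx_drv.h below.
 */
static int vmw_readback_sketch(struct vmw_private *dev_priv,
			       uint32 screen_id, int32 w, int32 h)
{
	uint32_t sequence;
	struct {
		uint32 cmd;
		SVGAFifoCmdBlitScreenToGMRFB body;
	} *cmds;
	int ret;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (cmds == NULL)
		return -ENOMEM;

	cmds->cmd = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
	cmds->body.destOrigin.x = 0;
	cmds->body.destOrigin.y = 0;
	cmds->body.srcRect.left = 0;	/* must lie fully within the screen */
	cmds->body.srcRect.top = 0;
	cmds->body.srcRect.right = w;
	cmds->body.srcRect.bottom = h;
	cmds->body.srcScreenId = screen_id;
	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (ret != 0)
		return ret;

	/* Lazy, interruptible wait; the GMRFB is safe to read afterwards. */
	return vmw_wait_fence(dev_priv, true, sequence, true, 3*HZ);
}
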
1280
1281/*
1282 * SVGA_CMD_ANNOTATION_FILL --
1283 *
1284 * This is a blit annotation. This command stores a small piece of
1285 * device state which is consumed by the next blit-to-screen
1286 * command. The state is only cleared by commands which are
1287 * specifically documented as consuming an annotation. Other
1288 * commands (such as ESCAPEs for debugging) may intervene between
1289 * the annotation and its associated blit.
1290 *
1291 * This annotation is a promise about the contents of the next
1292 * blit: The video driver is guaranteeing that all pixels in that
1293 * blit will have the same value, specified here as a color in
1294 * SVGAColorBGRX format.
1295 *
1296 * The SVGA device can still render the blit correctly even if it
1297 * ignores this annotation, but the annotation may allow it to
1298 * perform the blit more efficiently, for example by ignoring the
1299 * source data and performing a fill in hardware.
1300 *
1301 * This annotation is most important for performance when the
1302 * user's display is being remoted over a network connection.
1303 *
1304 * Availability:
1305 * SVGA_FIFO_CAP_SCREEN_OBJECT
1306 */
1307
1308typedef
1309struct {
1310 SVGAColorBGRX color;
1311} SVGAFifoCmdAnnotationFill;
1312
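
/*
 * A minimal sketch of annotating a solid-color blit: the fill
 * annotation is emitted first and is consumed by the next
 * blit-to-screen command, which lets the device skip reading the
 * source data. The SVGAColorBGRX component names are assumed from
 * its definition earlier in this header.
 */
static int vmw_annotated_clear_sketch(struct vmw_private *dev_priv)
{
	struct {
		uint32 cmd;
		SVGAFifoCmdAnnotationFill body;
	} *cmds;

	cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
	if (cmds == NULL)
		return -ENOMEM;

	cmds->cmd = SVGA_CMD_ANNOTATION_FILL;
	cmds->body.color.r = 0;		/* promise: the next blit is all black */
	cmds->body.color.g = 0;
	cmds->body.color.b = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	/* The very next SVGA_CMD_BLIT_GMRFB_TO_SCREEN consumes this
	 * annotation; all of its source pixels must really be black. */
	return 0;
}
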
1313
1314/*
1315 * SVGA_CMD_ANNOTATION_COPY --
1316 *
1317 * This is a blit annotation. See SVGA_CMD_ANNOTATION_FILL for more
1318 * information about annotations.
1319 *
1320 * This annotation is a promise about the contents of the next
1321 * blit: The video driver is guaranteeing that all pixels in that
1322 * blit will have the same value as those which already exist at an
1323 * identically-sized region on the same or a different screen.
1324 *
1325 * Note that the source pixels for the COPY in this annotation are
 1326 * sampled before applying the annotation's associated blit. They
1327 * are allowed to overlap with the blit's destination pixels.
1328 *
1329 * The copy source rectangle is specified the same way as the blit
1330 * destination: it can be a rectangle which spans zero or more
1331 * screens, specified relative to either a screen or to the virtual
1332 * coordinate system's origin. If the source rectangle includes
1333 * pixels which are not from exactly one screen, the results are
1334 * undefined.
1335 *
1336 * Availability:
1337 * SVGA_FIFO_CAP_SCREEN_OBJECT
1338 */
1339
1340typedef
1341struct {
1342 SVGASignedPoint srcOrigin;
1343 uint32 srcScreenId;
1344} SVGAFifoCmdAnnotationCopy;
1345
1346#endif
diff --git a/drivers/gpu/drm/vmwgfx/svga_types.h b/drivers/gpu/drm/vmwgfx/svga_types.h
new file mode 100644
index 000000000000..55836dedcfc2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga_types.h
@@ -0,0 +1,45 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
 29 * Silly typedefs for the svga headers. Currently the headers are shared
 30 * between all components that talk to svga, and as such they are
 31 * written in a completely different style and use weird defines.
 32 *
 33 * This file keeps all of that ugliness prefixed with svga*.
34 */
35
36#ifndef _SVGA_TYPES_H_
37#define _SVGA_TYPES_H_
38
39typedef uint16_t uint16;
40typedef uint32_t uint32;
41typedef uint8_t uint8;
42typedef int32_t int32;
43typedef bool Bool;
44
45#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 000000000000..d6f2d2b882e9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,229 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31
32static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
33 TTM_PL_FLAG_CACHED;
34
35static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
36 TTM_PL_FLAG_CACHED |
37 TTM_PL_FLAG_NO_EVICT;
38
39static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
40 TTM_PL_FLAG_CACHED;
41
42struct ttm_placement vmw_vram_placement = {
43 .fpfn = 0,
44 .lpfn = 0,
45 .num_placement = 1,
46 .placement = &vram_placement_flags,
47 .num_busy_placement = 1,
48 .busy_placement = &vram_placement_flags
49};
50
51struct ttm_placement vmw_vram_ne_placement = {
52 .fpfn = 0,
53 .lpfn = 0,
54 .num_placement = 1,
55 .placement = &vram_ne_placement_flags,
56 .num_busy_placement = 1,
57 .busy_placement = &vram_ne_placement_flags
58};
59
60struct ttm_placement vmw_sys_placement = {
61 .fpfn = 0,
62 .lpfn = 0,
63 .num_placement = 1,
64 .placement = &sys_placement_flags,
65 .num_busy_placement = 1,
66 .busy_placement = &sys_placement_flags
67};
68
69struct vmw_ttm_backend {
70 struct ttm_backend backend;
71};
72
73static int vmw_ttm_populate(struct ttm_backend *backend,
74 unsigned long num_pages, struct page **pages,
75 struct page *dummy_read_page)
76{
77 return 0;
78}
79
80static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
81{
82 return 0;
83}
84
85static int vmw_ttm_unbind(struct ttm_backend *backend)
86{
87 return 0;
88}
89
90static void vmw_ttm_clear(struct ttm_backend *backend)
91{
92}
93
94static void vmw_ttm_destroy(struct ttm_backend *backend)
95{
96 struct vmw_ttm_backend *vmw_be =
97 container_of(backend, struct vmw_ttm_backend, backend);
98
99 kfree(vmw_be);
100}
101
102static struct ttm_backend_func vmw_ttm_func = {
103 .populate = vmw_ttm_populate,
104 .clear = vmw_ttm_clear,
105 .bind = vmw_ttm_bind,
106 .unbind = vmw_ttm_unbind,
107 .destroy = vmw_ttm_destroy,
108};
109
110struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
111{
112 struct vmw_ttm_backend *vmw_be;
113
114 vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
115 if (!vmw_be)
116 return NULL;
117
118 vmw_be->backend.func = &vmw_ttm_func;
119
120 return &vmw_be->backend;
121}
122
123int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
124{
125 return 0;
126}
127
128int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
129 struct ttm_mem_type_manager *man)
130{
131 struct vmw_private *dev_priv =
132 container_of(bdev, struct vmw_private, bdev);
133
134 switch (type) {
135 case TTM_PL_SYSTEM:
136 /* System memory */
137
138 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
139 man->available_caching = TTM_PL_MASK_CACHING;
140 man->default_caching = TTM_PL_FLAG_CACHED;
141 break;
142 case TTM_PL_VRAM:
143 /* "On-card" video ram */
144 man->gpu_offset = 0;
145 man->io_offset = dev_priv->vram_start;
146 man->io_size = dev_priv->vram_size;
147 man->flags = TTM_MEMTYPE_FLAG_FIXED |
148 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
149 man->io_addr = NULL;
150 man->available_caching = TTM_PL_MASK_CACHING;
151 man->default_caching = TTM_PL_FLAG_WC;
152 break;
153 default:
154 DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
155 return -EINVAL;
156 }
157 return 0;
158}
159
160void vmw_evict_flags(struct ttm_buffer_object *bo,
161 struct ttm_placement *placement)
162{
163 *placement = vmw_sys_placement;
164}
165
166/**
167 * FIXME: Proper access checks on buffers.
168 */
169
170static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
171{
172 return 0;
173}
174
175/**
176 * FIXME: We're using the old vmware polling method to sync.
177 * Do this with fences instead.
178 */
179
180static void *vmw_sync_obj_ref(void *sync_obj)
181{
182 return sync_obj;
183}
184
185static void vmw_sync_obj_unref(void **sync_obj)
186{
187 *sync_obj = NULL;
188}
189
190static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
191{
192 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
193
194 mutex_lock(&dev_priv->hw_mutex);
195 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
196 mutex_unlock(&dev_priv->hw_mutex);
197 return 0;
198}
199
200static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
201{
202 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
203 uint32_t sequence = (unsigned long) sync_obj;
204
205 return vmw_fence_signaled(dev_priv, sequence);
206}
207
208static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
209 bool lazy, bool interruptible)
210{
211 struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
212 uint32_t sequence = (unsigned long) sync_obj;
213
214 return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
215}
216
217struct ttm_bo_driver vmw_bo_driver = {
218 .create_ttm_backend_entry = vmw_ttm_backend_init,
219 .invalidate_caches = vmw_invalidate_caches,
220 .init_mem_type = vmw_init_mem_type,
221 .evict_flags = vmw_evict_flags,
222 .move = NULL,
223 .verify_access = vmw_verify_access,
224 .sync_obj_signaled = vmw_sync_obj_signaled,
225 .sync_obj_wait = vmw_sync_obj_wait,
226 .sync_obj_flush = vmw_sync_obj_flush,
227 .sync_obj_unref = vmw_sync_obj_unref,
228 .sync_obj_ref = vmw_sync_obj_ref
229};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 000000000000..1db1ef30be2b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,726 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30#include "ttm/ttm_placement.h"
31#include "ttm/ttm_bo_driver.h"
32#include "ttm/ttm_object.h"
33#include "ttm/ttm_module.h"
34
35#define VMWGFX_DRIVER_NAME "vmwgfx"
36#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
37#define VMWGFX_CHIP_SVGAII 0
38#define VMW_FB_RESERVATION 0
39
40/**
41 * Fully encoded drm commands. Might move to vmw_drm.h
42 */
43
44#define DRM_IOCTL_VMW_GET_PARAM \
45 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
46 struct drm_vmw_getparam_arg)
47#define DRM_IOCTL_VMW_ALLOC_DMABUF \
48 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
49 union drm_vmw_alloc_dmabuf_arg)
50#define DRM_IOCTL_VMW_UNREF_DMABUF \
51 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
52 struct drm_vmw_unref_dmabuf_arg)
53#define DRM_IOCTL_VMW_CURSOR_BYPASS \
54 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
55 struct drm_vmw_cursor_bypass_arg)
56
57#define DRM_IOCTL_VMW_CONTROL_STREAM \
58 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
59 struct drm_vmw_control_stream_arg)
60#define DRM_IOCTL_VMW_CLAIM_STREAM \
61 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
62 struct drm_vmw_stream_arg)
63#define DRM_IOCTL_VMW_UNREF_STREAM \
64 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
65 struct drm_vmw_stream_arg)
66
67#define DRM_IOCTL_VMW_CREATE_CONTEXT \
68 DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
69 struct drm_vmw_context_arg)
70#define DRM_IOCTL_VMW_UNREF_CONTEXT \
71 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
72 struct drm_vmw_context_arg)
73#define DRM_IOCTL_VMW_CREATE_SURFACE \
74 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
75 union drm_vmw_surface_create_arg)
76#define DRM_IOCTL_VMW_UNREF_SURFACE \
77 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
78 struct drm_vmw_surface_arg)
79#define DRM_IOCTL_VMW_REF_SURFACE \
80 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
81 union drm_vmw_surface_reference_arg)
82#define DRM_IOCTL_VMW_EXECBUF \
83 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
84 struct drm_vmw_execbuf_arg)
85#define DRM_IOCTL_VMW_FIFO_DEBUG \
86 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
87 struct drm_vmw_fifo_debug_arg)
88#define DRM_IOCTL_VMW_FENCE_WAIT \
89 DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
90 struct drm_vmw_fence_wait_arg)
91
92
93/**
94 * The core DRM version of this macro doesn't account for
95 * DRM_COMMAND_BASE.
96 */
97
98#define VMW_IOCTL_DEF(ioctl, func, flags) \
99 [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
100
101/**
102 * Ioctl definitions.
103 */
104
105static struct drm_ioctl_desc vmw_ioctls[] = {
106 VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
107 DRM_AUTH | DRM_UNLOCKED),
108 VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
109 DRM_AUTH | DRM_UNLOCKED),
110 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
111 DRM_AUTH | DRM_UNLOCKED),
112 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
113 vmw_kms_cursor_bypass_ioctl,
114 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
115
116 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
117 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
118 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
119 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
120 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
121 DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
122
123 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
124 DRM_AUTH | DRM_UNLOCKED),
125 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
126 DRM_AUTH | DRM_UNLOCKED),
127 VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
128 DRM_AUTH | DRM_UNLOCKED),
129 VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
130 DRM_AUTH | DRM_UNLOCKED),
131 VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
132 DRM_AUTH | DRM_UNLOCKED),
133 VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
134 DRM_AUTH | DRM_UNLOCKED),
135 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
136 DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
137 VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
138 DRM_AUTH | DRM_UNLOCKED)
139};
140
141static struct pci_device_id vmw_pci_id_list[] = {
142 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
143 {0, 0, 0}
144};
145
146static char *vmw_devname = "vmwgfx";
147
148static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
149static void vmw_master_init(struct vmw_master *);
150
151static void vmw_print_capabilities(uint32_t capabilities)
152{
153 DRM_INFO("Capabilities:\n");
154 if (capabilities & SVGA_CAP_RECT_COPY)
155 DRM_INFO(" Rect copy.\n");
156 if (capabilities & SVGA_CAP_CURSOR)
157 DRM_INFO(" Cursor.\n");
158 if (capabilities & SVGA_CAP_CURSOR_BYPASS)
159 DRM_INFO(" Cursor bypass.\n");
160 if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
161 DRM_INFO(" Cursor bypass 2.\n");
162 if (capabilities & SVGA_CAP_8BIT_EMULATION)
163 DRM_INFO(" 8bit emulation.\n");
164 if (capabilities & SVGA_CAP_ALPHA_CURSOR)
165 DRM_INFO(" Alpha cursor.\n");
166 if (capabilities & SVGA_CAP_3D)
167 DRM_INFO(" 3D.\n");
168 if (capabilities & SVGA_CAP_EXTENDED_FIFO)
169 DRM_INFO(" Extended Fifo.\n");
170 if (capabilities & SVGA_CAP_MULTIMON)
171 DRM_INFO(" Multimon.\n");
172 if (capabilities & SVGA_CAP_PITCHLOCK)
173 DRM_INFO(" Pitchlock.\n");
174 if (capabilities & SVGA_CAP_IRQMASK)
175 DRM_INFO(" Irq mask.\n");
176 if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
177 DRM_INFO(" Display Topology.\n");
178 if (capabilities & SVGA_CAP_GMR)
179 DRM_INFO(" GMR.\n");
180 if (capabilities & SVGA_CAP_TRACES)
181 DRM_INFO(" Traces.\n");
182}
183
184static int vmw_request_device(struct vmw_private *dev_priv)
185{
186 int ret;
187
188 vmw_kms_save_vga(dev_priv);
189
190 ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
191 if (unlikely(ret != 0)) {
192 DRM_ERROR("Unable to initialize FIFO.\n");
193 return ret;
194 }
195
196 return 0;
197}
198
199static void vmw_release_device(struct vmw_private *dev_priv)
200{
201 vmw_fifo_release(dev_priv, &dev_priv->fifo);
202 vmw_kms_restore_vga(dev_priv);
203}
204
205
206static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
207{
208 struct vmw_private *dev_priv;
209 int ret;
210
211 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
212 if (unlikely(dev_priv == NULL)) {
213 DRM_ERROR("Failed allocating a device private struct.\n");
214 return -ENOMEM;
215 }
216 memset(dev_priv, 0, sizeof(*dev_priv));
217
218 dev_priv->dev = dev;
219 dev_priv->vmw_chipset = chipset;
220 mutex_init(&dev_priv->hw_mutex);
221 mutex_init(&dev_priv->cmdbuf_mutex);
222 rwlock_init(&dev_priv->resource_lock);
223 idr_init(&dev_priv->context_idr);
224 idr_init(&dev_priv->surface_idr);
225 idr_init(&dev_priv->stream_idr);
226 ida_init(&dev_priv->gmr_ida);
227 mutex_init(&dev_priv->init_mutex);
228 init_waitqueue_head(&dev_priv->fence_queue);
229 init_waitqueue_head(&dev_priv->fifo_queue);
230 atomic_set(&dev_priv->fence_queue_waiters, 0);
231 atomic_set(&dev_priv->fifo_queue_waiters, 0);
232 INIT_LIST_HEAD(&dev_priv->gmr_lru);
233
234 dev_priv->io_start = pci_resource_start(dev->pdev, 0);
235 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
236 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
237
238 mutex_lock(&dev_priv->hw_mutex);
239 dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
240
241 if (dev_priv->capabilities & SVGA_CAP_GMR) {
242 dev_priv->max_gmr_descriptors =
243 vmw_read(dev_priv,
244 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
245 dev_priv->max_gmr_ids =
246 vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
247 }
248
249 dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
250 dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
251 dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
252 dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
253
254 mutex_unlock(&dev_priv->hw_mutex);
255
256 vmw_print_capabilities(dev_priv->capabilities);
257
258 if (dev_priv->capabilities & SVGA_CAP_GMR) {
259 DRM_INFO("Max GMR ids is %u\n",
260 (unsigned)dev_priv->max_gmr_ids);
261 DRM_INFO("Max GMR descriptors is %u\n",
262 (unsigned)dev_priv->max_gmr_descriptors);
263 }
264 DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
265 dev_priv->vram_start, dev_priv->vram_size / 1024);
266 DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
267 dev_priv->mmio_start, dev_priv->mmio_size / 1024);
268
269 ret = vmw_ttm_global_init(dev_priv);
270 if (unlikely(ret != 0))
271 goto out_err0;
272
273
274 vmw_master_init(&dev_priv->fbdev_master);
275 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
276 dev_priv->active_master = &dev_priv->fbdev_master;
277
278
279 ret = ttm_bo_device_init(&dev_priv->bdev,
280 dev_priv->bo_global_ref.ref.object,
281 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
282 false);
283 if (unlikely(ret != 0)) {
284 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
285 goto out_err1;
286 }
287
288 ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
289 (dev_priv->vram_size >> PAGE_SHIFT));
290 if (unlikely(ret != 0)) {
291 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
292 goto out_err2;
293 }
294
295 dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
296 dev_priv->mmio_size, DRM_MTRR_WC);
297
298 dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
299 dev_priv->mmio_size);
300
301 if (unlikely(dev_priv->mmio_virt == NULL)) {
302 ret = -ENOMEM;
303 DRM_ERROR("Failed mapping MMIO.\n");
304 goto out_err3;
305 }
306
307 dev_priv->tdev = ttm_object_device_init
308 (dev_priv->mem_global_ref.object, 12);
309
310 if (unlikely(dev_priv->tdev == NULL)) {
311 DRM_ERROR("Unable to initialize TTM object management.\n");
312 ret = -ENOMEM;
313 goto out_err4;
314 }
315
316 dev->dev_private = dev_priv;
317
318 if (!dev->devname)
319 dev->devname = vmw_devname;
320
321 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
322 ret = drm_irq_install(dev);
323 if (unlikely(ret != 0)) {
324 DRM_ERROR("Failed installing irq: %d\n", ret);
325 goto out_no_irq;
326 }
327 }
328
329 ret = pci_request_regions(dev->pdev, "vmwgfx probe");
330 dev_priv->stealth = (ret != 0);
331 if (dev_priv->stealth) {
332 /**
333 * Request at least the mmio PCI resource.
334 */
335
 336 DRM_INFO("It appears that vesafb is loaded. "
 337 "Ignore the above error, if any. Entering stealth mode.\n");
338 ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
339 if (unlikely(ret != 0)) {
340 DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
341 goto out_no_device;
342 }
343 vmw_kms_init(dev_priv);
344 vmw_overlay_init(dev_priv);
345 } else {
346 ret = vmw_request_device(dev_priv);
347 if (unlikely(ret != 0))
348 goto out_no_device;
349 vmw_kms_init(dev_priv);
350 vmw_overlay_init(dev_priv);
351 vmw_fb_init(dev_priv);
352 }
353
354 return 0;
355
356out_no_device:
357 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
358 drm_irq_uninstall(dev_priv->dev);
359 if (dev->devname == vmw_devname)
360 dev->devname = NULL;
361out_no_irq:
362 ttm_object_device_release(&dev_priv->tdev);
363out_err4:
364 iounmap(dev_priv->mmio_virt);
365out_err3:
366 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
367 dev_priv->mmio_size, DRM_MTRR_WC);
368 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
369out_err2:
370 (void)ttm_bo_device_release(&dev_priv->bdev);
371out_err1:
372 vmw_ttm_global_release(dev_priv);
373out_err0:
374 ida_destroy(&dev_priv->gmr_ida);
375 idr_destroy(&dev_priv->surface_idr);
376 idr_destroy(&dev_priv->context_idr);
377 idr_destroy(&dev_priv->stream_idr);
378 kfree(dev_priv);
379 return ret;
380}
381
382static int vmw_driver_unload(struct drm_device *dev)
383{
384 struct vmw_private *dev_priv = vmw_priv(dev);
385
386 DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
387
388 if (!dev_priv->stealth) {
389 vmw_fb_close(dev_priv);
390 vmw_kms_close(dev_priv);
391 vmw_overlay_close(dev_priv);
392 vmw_release_device(dev_priv);
393 pci_release_regions(dev->pdev);
394 } else {
395 vmw_kms_close(dev_priv);
396 vmw_overlay_close(dev_priv);
397 pci_release_region(dev->pdev, 2);
398 }
399 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
400 drm_irq_uninstall(dev_priv->dev);
401 if (dev->devname == vmw_devname)
402 dev->devname = NULL;
403 ttm_object_device_release(&dev_priv->tdev);
404 iounmap(dev_priv->mmio_virt);
405 drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
406 dev_priv->mmio_size, DRM_MTRR_WC);
407 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
408 (void)ttm_bo_device_release(&dev_priv->bdev);
409 vmw_ttm_global_release(dev_priv);
410 ida_destroy(&dev_priv->gmr_ida);
411 idr_destroy(&dev_priv->surface_idr);
412 idr_destroy(&dev_priv->context_idr);
413 idr_destroy(&dev_priv->stream_idr);
414
415 kfree(dev_priv);
416
417 return 0;
418}
419
420static void vmw_postclose(struct drm_device *dev,
421 struct drm_file *file_priv)
422{
423 struct vmw_fpriv *vmw_fp;
424
425 vmw_fp = vmw_fpriv(file_priv);
426 ttm_object_file_release(&vmw_fp->tfile);
427 if (vmw_fp->locked_master)
428 drm_master_put(&vmw_fp->locked_master);
429 kfree(vmw_fp);
430}
431
432static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
433{
434 struct vmw_private *dev_priv = vmw_priv(dev);
435 struct vmw_fpriv *vmw_fp;
436 int ret = -ENOMEM;
437
438 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
439 if (unlikely(vmw_fp == NULL))
440 return ret;
441
442 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
443 if (unlikely(vmw_fp->tfile == NULL))
444 goto out_no_tfile;
445
446 file_priv->driver_priv = vmw_fp;
447
448 if (unlikely(dev_priv->bdev.dev_mapping == NULL))
449 dev_priv->bdev.dev_mapping =
450 file_priv->filp->f_path.dentry->d_inode->i_mapping;
451
452 return 0;
453
454out_no_tfile:
455 kfree(vmw_fp);
456 return ret;
457}
458
459static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
460 unsigned long arg)
461{
462 struct drm_file *file_priv = filp->private_data;
463 struct drm_device *dev = file_priv->minor->dev;
464 unsigned int nr = DRM_IOCTL_NR(cmd);
465
466 /*
467 * Do extra checking on driver private ioctls.
468 */
469
470 if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
471 && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
472 struct drm_ioctl_desc *ioctl =
473 &vmw_ioctls[nr - DRM_COMMAND_BASE];
474
475 if (unlikely(ioctl->cmd != cmd)) {
476 DRM_ERROR("Invalid command format, ioctl %d\n",
477 nr - DRM_COMMAND_BASE);
478 return -EINVAL;
479 }
480 }
481
482 return drm_ioctl(filp, cmd, arg);
483}
484
485static int vmw_firstopen(struct drm_device *dev)
486{
487 struct vmw_private *dev_priv = vmw_priv(dev);
488 dev_priv->is_opened = true;
489
490 return 0;
491}
492
493static void vmw_lastclose(struct drm_device *dev)
494{
495 struct vmw_private *dev_priv = vmw_priv(dev);
496 struct drm_crtc *crtc;
497 struct drm_mode_set set;
498 int ret;
499
500 /**
501 * Do nothing on the lastclose call from drm_unload.
502 */
503
504 if (!dev_priv->is_opened)
505 return;
506
507 dev_priv->is_opened = false;
508 set.x = 0;
509 set.y = 0;
510 set.fb = NULL;
511 set.mode = NULL;
512 set.connectors = NULL;
513 set.num_connectors = 0;
514
515 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
516 set.crtc = crtc;
517 ret = crtc->funcs->set_config(&set);
518 WARN_ON(ret != 0);
519 }
520
521}
522
523static void vmw_master_init(struct vmw_master *vmaster)
524{
525 ttm_lock_init(&vmaster->lock);
526}
527
528static int vmw_master_create(struct drm_device *dev,
529 struct drm_master *master)
530{
531 struct vmw_master *vmaster;
532
533 DRM_INFO("Master create.\n");
534 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
535 if (unlikely(vmaster == NULL))
536 return -ENOMEM;
537
538 ttm_lock_init(&vmaster->lock);
539 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
540 master->driver_priv = vmaster;
541
542 return 0;
543}
544
545static void vmw_master_destroy(struct drm_device *dev,
546 struct drm_master *master)
547{
548 struct vmw_master *vmaster = vmw_master(master);
549
550 DRM_INFO("Master destroy.\n");
551 master->driver_priv = NULL;
552 kfree(vmaster);
553}
554
555
556static int vmw_master_set(struct drm_device *dev,
557 struct drm_file *file_priv,
558 bool from_open)
559{
560 struct vmw_private *dev_priv = vmw_priv(dev);
561 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
562 struct vmw_master *active = dev_priv->active_master;
563 struct vmw_master *vmaster = vmw_master(file_priv->master);
564 int ret = 0;
565
566 DRM_INFO("Master set.\n");
567 if (dev_priv->stealth) {
568 ret = vmw_request_device(dev_priv);
569 if (unlikely(ret != 0))
570 return ret;
571 }
572
573 if (active) {
574 BUG_ON(active != &dev_priv->fbdev_master);
575 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
576 if (unlikely(ret != 0))
577 goto out_no_active_lock;
578
579 ttm_lock_set_kill(&active->lock, true, SIGTERM);
580 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
581 if (unlikely(ret != 0)) {
582 DRM_ERROR("Unable to clean VRAM on "
583 "master drop.\n");
584 }
585
586 dev_priv->active_master = NULL;
587 }
588
589 ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
590 if (!from_open) {
591 ttm_vt_unlock(&vmaster->lock);
592 BUG_ON(vmw_fp->locked_master != file_priv->master);
593 drm_master_put(&vmw_fp->locked_master);
594 }
595
596 dev_priv->active_master = vmaster;
597
598 return 0;
599
600out_no_active_lock:
601 vmw_release_device(dev_priv);
602 return ret;
603}
604
605static void vmw_master_drop(struct drm_device *dev,
606 struct drm_file *file_priv,
607 bool from_release)
608{
609 struct vmw_private *dev_priv = vmw_priv(dev);
610 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
611 struct vmw_master *vmaster = vmw_master(file_priv->master);
612 int ret;
613
614 DRM_INFO("Master drop.\n");
615
616 /**
617 * Make sure the master doesn't disappear while we have
618 * it locked.
619 */
620
621 vmw_fp->locked_master = drm_master_get(file_priv->master);
622 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
623
624 if (unlikely((ret != 0))) {
625 DRM_ERROR("Unable to lock TTM at VT switch.\n");
626 drm_master_put(&vmw_fp->locked_master);
627 }
628
629 ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
630
631 if (dev_priv->stealth) {
632 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
633 if (unlikely(ret != 0))
634 DRM_ERROR("Unable to clean VRAM on master drop.\n");
635 vmw_release_device(dev_priv);
636 }
637 dev_priv->active_master = &dev_priv->fbdev_master;
638 ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
639 ttm_vt_unlock(&dev_priv->fbdev_master.lock);
640
641 if (!dev_priv->stealth)
642 vmw_fb_on(dev_priv);
643}
644
645
646static void vmw_remove(struct pci_dev *pdev)
647{
648 struct drm_device *dev = pci_get_drvdata(pdev);
649
650 drm_put_dev(dev);
651}
652
653static struct drm_driver driver = {
654 .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
655 DRIVER_MODESET,
656 .load = vmw_driver_load,
657 .unload = vmw_driver_unload,
658 .firstopen = vmw_firstopen,
659 .lastclose = vmw_lastclose,
660 .irq_preinstall = vmw_irq_preinstall,
661 .irq_postinstall = vmw_irq_postinstall,
662 .irq_uninstall = vmw_irq_uninstall,
663 .irq_handler = vmw_irq_handler,
664 .reclaim_buffers_locked = NULL,
665 .get_map_ofs = drm_core_get_map_ofs,
666 .get_reg_ofs = drm_core_get_reg_ofs,
667 .ioctls = vmw_ioctls,
668 .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
669 .dma_quiescent = NULL, /*vmw_dma_quiescent, */
670 .master_create = vmw_master_create,
671 .master_destroy = vmw_master_destroy,
672 .master_set = vmw_master_set,
673 .master_drop = vmw_master_drop,
674 .open = vmw_driver_open,
675 .postclose = vmw_postclose,
676 .fops = {
677 .owner = THIS_MODULE,
678 .open = drm_open,
679 .release = drm_release,
680 .unlocked_ioctl = vmw_unlocked_ioctl,
681 .mmap = vmw_mmap,
682 .poll = drm_poll,
683 .fasync = drm_fasync,
684#if defined(CONFIG_COMPAT)
685 .compat_ioctl = drm_compat_ioctl,
686#endif
687 },
688 .pci_driver = {
689 .name = VMWGFX_DRIVER_NAME,
690 .id_table = vmw_pci_id_list,
691 .probe = vmw_probe,
692 .remove = vmw_remove
693 },
694 .name = VMWGFX_DRIVER_NAME,
695 .desc = VMWGFX_DRIVER_DESC,
696 .date = VMWGFX_DRIVER_DATE,
697 .major = VMWGFX_DRIVER_MAJOR,
698 .minor = VMWGFX_DRIVER_MINOR,
699 .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
700};
701
702static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
703{
704 return drm_get_dev(pdev, ent, &driver);
705}
706
707static int __init vmwgfx_init(void)
708{
709 int ret;
710 ret = drm_init(&driver);
711 if (ret)
712 DRM_ERROR("Failed initializing DRM.\n");
713 return ret;
714}
715
716static void __exit vmwgfx_exit(void)
717{
718 drm_exit(&driver);
719}
720
721module_init(vmwgfx_init);
722module_exit(vmwgfx_exit);
723
724MODULE_AUTHOR("VMware Inc. and others");
725MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
726MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 000000000000..e61bd85b6975
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,513 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef _VMWGFX_DRV_H_
29#define _VMWGFX_DRV_H_
30
31#include "vmwgfx_reg.h"
32#include "drmP.h"
33#include "vmwgfx_drm.h"
34#include "drm_hashtab.h"
35#include "ttm/ttm_bo_driver.h"
36#include "ttm/ttm_object.h"
37#include "ttm/ttm_lock.h"
38#include "ttm/ttm_execbuf_util.h"
39#include "ttm/ttm_module.h"
40
41#define VMWGFX_DRIVER_DATE "20090724"
42#define VMWGFX_DRIVER_MAJOR 0
43#define VMWGFX_DRIVER_MINOR 1
44#define VMWGFX_DRIVER_PATCHLEVEL 2
45#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
46#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
47#define VMWGFX_MAX_RELOCATIONS 2048
48#define VMWGFX_MAX_GMRS 2048
49
50struct vmw_fpriv {
51 struct drm_master *locked_master;
52 struct ttm_object_file *tfile;
53};
54
55struct vmw_dma_buffer {
56 struct ttm_buffer_object base;
57 struct list_head validate_list;
58 struct list_head gmr_lru;
59 uint32_t gmr_id;
60 bool gmr_bound;
61 uint32_t cur_validate_node;
62 bool on_validate_list;
63};
64
65struct vmw_resource {
66 struct kref kref;
67 struct vmw_private *dev_priv;
68 struct idr *idr;
69 int id;
70 enum ttm_object_type res_type;
71 bool avail;
72 void (*hw_destroy) (struct vmw_resource *res);
73 void (*res_free) (struct vmw_resource *res);
74
75 /* TODO is a generic snooper needed? */
76#if 0
77 void (*snoop)(struct vmw_resource *res,
78 struct ttm_object_file *tfile,
79 SVGA3dCmdHeader *header);
80 void *snoop_priv;
81#endif
82};
83
84struct vmw_cursor_snooper {
85 struct drm_crtc *crtc;
86 size_t age;
87 uint32_t *image;
88};
89
90struct vmw_surface {
91 struct vmw_resource res;
92 uint32_t flags;
93 uint32_t format;
94 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
95 struct drm_vmw_size *sizes;
96 uint32_t num_sizes;
97
 98 /* TODO so far just an extra pointer */
99 struct vmw_cursor_snooper snooper;
100};
101
102struct vmw_fifo_state {
103 unsigned long reserved_size;
104 __le32 *dynamic_buffer;
105 __le32 *static_buffer;
106 __le32 *last_buffer;
107 uint32_t last_data_size;
108 uint32_t last_buffer_size;
109 bool last_buffer_add;
110 unsigned long static_buffer_size;
111 bool using_bounce_buffer;
112 uint32_t capabilities;
113 struct rw_semaphore rwsem;
114};
115
116struct vmw_relocation {
117 SVGAGuestPtr *location;
118 uint32_t index;
119};
120
 121struct vmw_sw_context {
122 struct ida bo_list;
123 uint32_t last_cid;
124 bool cid_valid;
125 uint32_t last_sid;
126 uint32_t sid_translation;
127 bool sid_valid;
128 struct ttm_object_file *tfile;
129 struct list_head validate_nodes;
130 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
131 uint32_t cur_reloc;
132 struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
133 uint32_t cur_val_buf;
134};
135
136struct vmw_legacy_display;
137struct vmw_overlay;
138
139struct vmw_master {
140 struct ttm_lock lock;
141};
142
143struct vmw_private {
144 struct ttm_bo_device bdev;
145 struct ttm_bo_global_ref bo_global_ref;
146 struct ttm_global_reference mem_global_ref;
147
148 struct vmw_fifo_state fifo;
149
150 struct drm_device *dev;
151 unsigned long vmw_chipset;
152 unsigned int io_start;
153 uint32_t vram_start;
154 uint32_t vram_size;
155 uint32_t mmio_start;
156 uint32_t mmio_size;
157 uint32_t fb_max_width;
158 uint32_t fb_max_height;
159 __le32 __iomem *mmio_virt;
160 int mmio_mtrr;
161 uint32_t capabilities;
162 uint32_t max_gmr_descriptors;
163 uint32_t max_gmr_ids;
164 struct mutex hw_mutex;
165
166 /*
167 * VGA registers.
168 */
169
170 uint32_t vga_width;
171 uint32_t vga_height;
172 uint32_t vga_depth;
173 uint32_t vga_bpp;
174 uint32_t vga_pseudo;
175 uint32_t vga_red_mask;
176 uint32_t vga_blue_mask;
177 uint32_t vga_green_mask;
178
179 /*
180 * Framebuffer info.
181 */
182
183 void *fb_info;
184 struct vmw_legacy_display *ldu_priv;
185 struct vmw_overlay *overlay_priv;
186
187 /*
188 * Context and surface management.
189 */
190
191 rwlock_t resource_lock;
192 struct idr context_idr;
193 struct idr surface_idr;
194 struct idr stream_idr;
195
196 /*
197 * Block lastclose from racing with firstopen.
198 */
199
200 struct mutex init_mutex;
201
202 /*
203 * A resource manager for kernel-only surfaces and
204 * contexts.
205 */
206
207 struct ttm_object_device *tdev;
208
209 /*
210 * Fencing and IRQs.
211 */
212
213 uint32_t fence_seq;
214 wait_queue_head_t fence_queue;
215 wait_queue_head_t fifo_queue;
216 atomic_t fence_queue_waiters;
217 atomic_t fifo_queue_waiters;
218 uint32_t last_read_sequence;
219 spinlock_t irq_lock;
220
221 /*
222 * Device state
223 */
224
225 uint32_t traces_state;
226 uint32_t enable_state;
227 uint32_t config_done_state;
228
229 /**
230 * Execbuf
231 */
232 /**
233 * Protected by the cmdbuf mutex.
234 */
235
236 struct vmw_sw_context ctx;
237 uint32_t val_seq;
238 struct mutex cmdbuf_mutex;
239
240 /**
241 * GMR management. Protected by the lru spinlock.
242 */
243
244 struct ida gmr_ida;
245 struct list_head gmr_lru;
246
247
248 /**
249 * Operating mode.
250 */
251
252 bool stealth;
253 bool is_opened;
254
255 /**
256 * Master management.
257 */
258
259 struct vmw_master *active_master;
260 struct vmw_master fbdev_master;
261};
262
263static inline struct vmw_private *vmw_priv(struct drm_device *dev)
264{
265 return (struct vmw_private *)dev->dev_private;
266}
267
268static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
269{
270 return (struct vmw_fpriv *)file_priv->driver_priv;
271}
272
273static inline struct vmw_master *vmw_master(struct drm_master *master)
274{
275 return (struct vmw_master *) master->driver_priv;
276}
277
278static inline void vmw_write(struct vmw_private *dev_priv,
279 unsigned int offset, uint32_t value)
280{
281 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
282 outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
283}
284
285static inline uint32_t vmw_read(struct vmw_private *dev_priv,
286 unsigned int offset)
287{
288 uint32_t val;
289
290 outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
291 val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
292 return val;
293}
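
/*
 * A register access is two port operations through a shared
 * index/value pair, so it is not atomic. A sketch of the convention
 * used by callers in this driver (see e.g. vmw_driver_load()), which
 * serialize accesses with the hw_mutex:
 */
static inline uint32_t vmw_read_locked(struct vmw_private *dev_priv,
				       unsigned int offset)
{
	uint32_t val;

	mutex_lock(&dev_priv->hw_mutex);
	val = vmw_read(dev_priv, offset);
	mutex_unlock(&dev_priv->hw_mutex);
	return val;
}
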
294
295/**
296 * GMR utilities - vmwgfx_gmr.c
297 */
298
299extern int vmw_gmr_bind(struct vmw_private *dev_priv,
300 struct ttm_buffer_object *bo);
301extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
302
303/**
304 * Resource utilities - vmwgfx_resource.c
305 */
306
307extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
308extern void vmw_resource_unreference(struct vmw_resource **p_res);
309extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
310extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
311 struct drm_file *file_priv);
312extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
313 struct drm_file *file_priv);
314extern int vmw_context_check(struct vmw_private *dev_priv,
315 struct ttm_object_file *tfile,
316 int id);
317extern void vmw_surface_res_free(struct vmw_resource *res);
318extern int vmw_surface_init(struct vmw_private *dev_priv,
319 struct vmw_surface *srf,
320 void (*res_free) (struct vmw_resource *res));
321extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
322 struct ttm_object_file *tfile,
323 uint32_t handle,
324 struct vmw_surface **out);
325extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv);
327extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
328 struct drm_file *file_priv);
329extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
330 struct drm_file *file_priv);
331extern int vmw_surface_check(struct vmw_private *dev_priv,
332 struct ttm_object_file *tfile,
333 uint32_t handle, int *id);
334extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
335extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
336 struct vmw_dma_buffer *vmw_bo,
337 size_t size, struct ttm_placement *placement,
 338 bool interruptible,
339 void (*bo_free) (struct ttm_buffer_object *bo));
340extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
341 struct drm_file *file_priv);
342extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
343 struct drm_file *file_priv);
344extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
345 uint32_t cur_validate_node);
346extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
347extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
348 uint32_t id, struct vmw_dma_buffer **out);
349extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
350extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
351extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
352extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
353 struct vmw_dma_buffer *bo);
354extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
355 struct vmw_dma_buffer *bo);
356extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
357 struct drm_file *file_priv);
358extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
359 struct drm_file *file_priv);
360extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
361 struct ttm_object_file *tfile,
362 uint32_t *inout_id,
363 struct vmw_resource **out);
364
365
366/**
367 * Misc Ioctl functionality - vmwgfx_ioctl.c
368 */
369
370extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
371 struct drm_file *file_priv);
372extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
373 struct drm_file *file_priv);
374
375/**
376 * Fifo utilities - vmwgfx_fifo.c
377 */
378
379extern int vmw_fifo_init(struct vmw_private *dev_priv,
380 struct vmw_fifo_state *fifo);
381extern void vmw_fifo_release(struct vmw_private *dev_priv,
382 struct vmw_fifo_state *fifo);
383extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
384extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
385extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
386 uint32_t *sequence);
387extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
388extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
389
390/**
391 * TTM glue - vmwgfx_ttm_glue.c
392 */
393
394extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
395extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
396extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
397
398/**
399 * TTM buffer object driver - vmwgfx_buffer.c
400 */
401
402extern struct ttm_placement vmw_vram_placement;
403extern struct ttm_placement vmw_vram_ne_placement;
404extern struct ttm_placement vmw_sys_placement;
405extern struct ttm_bo_driver vmw_bo_driver;
406extern int vmw_dma_quiescent(struct drm_device *dev);
407
408/**
409 * Command submission - vmwgfx_execbuf.c
410 */
411
412extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
413 struct drm_file *file_priv);
414
415/**
 416 * IRQs and waiting - vmwgfx_irq.c
417 */
418
419extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
420extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
421 uint32_t sequence, bool interruptible,
422 unsigned long timeout);
423extern void vmw_irq_preinstall(struct drm_device *dev);
424extern int vmw_irq_postinstall(struct drm_device *dev);
425extern void vmw_irq_uninstall(struct drm_device *dev);
426extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
427 uint32_t sequence);
428extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
429 struct drm_file *file_priv);
430extern int vmw_fallback_wait(struct vmw_private *dev_priv,
431 bool lazy,
432 bool fifo_idle,
433 uint32_t sequence,
434 bool interruptible,
435 unsigned long timeout);
436
437/**
438 * Kernel framebuffer - vmwgfx_fb.c
439 */
440
441int vmw_fb_init(struct vmw_private *vmw_priv);
442int vmw_fb_close(struct vmw_private *dev_priv);
443int vmw_fb_off(struct vmw_private *vmw_priv);
444int vmw_fb_on(struct vmw_private *vmw_priv);
445
446/**
447 * Kernel modesetting - vmwgfx_kms.c
448 */
449
450int vmw_kms_init(struct vmw_private *dev_priv);
451int vmw_kms_close(struct vmw_private *dev_priv);
452int vmw_kms_save_vga(struct vmw_private *vmw_priv);
453int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
454int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
455 struct drm_file *file_priv);
456void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
457void vmw_kms_cursor_snoop(struct vmw_surface *srf,
458 struct ttm_object_file *tfile,
459 struct ttm_buffer_object *bo,
460 SVGA3dCmdHeader *header);
461
462/**
463 * Overlay control - vmwgfx_overlay.c
464 */
465
466int vmw_overlay_init(struct vmw_private *dev_priv);
467int vmw_overlay_close(struct vmw_private *dev_priv);
468int vmw_overlay_ioctl(struct drm_device *dev, void *data,
469 struct drm_file *file_priv);
470int vmw_overlay_stop_all(struct vmw_private *dev_priv);
471int vmw_overlay_resume_all(struct vmw_private *dev_priv);
472int vmw_overlay_pause_all(struct vmw_private *dev_priv);
473int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
474int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
475int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
476int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
477
478/**
479 * Inline helper functions
480 */
481
482static inline void vmw_surface_unreference(struct vmw_surface **srf)
483{
484 struct vmw_surface *tmp_srf = *srf;
485 struct vmw_resource *res = &tmp_srf->res;
486 *srf = NULL;
487
488 vmw_resource_unreference(&res);
489}
490
491static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
492{
493 (void) vmw_resource_reference(&srf->res);
494 return srf;
495}
496
497static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
498{
499 struct vmw_dma_buffer *tmp_buf = *buf;
500 struct ttm_buffer_object *bo = &tmp_buf->base;
501 *buf = NULL;
502
503 ttm_bo_unref(&bo);
504}
505
506static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
507{
508 if (ttm_bo_reference(&buf->base))
509 return buf;
510 return NULL;
511}
512
513#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
new file mode 100644
index 000000000000..2e92da567403
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -0,0 +1,621 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include "ttm/ttm_bo_api.h"
31#include "ttm/ttm_placement.h"
32
33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header)
36{
37 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
38}
39
40static int vmw_cmd_ok(struct vmw_private *dev_priv,
41 struct vmw_sw_context *sw_context,
42 SVGA3dCmdHeader *header)
43{
44 return 0;
45}
46
47static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
48 struct vmw_sw_context *sw_context,
49 SVGA3dCmdHeader *header)
50{
51 struct vmw_cid_cmd {
52 SVGA3dCmdHeader header;
53 __le32 cid;
54 } *cmd;
55 int ret;
56
57 cmd = container_of(header, struct vmw_cid_cmd, header);
58 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
59 return 0;
60
61 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
62 if (unlikely(ret != 0)) {
63 DRM_ERROR("Could not find or use context %u\n",
64 (unsigned) cmd->cid);
65 return ret;
66 }
67
68 sw_context->last_cid = cmd->cid;
69 sw_context->cid_valid = true;
70
71 return 0;
72}
73
74static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
75 struct vmw_sw_context *sw_context,
76 uint32_t *sid)
77{
78 if (*sid == SVGA3D_INVALID_ID)
79 return 0;
80
81 if (unlikely((!sw_context->sid_valid ||
82 *sid != sw_context->last_sid))) {
83 int real_id;
84 int ret = vmw_surface_check(dev_priv, sw_context->tfile,
85 *sid, &real_id);
86
87 if (unlikely(ret != 0)) {
88 DRM_ERROR("Could ot find or use surface 0x%08x "
89 "address 0x%08lx\n",
90 (unsigned int) *sid,
91 (unsigned long) sid);
92 return ret;
93 }
94
95 sw_context->last_sid = *sid;
96 sw_context->sid_valid = true;
97 *sid = real_id;
98 sw_context->sid_translation = real_id;
99 } else
100 *sid = sw_context->sid_translation;
101
102 return 0;
103}
104
105
106static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
107 struct vmw_sw_context *sw_context,
108 SVGA3dCmdHeader *header)
109{
110 struct vmw_sid_cmd {
111 SVGA3dCmdHeader header;
112 SVGA3dCmdSetRenderTarget body;
113 } *cmd;
114 int ret;
115
116 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
117 if (unlikely(ret != 0))
118 return ret;
119
120 cmd = container_of(header, struct vmw_sid_cmd, header);
121 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
122 return ret;
123}
124
125static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
126 struct vmw_sw_context *sw_context,
127 SVGA3dCmdHeader *header)
128{
129 struct vmw_sid_cmd {
130 SVGA3dCmdHeader header;
131 SVGA3dCmdSurfaceCopy body;
132 } *cmd;
133 int ret;
134
135 cmd = container_of(header, struct vmw_sid_cmd, header);
136 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
137 if (unlikely(ret != 0))
138 return ret;
139 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
140}
141
142static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
143 struct vmw_sw_context *sw_context,
144 SVGA3dCmdHeader *header)
145{
146 struct vmw_sid_cmd {
147 SVGA3dCmdHeader header;
148 SVGA3dCmdSurfaceStretchBlt body;
149 } *cmd;
150 int ret;
151
152 cmd = container_of(header, struct vmw_sid_cmd, header);
153 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
154 if (unlikely(ret != 0))
155 return ret;
156 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
157}
158
159static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
160 struct vmw_sw_context *sw_context,
161 SVGA3dCmdHeader *header)
162{
163 struct vmw_sid_cmd {
164 SVGA3dCmdHeader header;
165 SVGA3dCmdBlitSurfaceToScreen body;
166 } *cmd;
167
168 cmd = container_of(header, struct vmw_sid_cmd, header);
169 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
170}
171
172static int vmw_cmd_present_check(struct vmw_private *dev_priv,
173 struct vmw_sw_context *sw_context,
174 SVGA3dCmdHeader *header)
175{
176 struct vmw_sid_cmd {
177 SVGA3dCmdHeader header;
178 SVGA3dCmdPresent body;
179 } *cmd;
180
181 cmd = container_of(header, struct vmw_sid_cmd, header);
182 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183}
184
185static int vmw_cmd_dma(struct vmw_private *dev_priv,
186 struct vmw_sw_context *sw_context,
187 SVGA3dCmdHeader *header)
188{
189 uint32_t handle;
190 struct vmw_dma_buffer *vmw_bo = NULL;
191 struct ttm_buffer_object *bo;
192 struct vmw_surface *srf = NULL;
193 struct vmw_dma_cmd {
194 SVGA3dCmdHeader header;
195 SVGA3dCmdSurfaceDMA dma;
196 } *cmd;
197 struct vmw_relocation *reloc;
198 int ret;
199 uint32_t cur_validate_node;
200 struct ttm_validate_buffer *val_buf;
201
202 cmd = container_of(header, struct vmw_dma_cmd, header);
203 handle = cmd->dma.guest.ptr.gmrId;
204 ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
205 if (unlikely(ret != 0)) {
206 DRM_ERROR("Could not find or use GMR region.\n");
207 return -EINVAL;
208 }
209 bo = &vmw_bo->base;
210
211 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
212 DRM_ERROR("Max number of DMA commands per submission"
213 " exceeded\n");
214 ret = -EINVAL;
215 goto out_no_reloc;
216 }
217
218 reloc = &sw_context->relocs[sw_context->cur_reloc++];
219 reloc->location = &cmd->dma.guest.ptr;
220
221 cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
222 if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
223 DRM_ERROR("Max number of DMA buffers per submission"
224 " exceeded.\n");
225 ret = -EINVAL;
226 goto out_no_reloc;
227 }
228
229 reloc->index = cur_validate_node;
230 if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
231 val_buf = &sw_context->val_bufs[cur_validate_node];
232 val_buf->bo = ttm_bo_reference(bo);
233 val_buf->new_sync_obj_arg = (void *) dev_priv;
234 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
235 ++sw_context->cur_val_buf;
236 }
237
238 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
239 cmd->dma.host.sid, &srf);
240 if (ret) {
241		DRM_ERROR("Could not find surface.\n");
242 goto out_no_reloc;
243 }
244
245	/*
246	 * Patch command stream with device SID.
247	 */
248
249 cmd->dma.host.sid = srf->res.id;
250 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
251	/*
252	 * FIXME: May deadlock here when called from the
253	 * command parsing code.
254	 */
255 vmw_surface_unreference(&srf);
256
257out_no_reloc:
258 vmw_dmabuf_unreference(&vmw_bo);
259 return ret;
260}
261
262static int vmw_cmd_draw(struct vmw_private *dev_priv,
263 struct vmw_sw_context *sw_context,
264 SVGA3dCmdHeader *header)
265{
266 struct vmw_draw_cmd {
267 SVGA3dCmdHeader header;
268 SVGA3dCmdDrawPrimitives body;
269 } *cmd;
270 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
271 (unsigned long)header + sizeof(*cmd));
272 SVGA3dPrimitiveRange *range;
273 uint32_t i;
274 uint32_t maxnum;
275 int ret;
276
277 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
278 if (unlikely(ret != 0))
279 return ret;
280
281 cmd = container_of(header, struct vmw_draw_cmd, header);
282 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
283
284 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
285 DRM_ERROR("Illegal number of vertex declarations.\n");
286 return -EINVAL;
287 }
288
289 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
290 ret = vmw_cmd_sid_check(dev_priv, sw_context,
291 &decl->array.surfaceId);
292 if (unlikely(ret != 0))
293 return ret;
294 }
295
296 maxnum = (header->size - sizeof(cmd->body) -
297 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
298 if (unlikely(cmd->body.numRanges > maxnum)) {
299 DRM_ERROR("Illegal number of index ranges.\n");
300 return -EINVAL;
301 }
302
303 range = (SVGA3dPrimitiveRange *) decl;
304 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
305 ret = vmw_cmd_sid_check(dev_priv, sw_context,
306 &range->indexArray.surfaceId);
307 if (unlikely(ret != 0))
308 return ret;
309 }
310 return 0;
311}
312
313
314static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
315 struct vmw_sw_context *sw_context,
316 SVGA3dCmdHeader *header)
317{
318 struct vmw_tex_state_cmd {
319 SVGA3dCmdHeader header;
320 SVGA3dCmdSetTextureState state;
321 };
322
323 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
324 ((unsigned long) header + header->size + sizeof(header));
325 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
326 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
327 int ret;
328
329 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
330 if (unlikely(ret != 0))
331 return ret;
332
333 for (; cur_state < last_state; ++cur_state) {
334 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
335 continue;
336
337 ret = vmw_cmd_sid_check(dev_priv, sw_context,
338 &cur_state->value);
339 if (unlikely(ret != 0))
340 return ret;
341 }
342
343 return 0;
344}
345
346
347typedef int (*vmw_cmd_func) (struct vmw_private *,
348 struct vmw_sw_context *,
349 SVGA3dCmdHeader *);
350
351#define VMW_CMD_DEF(cmd, func) \
352 [cmd - SVGA_3D_CMD_BASE] = func
353
354static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
355 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
356 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
357 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
358 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
359 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
360 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
361 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
362 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
363 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
364 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
365 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
366 &vmw_cmd_set_render_target_check),
367 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
368 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
369 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
370 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
371 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
372 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
373 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
374 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
375 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
376 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
377 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
378 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
379 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
380 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
381 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
382 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
383 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
384 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
385 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
386 &vmw_cmd_blt_surf_screen_check)
387};
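/*
 * Illustrative expansion of the table above (values assumed from
 * svga3d_reg.h, where SVGA_3D_CMD_BASE is 1040):
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma)
 *
 * becomes the designated initializer
 *
 *	[SVGA_3D_CMD_SURFACE_DMA - 1040] = &vmw_cmd_dma,
 *
 * so dispatch in vmw_cmd_check() below is a single array index after
 * subtracting SVGA_3D_CMD_BASE from the command id.
 */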
388
389static int vmw_cmd_check(struct vmw_private *dev_priv,
390 struct vmw_sw_context *sw_context,
391 void *buf, uint32_t *size)
392{
393 uint32_t cmd_id;
394 uint32_t size_remaining = *size;
395 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
396 int ret;
397
398 cmd_id = ((uint32_t *)buf)[0];
399	if (cmd_id == SVGA_CMD_UPDATE) {
		/* SVGA_CMD_UPDATE is five 32-bit words: the command id
		 * followed by x, y, width and height. */
400		*size = 5 << 2;
401 return 0;
402 }
403
404 cmd_id = le32_to_cpu(header->id);
405 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
406
407 cmd_id -= SVGA_3D_CMD_BASE;
408 if (unlikely(*size > size_remaining))
409 goto out_err;
410
411 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
412 goto out_err;
413
414 ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
415 if (unlikely(ret != 0))
416 goto out_err;
417
418 return 0;
419out_err:
420 DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
421 cmd_id + SVGA_3D_CMD_BASE);
422 return -EINVAL;
423}
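/*
 * Sketch of the wire format this parser assumes: a 3D command is a
 * SVGA3dCmdHeader { uint32 id; uint32 size; } followed by size bytes
 * of body, so a command with size == 24 consumes 32 bytes of the
 * buffer.  *size is set to that total so the caller can step to the
 * next command.
 */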
424
425static int vmw_cmd_check_all(struct vmw_private *dev_priv,
426 struct vmw_sw_context *sw_context,
427 void *buf, uint32_t size)
428{
429 int32_t cur_size = size;
430 int ret;
431
432 while (cur_size > 0) {
433 size = cur_size;
434 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
435 if (unlikely(ret != 0))
436 return ret;
437 buf = (void *)((unsigned long) buf + size);
438 cur_size -= size;
439 }
440
441 if (unlikely(cur_size != 0)) {
442 DRM_ERROR("Command verifier out of sync.\n");
443 return -EINVAL;
444 }
445
446 return 0;
447}
448
449static void vmw_free_relocations(struct vmw_sw_context *sw_context)
450{
451 sw_context->cur_reloc = 0;
452}
453
454static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
455{
456 uint32_t i;
457 struct vmw_relocation *reloc;
458 struct ttm_validate_buffer *validate;
459 struct ttm_buffer_object *bo;
460
461 for (i = 0; i < sw_context->cur_reloc; ++i) {
462 reloc = &sw_context->relocs[i];
463 validate = &sw_context->val_bufs[reloc->index];
464 bo = validate->bo;
465 reloc->location->offset += bo->offset;
466 reloc->location->gmrId = vmw_dmabuf_gmr(bo);
467 }
468 vmw_free_relocations(sw_context);
469}
470
471static void vmw_clear_validations(struct vmw_sw_context *sw_context)
472{
473 struct ttm_validate_buffer *entry, *next;
474
475 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
476 head) {
477 list_del(&entry->head);
478 vmw_dmabuf_validate_clear(entry->bo);
479 ttm_bo_unref(&entry->bo);
480 sw_context->cur_val_buf--;
481 }
482 BUG_ON(sw_context->cur_val_buf != 0);
483}
484
485static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
486 struct ttm_buffer_object *bo)
487{
488 int ret;
489
490 if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
491 return 0;
492
493 ret = vmw_gmr_bind(dev_priv, bo);
494 if (likely(ret == 0 || ret == -ERESTARTSYS))
495 return ret;
496
497	/* Could not bind a GMR; fall back to placing the buffer in VRAM. */
498	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
499 return ret;
500}
501
502
503static int vmw_validate_buffers(struct vmw_private *dev_priv,
504 struct vmw_sw_context *sw_context)
505{
506 struct ttm_validate_buffer *entry;
507 int ret;
508
509 list_for_each_entry(entry, &sw_context->validate_nodes, head) {
510 ret = vmw_validate_single_buffer(dev_priv, entry->bo);
511 if (unlikely(ret != 0))
512 return ret;
513 }
514 return 0;
515}
516
517int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
518 struct drm_file *file_priv)
519{
520 struct vmw_private *dev_priv = vmw_priv(dev);
521 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
522 struct drm_vmw_fence_rep fence_rep;
523 struct drm_vmw_fence_rep __user *user_fence_rep;
524 int ret;
525 void *user_cmd;
526 void *cmd;
527 uint32_t sequence;
528 struct vmw_sw_context *sw_context = &dev_priv->ctx;
529 struct vmw_master *vmaster = vmw_master(file_priv->master);
530
531 ret = ttm_read_lock(&vmaster->lock, true);
532 if (unlikely(ret != 0))
533 return ret;
534
535 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
536 if (unlikely(ret != 0)) {
537 ret = -ERESTARTSYS;
538 goto out_no_cmd_mutex;
539 }
540
541 cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
542 if (unlikely(cmd == NULL)) {
543 DRM_ERROR("Failed reserving fifo space for commands.\n");
544 ret = -ENOMEM;
545 goto out_unlock;
546 }
547
548 user_cmd = (void __user *)(unsigned long)arg->commands;
549 ret = copy_from_user(cmd, user_cmd, arg->command_size);
550
551	if (unlikely(ret != 0)) {
552		DRM_ERROR("Failed copying commands.\n");
		/* copy_from_user() returns the number of bytes not copied,
		 * not an errno; translate it before bailing out. */
		ret = -EFAULT;
553		goto out_commit;
554	}
555
556 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
557 sw_context->cid_valid = false;
558 sw_context->sid_valid = false;
559 sw_context->cur_reloc = 0;
560 sw_context->cur_val_buf = 0;
561
562 INIT_LIST_HEAD(&sw_context->validate_nodes);
563
564 ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
565 if (unlikely(ret != 0))
566 goto out_err;
567 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
568 dev_priv->val_seq++);
569 if (unlikely(ret != 0))
570 goto out_err;
571
572 ret = vmw_validate_buffers(dev_priv, sw_context);
573 if (unlikely(ret != 0))
574 goto out_err;
575
576 vmw_apply_relocations(sw_context);
577 vmw_fifo_commit(dev_priv, arg->command_size);
578
579 ret = vmw_fifo_send_fence(dev_priv, &sequence);
580
581 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
582 (void *)(unsigned long) sequence);
583 vmw_clear_validations(sw_context);
584 mutex_unlock(&dev_priv->cmdbuf_mutex);
585
586 /*
587 * This error is harmless, because if fence submission fails,
588 * vmw_fifo_send_fence will sync.
589 */
590
591 if (ret != 0)
592 DRM_ERROR("Fence submission error. Syncing.\n");
593
594 fence_rep.error = ret;
595 fence_rep.fence_seq = (uint64_t) sequence;
596
597 user_fence_rep = (struct drm_vmw_fence_rep __user *)
598 (unsigned long)arg->fence_rep;
599
600 /*
601 * copy_to_user errors will be detected by user space not
602 * seeing fence_rep::error filled in.
603 */
604
605 ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
606
607 vmw_kms_cursor_post_execbuf(dev_priv);
608 ttm_read_unlock(&vmaster->lock);
609 return 0;
610out_err:
611 vmw_free_relocations(sw_context);
612 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
613 vmw_clear_validations(sw_context);
614out_commit:
615 vmw_fifo_commit(dev_priv, 0);
616out_unlock:
617 mutex_unlock(&dev_priv->cmdbuf_mutex);
618out_no_cmd_mutex:
619 ttm_read_unlock(&vmaster->lock);
620 return ret;
621}
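/*
 * Summary of the execbuf pipeline above: copy the user command buffer
 * into reserved fifo space, verify and patch it (vmw_cmd_check_all),
 * reserve and validate all referenced buffers, apply GMR relocations,
 * commit the fifo, then fence the submission and report the sequence
 * back to user-space.
 */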
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
new file mode 100644
index 000000000000..641dde76ada1
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -0,0 +1,742 @@
1/**************************************************************************
2 *
3 * Copyright © 2007 David Airlie
4 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#define VMW_DIRTY_DELAY (HZ / 30)
35
36struct vmw_fb_par {
37 struct vmw_private *vmw_priv;
38
39 void *vmalloc;
40
41 struct vmw_dma_buffer *vmw_bo;
42 struct ttm_bo_kmap_obj map;
43
44 u32 pseudo_palette[17];
45
46 unsigned depth;
47 unsigned bpp;
48
49 unsigned max_width;
50 unsigned max_height;
51
52 void *bo_ptr;
53 unsigned bo_size;
54 bool bo_iowrite;
55
56 struct {
57 spinlock_t lock;
58 bool active;
59 unsigned x1;
60 unsigned y1;
61 unsigned x2;
62 unsigned y2;
63 } dirty;
64};
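/*
 * The par structure keeps two copies of the framebuffer: a vmalloc'ed
 * shadow that fbcon draws into (screen_base) and the TTM buffer object
 * mapped at bo_ptr that the device scans out.  The dirty rectangle
 * above records which part of the shadow still has to be copied over.
 */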
65
66static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
67 unsigned blue, unsigned transp,
68 struct fb_info *info)
69{
70 struct vmw_fb_par *par = info->par;
71 u32 *pal = par->pseudo_palette;
72
73 if (regno > 15) {
74 DRM_ERROR("Bad regno %u.\n", regno);
75 return 1;
76 }
77
78 switch (par->depth) {
79 case 24:
80 case 32:
81 pal[regno] = ((red & 0xff00) << 8) |
82 (green & 0xff00) |
83 ((blue & 0xff00) >> 8);
84 break;
85 default:
86 DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
87 return 1;
88 }
89
90 return 0;
91}
92
93static int vmw_fb_check_var(struct fb_var_screeninfo *var,
94 struct fb_info *info)
95{
96 int depth = var->bits_per_pixel;
97 struct vmw_fb_par *par = info->par;
98 struct vmw_private *vmw_priv = par->vmw_priv;
99
100 switch (var->bits_per_pixel) {
101 case 32:
102 depth = (var->transp.length > 0) ? 32 : 24;
103 break;
104 default:
105 DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
106 return -EINVAL;
107 }
108
109 switch (depth) {
110 case 24:
111 var->red.offset = 16;
112 var->green.offset = 8;
113 var->blue.offset = 0;
114 var->red.length = 8;
115 var->green.length = 8;
116 var->blue.length = 8;
117 var->transp.length = 0;
118 var->transp.offset = 0;
119 break;
120 case 32:
121 var->red.offset = 16;
122 var->green.offset = 8;
123 var->blue.offset = 0;
124 var->red.length = 8;
125 var->green.length = 8;
126 var->blue.length = 8;
127 var->transp.length = 8;
128 var->transp.offset = 24;
129 break;
130 default:
131 DRM_ERROR("Bad depth %u.\n", depth);
132 return -EINVAL;
133 }
134
135	/* Without multimon it's hard to resize. */
136 if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
137 (var->xres != par->max_width ||
138 var->yres != par->max_height)) {
139 DRM_ERROR("Tried to resize, but we don't have multimon\n");
140 return -EINVAL;
141 }
142
143 if (var->xres > par->max_width ||
144 var->yres > par->max_height) {
145		DRM_ERROR("Requested geometry cannot fit in the framebuffer\n");
146 return -EINVAL;
147 }
148
149 return 0;
150}
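/*
 * In other words: 32 bpp with a non-zero transp.length is treated as
 * depth 32 (ARGB8888), otherwise as depth 24 (XRGB8888 with the top
 * byte ignored).  Only the channel layouts filled in above are
 * accepted.
 */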
151
152static int vmw_fb_set_par(struct fb_info *info)
153{
154 struct vmw_fb_par *par = info->par;
155 struct vmw_private *vmw_priv = par->vmw_priv;
156
157 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
158 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
159 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
160 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
161 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
162 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
163 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
164 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
165 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
166
167 vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
168 vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
169 vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
170 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
171 vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
172 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
173 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
174 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
175
176 /* TODO check if pitch and offset changes */
177
178 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
183 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
184 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
185 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
186 } else {
187 vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
188 vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
189
190 /* TODO check if pitch and offset changes */
191 }
192
193 return 0;
194}
195
196static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
197 struct fb_info *info)
198{
199 return 0;
200}
201
202static int vmw_fb_blank(int blank, struct fb_info *info)
203{
204 return 0;
205}
206
207/*
208 * Dirty code
209 */
210
211static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
212{
213 struct vmw_private *vmw_priv = par->vmw_priv;
214 struct fb_info *info = vmw_priv->fb_info;
215 int stride = (info->fix.line_length / 4);
216 int *src = (int *)info->screen_base;
217 __le32 __iomem *vram_mem = par->bo_ptr;
218 unsigned long flags;
219 unsigned x, y, w, h;
220 int i, k;
221 struct {
222 uint32_t header;
223 SVGAFifoCmdUpdate body;
224 } *cmd;
225
226 spin_lock_irqsave(&par->dirty.lock, flags);
227 if (!par->dirty.active) {
228 spin_unlock_irqrestore(&par->dirty.lock, flags);
229 return;
230 }
231 x = par->dirty.x1;
232 y = par->dirty.y1;
233 w = min(par->dirty.x2, info->var.xres) - x;
234 h = min(par->dirty.y2, info->var.yres) - y;
235 par->dirty.x1 = par->dirty.x2 = 0;
236 par->dirty.y1 = par->dirty.y2 = 0;
237 spin_unlock_irqrestore(&par->dirty.lock, flags);
238
	/* Copy rows from y down to the bottom of the shadow buffer; the
	 * update rectangle sent to the device below stays clipped to the
	 * dirty region. */
239	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
240 for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
241 iowrite32(src[k], vram_mem + k);
242 }
243
244#if 0
245 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
246#endif
247
248 cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
249 if (unlikely(cmd == NULL)) {
250 DRM_ERROR("Fifo reserve failed.\n");
251 return;
252 }
253
254 cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
255 cmd->body.x = cpu_to_le32(x);
256 cmd->body.y = cpu_to_le32(y);
257 cmd->body.width = cpu_to_le32(w);
258 cmd->body.height = cpu_to_le32(h);
259 vmw_fifo_commit(vmw_priv, sizeof(*cmd));
260}
261
262static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
263 unsigned x1, unsigned y1,
264 unsigned width, unsigned height)
265{
266 struct fb_info *info = par->vmw_priv->fb_info;
267 unsigned long flags;
268 unsigned x2 = x1 + width;
269 unsigned y2 = y1 + height;
270
271 spin_lock_irqsave(&par->dirty.lock, flags);
272 if (par->dirty.x1 == par->dirty.x2) {
273 par->dirty.x1 = x1;
274 par->dirty.y1 = y1;
275 par->dirty.x2 = x2;
276 par->dirty.y2 = y2;
277		/* If we are active, start the dirty work;
278		 * we share the work with the defio system. */
279 if (par->dirty.active)
280 schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
281 } else {
282 if (x1 < par->dirty.x1)
283 par->dirty.x1 = x1;
284 if (y1 < par->dirty.y1)
285 par->dirty.y1 = y1;
286 if (x2 > par->dirty.x2)
287 par->dirty.x2 = x2;
288 if (y2 > par->dirty.y2)
289 par->dirty.y2 = y2;
290 }
291 spin_unlock_irqrestore(&par->dirty.lock, flags);
292}
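/*
 * Worked example of the union logic above (illustrative numbers): with
 * a pending dirty rect (10, 10)-(20, 20), marking a 30x10 blit at
 * (5, 15) grows the rect to (5, 10)-(35, 25).  An empty rect is
 * encoded as x1 == x2, which is why the first branch re-initializes
 * all four coordinates.
 */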
293
294static void vmw_deferred_io(struct fb_info *info,
295 struct list_head *pagelist)
296{
297 struct vmw_fb_par *par = info->par;
298 unsigned long start, end, min, max;
299 unsigned long flags;
300 struct page *page;
301 int y1, y2;
302
303 min = ULONG_MAX;
304 max = 0;
305 list_for_each_entry(page, pagelist, lru) {
306 start = page->index << PAGE_SHIFT;
307 end = start + PAGE_SIZE - 1;
308 min = min(min, start);
309 max = max(max, end);
310 }
311
312 if (min < max) {
313 y1 = min / info->fix.line_length;
314 y2 = (max / info->fix.line_length) + 1;
315
316 spin_lock_irqsave(&par->dirty.lock, flags);
317 par->dirty.x1 = 0;
318 par->dirty.y1 = y1;
319 par->dirty.x2 = info->var.xres;
320 par->dirty.y2 = y2;
321 spin_unlock_irqrestore(&par->dirty.lock, flags);
322 }
323
324 vmw_fb_dirty_flush(par);
325}
326
327struct fb_deferred_io vmw_defio = {
328 .delay = VMW_DIRTY_DELAY,
329 .deferred_io = vmw_deferred_io,
330};
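/*
 * Deferred io converts page faults on the mmap'ed shadow into dirty
 * scanlines.  For instance, assuming line_length == 5120 (1280 pixels
 * at 32 bpp), a touched page with index 10 covers bytes 40960-45055
 * and is folded into rows y1 = 8 through y2 = 9 by vmw_deferred_io()
 * above.
 */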
331
332/*
333 * Draw code
334 */
335
336static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
337{
338 cfb_fillrect(info, rect);
339 vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
340 rect->width, rect->height);
341}
342
343static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
344{
345 cfb_copyarea(info, region);
346 vmw_fb_dirty_mark(info->par, region->dx, region->dy,
347 region->width, region->height);
348}
349
350static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
351{
352 cfb_imageblit(info, image);
353 vmw_fb_dirty_mark(info->par, image->dx, image->dy,
354 image->width, image->height);
355}
356
357/*
358 * Bring up code
359 */
360
361static struct fb_ops vmw_fb_ops = {
362 .owner = THIS_MODULE,
363 .fb_check_var = vmw_fb_check_var,
364 .fb_set_par = vmw_fb_set_par,
365 .fb_setcolreg = vmw_fb_setcolreg,
366 .fb_fillrect = vmw_fb_fillrect,
367 .fb_copyarea = vmw_fb_copyarea,
368 .fb_imageblit = vmw_fb_imageblit,
369 .fb_pan_display = vmw_fb_pan_display,
370 .fb_blank = vmw_fb_blank,
371};
372
373static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
374 size_t size, struct vmw_dma_buffer **out)
375{
376 struct vmw_dma_buffer *vmw_bo;
377 struct ttm_placement ne_placement = vmw_vram_ne_placement;
378 int ret;
379
380 ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
381
382	/* interruptible? */
383 ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
384 if (unlikely(ret != 0))
385 return ret;
386
387	vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
388	if (!vmw_bo) {
		ret = -ENOMEM;
389		goto err_unlock;
	}
390
391 ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
392 &ne_placement,
393 false,
394 &vmw_dmabuf_bo_free);
395 if (unlikely(ret != 0))
396 goto err_unlock; /* init frees the buffer on failure */
397
398 *out = vmw_bo;
399
400 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
401
402 return 0;
403
404err_unlock:
405 ttm_write_unlock(&vmw_priv->fbdev_master.lock);
406 return ret;
407}
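/*
 * Limiting lpfn to the buffer size pins the allocation to the first
 * pages of VRAM, which is where the device scans out from; the
 * vmw_vram_ne_placement ("ne" presumably no-evict) keeps it there for
 * the lifetime of the console.
 */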
408
409int vmw_fb_init(struct vmw_private *vmw_priv)
410{
411 struct device *device = &vmw_priv->dev->pdev->dev;
412 struct vmw_fb_par *par;
413 struct fb_info *info;
414 unsigned initial_width, initial_height;
415 unsigned fb_width, fb_height;
416	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
417 int ret;
418
419 initial_width = 800;
420 initial_height = 600;
421
422	fb_bpp = 32;
423 fb_depth = 24;
424
425 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
426 fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
427 fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
428 } else {
429 fb_width = min(vmw_priv->fb_max_width, initial_width);
430 fb_height = min(vmw_priv->fb_max_height, initial_height);
431 }
432
433 initial_width = min(fb_width, initial_width);
434 initial_height = min(fb_height, initial_height);
435
436 vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
437 vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
438	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp);
439 vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
440 vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
441 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
442 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
443
444 fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
445 fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
446 fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
447
448	DRM_DEBUG("max width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
449	DRM_DEBUG("max height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
450 DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
451 DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
452 DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
453 DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
454 DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
455 DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
456 DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
457 DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
458 DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
459 DRM_DEBUG("fb_pitch %u\n", fb_pitch);
460 DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
461
462 info = framebuffer_alloc(sizeof(*par), device);
463 if (!info)
464 return -ENOMEM;
465
466 /*
467 * Par
468 */
469 vmw_priv->fb_info = info;
470 par = info->par;
471 par->vmw_priv = vmw_priv;
472 par->depth = fb_depth;
473	par->bpp = fb_bpp;
474 par->vmalloc = NULL;
475 par->max_width = fb_width;
476 par->max_height = fb_height;
477
478 /*
479 * Create buffers and alloc memory
480 */
481 par->vmalloc = vmalloc(fb_size);
482 if (unlikely(par->vmalloc == NULL)) {
483 ret = -ENOMEM;
484 goto err_free;
485 }
486
487 ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
488 if (unlikely(ret != 0))
489 goto err_free;
490
491 ret = ttm_bo_kmap(&par->vmw_bo->base,
492 0,
493 par->vmw_bo->base.num_pages,
494 &par->map);
495 if (unlikely(ret != 0))
496 goto err_unref;
497 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
498 par->bo_size = fb_size;
499
500 /*
501 * Fixed and var
502 */
503 strcpy(info->fix.id, "svgadrmfb");
504 info->fix.type = FB_TYPE_PACKED_PIXELS;
505 info->fix.visual = FB_VISUAL_TRUECOLOR;
506 info->fix.type_aux = 0;
507 info->fix.xpanstep = 1; /* doing it in hw */
508 info->fix.ypanstep = 1; /* doing it in hw */
509 info->fix.ywrapstep = 0;
510 info->fix.accel = FB_ACCEL_NONE;
511 info->fix.line_length = fb_pitch;
512
513 info->fix.smem_start = 0;
514 info->fix.smem_len = fb_size;
515
516 info->fix.mmio_start = 0;
517 info->fix.mmio_len = 0;
518
519 info->pseudo_palette = par->pseudo_palette;
520 info->screen_base = par->vmalloc;
521 info->screen_size = fb_size;
522
523 info->flags = FBINFO_DEFAULT;
524 info->fbops = &vmw_fb_ops;
525
526 /* 24 depth per default */
527 info->var.red.offset = 16;
528 info->var.green.offset = 8;
529 info->var.blue.offset = 0;
530 info->var.red.length = 8;
531 info->var.green.length = 8;
532 info->var.blue.length = 8;
533 info->var.transp.offset = 0;
534 info->var.transp.length = 0;
535
536 info->var.xres_virtual = fb_width;
537 info->var.yres_virtual = fb_height;
538 info->var.bits_per_pixel = par->bpp;
539 info->var.xoffset = 0;
540 info->var.yoffset = 0;
541 info->var.activate = FB_ACTIVATE_NOW;
542 info->var.height = -1;
543 info->var.width = -1;
544
545 info->var.xres = initial_width;
546 info->var.yres = initial_height;
547
548#if 0
549 info->pixmap.size = 64*1024;
550 info->pixmap.buf_align = 8;
551 info->pixmap.access_align = 32;
552 info->pixmap.flags = FB_PIXMAP_SYSTEM;
553 info->pixmap.scan_align = 1;
554#else
555 info->pixmap.size = 0;
556 info->pixmap.buf_align = 8;
557 info->pixmap.access_align = 32;
558 info->pixmap.flags = FB_PIXMAP_SYSTEM;
559 info->pixmap.scan_align = 1;
560#endif
561
562 /*
563 * Dirty & Deferred IO
564 */
565 par->dirty.x1 = par->dirty.x2 = 0;
566	par->dirty.y1 = par->dirty.y2 = 0;
567 par->dirty.active = true;
568 spin_lock_init(&par->dirty.lock);
569 info->fbdefio = &vmw_defio;
570 fb_deferred_io_init(info);
571
572 ret = register_framebuffer(info);
573 if (unlikely(ret != 0))
574 goto err_defio;
575
576 return 0;
577
578err_defio:
579 fb_deferred_io_cleanup(info);
580 ttm_bo_kunmap(&par->map);
581err_unref:
582 ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
583err_free:
584 vfree(par->vmalloc);
585 framebuffer_release(info);
586 vmw_priv->fb_info = NULL;
587
588 return ret;
589}
590
591int vmw_fb_close(struct vmw_private *vmw_priv)
592{
593 struct fb_info *info;
594 struct vmw_fb_par *par;
595 struct ttm_buffer_object *bo;
596
597 if (!vmw_priv->fb_info)
598 return 0;
599
600 info = vmw_priv->fb_info;
601 par = info->par;
602 bo = &par->vmw_bo->base;
603 par->vmw_bo = NULL;
604
605	/* Tear down deferred io first so no delayed flush runs after unregister. */
606 fb_deferred_io_cleanup(info);
607 unregister_framebuffer(info);
608
609 ttm_bo_kunmap(&par->map);
610 ttm_bo_unref(&bo);
611
612 vfree(par->vmalloc);
613 framebuffer_release(info);
614
615 return 0;
616}
617
618int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
619 struct vmw_dma_buffer *vmw_bo)
620{
621 struct ttm_buffer_object *bo = &vmw_bo->base;
622 int ret = 0;
623
624 ret = ttm_bo_reserve(bo, false, false, false, 0);
625 if (unlikely(ret != 0))
626 return ret;
627
628 ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
629 ttm_bo_unreserve(bo);
630
631 return ret;
632}
633
634int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
635 struct vmw_dma_buffer *vmw_bo)
636{
637 struct ttm_buffer_object *bo = &vmw_bo->base;
638 struct ttm_placement ne_placement = vmw_vram_ne_placement;
639 int ret = 0;
640
641 ne_placement.lpfn = bo->num_pages;
642
643	/* interruptible? */
644 ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
645 if (unlikely(ret != 0))
646 return ret;
647
648 ret = ttm_bo_reserve(bo, false, false, false, 0);
649 if (unlikely(ret != 0))
650 goto err_unlock;
651
652 if (vmw_bo->gmr_bound) {
653 vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
654 spin_lock(&bo->glob->lru_lock);
655 ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
656 spin_unlock(&bo->glob->lru_lock);
657		vmw_bo->gmr_bound = false;
658 }
659
660 ret = ttm_bo_validate(bo, &ne_placement, false, false);
661 ttm_bo_unreserve(bo);
662err_unlock:
663 ttm_write_unlock(&vmw_priv->active_master->lock);
664
665 return ret;
666}
667
668int vmw_fb_off(struct vmw_private *vmw_priv)
669{
670 struct fb_info *info;
671 struct vmw_fb_par *par;
672 unsigned long flags;
673
674 if (!vmw_priv->fb_info)
675 return -EINVAL;
676
677 info = vmw_priv->fb_info;
678 par = info->par;
679
680 spin_lock_irqsave(&par->dirty.lock, flags);
681 par->dirty.active = false;
682 spin_unlock_irqrestore(&par->dirty.lock, flags);
683
684 flush_scheduled_work();
685
686 par->bo_ptr = NULL;
687 ttm_bo_kunmap(&par->map);
688
689 vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
690
691 return 0;
692}
693
694int vmw_fb_on(struct vmw_private *vmw_priv)
695{
696 struct fb_info *info;
697 struct vmw_fb_par *par;
698 unsigned long flags;
699 bool dummy;
700 int ret;
701
702 if (!vmw_priv->fb_info)
703 return -EINVAL;
704
705 info = vmw_priv->fb_info;
706 par = info->par;
707
708 /* we are already active */
709 if (par->bo_ptr != NULL)
710 return 0;
711
712	/* Make sure that all overlays are stopped when we take over. */
713 vmw_overlay_stop_all(vmw_priv);
714
715 ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
716 if (unlikely(ret != 0)) {
717 DRM_ERROR("could not move buffer to start of VRAM\n");
718 goto err_no_buffer;
719 }
720
721 ret = ttm_bo_kmap(&par->vmw_bo->base,
722 0,
723 par->vmw_bo->base.num_pages,
724 &par->map);
725 BUG_ON(ret != 0);
726 par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
727
728 spin_lock_irqsave(&par->dirty.lock, flags);
729 par->dirty.active = true;
730 spin_unlock_irqrestore(&par->dirty.lock, flags);
731
732err_no_buffer:
733 vmw_fb_set_par(info);
734
735 vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
736
737	/* If there already was stuff dirty we won't
738	 * schedule new work, so let's do it now. */
739 schedule_delayed_work(&info->deferred_work, 0);
740
741 return 0;
742}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
new file mode 100644
index 000000000000..01feb48af333
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -0,0 +1,519 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_placement.h"
31
32int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
33{
34 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
35 uint32_t max;
36 uint32_t min;
37 uint32_t dummy;
38 int ret;
39
40 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
41 fifo->static_buffer = vmalloc(fifo->static_buffer_size);
42 if (unlikely(fifo->static_buffer == NULL))
43 return -ENOMEM;
44
45 fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
46 fifo->last_data_size = 0;
47 fifo->last_buffer_add = false;
48 fifo->last_buffer = vmalloc(fifo->last_buffer_size);
49 if (unlikely(fifo->last_buffer == NULL)) {
50 ret = -ENOMEM;
51 goto out_err;
52 }
53
54 fifo->dynamic_buffer = NULL;
55 fifo->reserved_size = 0;
56 fifo->using_bounce_buffer = false;
57
58 init_rwsem(&fifo->rwsem);
59
60 /*
61 * Allow mapping the first page read-only to user-space.
62 */
63
64 DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
65 DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
66 DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
67
68 mutex_lock(&dev_priv->hw_mutex);
69 dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
70 dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
71 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
72
73 min = 4;
74 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
75 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
76 min <<= 2;
77
78 if (min < PAGE_SIZE)
79 min = PAGE_SIZE;
80
81 iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
82 iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
83 wmb();
84 iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
85 iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
86 iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
87 mb();
88
89 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
90 mutex_unlock(&dev_priv->hw_mutex);
91
92 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
93 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
94 fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
95
96 DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
97 (unsigned int) max,
98 (unsigned int) min,
99 (unsigned int) fifo->capabilities);
100
101 dev_priv->fence_seq = (uint32_t) -100;
102 dev_priv->last_read_sequence = (uint32_t) -100;
103 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
104
105 return vmw_fifo_send_fence(dev_priv, &dummy);
106out_err:
107 vfree(fifo->static_buffer);
108 fifo->static_buffer = NULL;
109 return ret;
110}
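/*
 * Layout notes for the registers touched above: SVGA_FIFO_MIN and
 * SVGA_FIFO_MAX bound the command ring inside the fifo MMIO region,
 * SVGA_FIFO_NEXT_CMD is the guest's write pointer and SVGA_FIFO_STOP
 * the device's read pointer.  Initializing fence_seq to (uint32_t)-100
 * means the 32-bit sequence counter wraps shortly after boot,
 * apparently so that the wrap handling is exercised early.
 */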
111
112void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
113{
114 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
115
116 mutex_lock(&dev_priv->hw_mutex);
117
118 if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
119 iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
120 vmw_write(dev_priv, SVGA_REG_SYNC, reason);
121 }
122
123 mutex_unlock(&dev_priv->hw_mutex);
124}
125
126void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
127{
128 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
129
130 mutex_lock(&dev_priv->hw_mutex);
131
132 while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
133 vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
134
135 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
136
137 vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
138 dev_priv->config_done_state);
139 vmw_write(dev_priv, SVGA_REG_ENABLE,
140 dev_priv->enable_state);
141
142 mutex_unlock(&dev_priv->hw_mutex);
143
144 if (likely(fifo->last_buffer != NULL)) {
145 vfree(fifo->last_buffer);
146 fifo->last_buffer = NULL;
147 }
148
149 if (likely(fifo->static_buffer != NULL)) {
150 vfree(fifo->static_buffer);
151 fifo->static_buffer = NULL;
152 }
153
154 if (likely(fifo->dynamic_buffer != NULL)) {
155 vfree(fifo->dynamic_buffer);
156 fifo->dynamic_buffer = NULL;
157 }
158}
159
160static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
161{
162 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
163 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
164 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
165 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
166 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
167
168 return ((max - next_cmd) + (stop - min) <= bytes);
169}
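/*
 * Worked example (illustrative values): with min = 0x1000,
 * max = 0x5000, next_cmd = 0x4800 and stop = 0x1400, the writer may
 * still use (max - next_cmd) + (stop - min) = 0x800 + 0x400 = 0xc00
 * bytes before catching up with the reader, so a 0xc00-byte request
 * reports full while anything smaller fits.
 */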
170
171static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
172 uint32_t bytes, bool interruptible,
173 unsigned long timeout)
174{
175 int ret = 0;
176 unsigned long end_jiffies = jiffies + timeout;
177 DEFINE_WAIT(__wait);
178
179 DRM_INFO("Fifo wait noirq.\n");
180
181 for (;;) {
182 prepare_to_wait(&dev_priv->fifo_queue, &__wait,
183 (interruptible) ?
184 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
185 if (!vmw_fifo_is_full(dev_priv, bytes))
186 break;
187 if (time_after_eq(jiffies, end_jiffies)) {
188 ret = -EBUSY;
189 DRM_ERROR("SVGA device lockup.\n");
190 break;
191 }
192 schedule_timeout(1);
193 if (interruptible && signal_pending(current)) {
194 ret = -ERESTARTSYS;
195 break;
196 }
197 }
198 finish_wait(&dev_priv->fifo_queue, &__wait);
199 wake_up_all(&dev_priv->fifo_queue);
200 DRM_INFO("Fifo noirq exit.\n");
201 return ret;
202}
203
204static int vmw_fifo_wait(struct vmw_private *dev_priv,
205 uint32_t bytes, bool interruptible,
206 unsigned long timeout)
207{
208 long ret = 1L;
209 unsigned long irq_flags;
210
211 if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
212 return 0;
213
214 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
215 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
216 return vmw_fifo_wait_noirq(dev_priv, bytes,
217 interruptible, timeout);
218
219 mutex_lock(&dev_priv->hw_mutex);
220 if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
221 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
222 outl(SVGA_IRQFLAG_FIFO_PROGRESS,
223 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
224 vmw_write(dev_priv, SVGA_REG_IRQMASK,
225 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
226 SVGA_IRQFLAG_FIFO_PROGRESS);
227 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
228 }
229 mutex_unlock(&dev_priv->hw_mutex);
230
231 if (interruptible)
232 ret = wait_event_interruptible_timeout
233 (dev_priv->fifo_queue,
234 !vmw_fifo_is_full(dev_priv, bytes), timeout);
235 else
236 ret = wait_event_timeout
237 (dev_priv->fifo_queue,
238 !vmw_fifo_is_full(dev_priv, bytes), timeout);
239
240 if (unlikely(ret == 0))
241 ret = -EBUSY;
242 else if (likely(ret > 0))
243 ret = 0;
244
245 mutex_lock(&dev_priv->hw_mutex);
246 if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
247 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
248 vmw_write(dev_priv, SVGA_REG_IRQMASK,
249 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
250 ~SVGA_IRQFLAG_FIFO_PROGRESS);
251 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
252 }
253 mutex_unlock(&dev_priv->hw_mutex);
254
255 return ret;
256}
257
258void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
259{
260 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
261 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
262 uint32_t max;
263 uint32_t min;
264 uint32_t next_cmd;
265 uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
266 int ret;
267
268 down_write(&fifo_state->rwsem);
269 max = ioread32(fifo_mem + SVGA_FIFO_MAX);
270 min = ioread32(fifo_mem + SVGA_FIFO_MIN);
271 next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
272
273 if (unlikely(bytes >= (max - min)))
274 goto out_err;
275
276 BUG_ON(fifo_state->reserved_size != 0);
277 BUG_ON(fifo_state->dynamic_buffer != NULL);
278
279 fifo_state->reserved_size = bytes;
280
281 while (1) {
282 uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
283 bool need_bounce = false;
284 bool reserve_in_place = false;
285
286 if (next_cmd >= stop) {
287 if (likely((next_cmd + bytes < max ||
288 (next_cmd + bytes == max && stop > min))))
289 reserve_in_place = true;
290
291 else if (vmw_fifo_is_full(dev_priv, bytes)) {
292 ret = vmw_fifo_wait(dev_priv, bytes,
293 false, 3 * HZ);
294 if (unlikely(ret != 0))
295 goto out_err;
296 } else
297 need_bounce = true;
298
299 } else {
300
301 if (likely((next_cmd + bytes < stop)))
302 reserve_in_place = true;
303 else {
304 ret = vmw_fifo_wait(dev_priv, bytes,
305 false, 3 * HZ);
306 if (unlikely(ret != 0))
307 goto out_err;
308 }
309 }
310
311 if (reserve_in_place) {
312 if (reserveable || bytes <= sizeof(uint32_t)) {
313 fifo_state->using_bounce_buffer = false;
314
315 if (reserveable)
316 iowrite32(bytes, fifo_mem +
317 SVGA_FIFO_RESERVED);
318 return fifo_mem + (next_cmd >> 2);
319 } else {
320 need_bounce = true;
321 }
322 }
323
324 if (need_bounce) {
325 fifo_state->using_bounce_buffer = true;
326 if (bytes < fifo_state->static_buffer_size)
327 return fifo_state->static_buffer;
328			else {
329				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (unlikely(fifo_state->dynamic_buffer == NULL))
					goto out_err;
330				return fifo_state->dynamic_buffer;
331			}
332 }
333 }
334out_err:
335 fifo_state->reserved_size = 0;
336 up_write(&fifo_state->rwsem);
337 return NULL;
338}
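/*
 * vmw_fifo_reserve() therefore has three possible outcomes: a pointer
 * straight into fifo memory (in-place reservation), the preallocated
 * static bounce buffer for requests up to VMWGFX_FIFO_STATIC_SIZE, or
 * a vmalloc'ed dynamic buffer for anything larger.  vmw_fifo_commit()
 * below picks the matching copy-out strategy.
 */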
339
340static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
341 __le32 __iomem *fifo_mem,
342 uint32_t next_cmd,
343 uint32_t max, uint32_t min, uint32_t bytes)
344{
345 uint32_t chunk_size = max - next_cmd;
346 uint32_t rest;
347 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
348 fifo_state->dynamic_buffer : fifo_state->static_buffer;
349
350 if (bytes < chunk_size)
351 chunk_size = bytes;
352
353 iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
354 mb();
355 memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
356 rest = bytes - chunk_size;
357 if (rest)
358 memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
359 rest);
360}
361
362static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
363 __le32 __iomem *fifo_mem,
364 uint32_t next_cmd,
365 uint32_t max, uint32_t min, uint32_t bytes)
366{
367 uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
368 fifo_state->dynamic_buffer : fifo_state->static_buffer;
369
370 while (bytes > 0) {
371 iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
372 next_cmd += sizeof(uint32_t);
373 if (unlikely(next_cmd == max))
374 next_cmd = min;
375 mb();
376 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
377 mb();
378 bytes -= sizeof(uint32_t);
379 }
380}
381
382void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
383{
384 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
385 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
386 uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
387 uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
388 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
389 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
390
391 BUG_ON((bytes & 3) != 0);
392 BUG_ON(bytes > fifo_state->reserved_size);
393
394 fifo_state->reserved_size = 0;
395
396 if (fifo_state->using_bounce_buffer) {
397 if (reserveable)
398 vmw_fifo_res_copy(fifo_state, fifo_mem,
399 next_cmd, max, min, bytes);
400 else
401 vmw_fifo_slow_copy(fifo_state, fifo_mem,
402 next_cmd, max, min, bytes);
403
404 if (fifo_state->dynamic_buffer) {
405 vfree(fifo_state->dynamic_buffer);
406 fifo_state->dynamic_buffer = NULL;
407 }
408
409 }
410
411 if (fifo_state->using_bounce_buffer || reserveable) {
412 next_cmd += bytes;
413 if (next_cmd >= max)
414 next_cmd -= max - min;
415 mb();
416 iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
417 }
418
419 if (reserveable)
420 iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
421 mb();
422 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
423 up_write(&fifo_state->rwsem);
424}
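/*
 * The two copy-out paths differ in how visible partial commands are to
 * the device: with SVGA_FIFO_CAP_RESERVE the whole bounce buffer is
 * published in at most two memcpy_toio() chunks under
 * SVGA_FIFO_RESERVED, while the slow path dribbles one 32-bit word at
 * a time, advancing SVGA_FIFO_NEXT_CMD after every word.
 */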
425
426int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
427{
428 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
429 struct svga_fifo_cmd_fence *cmd_fence;
430 void *fm;
431 int ret = 0;
432 uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
433
434 fm = vmw_fifo_reserve(dev_priv, bytes);
435 if (unlikely(fm == NULL)) {
436 down_write(&fifo_state->rwsem);
437 *sequence = dev_priv->fence_seq;
438 up_write(&fifo_state->rwsem);
439 ret = -ENOMEM;
440 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
441 false, 3*HZ);
442 goto out_err;
443 }
444
445 do {
446 *sequence = dev_priv->fence_seq++;
447 } while (*sequence == 0);
448
449 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
450
451 /*
452 * Don't request hardware to send a fence. The
453 * waiting code in vmwgfx_irq.c will emulate this.
454 */
455
456 vmw_fifo_commit(dev_priv, 0);
457 return 0;
458 }
459
460 *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
461 cmd_fence = (struct svga_fifo_cmd_fence *)
462 ((unsigned long)fm + sizeof(__le32));
463
464 iowrite32(*sequence, &cmd_fence->fence);
465 fifo_state->last_buffer_add = true;
466 vmw_fifo_commit(dev_priv, bytes);
467 fifo_state->last_buffer_add = false;
468
469out_err:
470 return ret;
471}
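/*
 * Sequence 0 is skipped in the do/while above, presumably so that
 * "no fence" can be encoded unambiguously.  When the device lacks
 * SVGA_FIFO_CAP_FENCE the reservation is committed empty and the irq
 * code is left to emulate fence completion.
 */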
472
473/**
474 * Map the first page of the FIFO read-only to user-space.
475 */
476
477static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
478{
479 int ret;
480 unsigned long address = (unsigned long)vmf->virtual_address;
481
482 if (address != vma->vm_start)
483 return VM_FAULT_SIGBUS;
484
485 ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
486 if (likely(ret == -EBUSY || ret == 0))
487 return VM_FAULT_NOPAGE;
488 else if (ret == -ENOMEM)
489 return VM_FAULT_OOM;
490
491 return VM_FAULT_SIGBUS;
492}
493
494static struct vm_operations_struct vmw_fifo_vm_ops = {
495 .fault = vmw_fifo_vm_fault,
496 .open = NULL,
497 .close = NULL
498};
499
500int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
501{
502 struct drm_file *file_priv;
503 struct vmw_private *dev_priv;
504
505 file_priv = (struct drm_file *)filp->private_data;
506 dev_priv = vmw_priv(file_priv->minor->dev);
507
508 if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
509 (vma->vm_end - vma->vm_start) != PAGE_SIZE)
510 return -EINVAL;
511
512 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
513 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
514 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
515 vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
516 vma->vm_page_prot);
517 vma->vm_ops = &vmw_fifo_vm_ops;
518 return 0;
519}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
new file mode 100644
index 000000000000..5f8908a5d7fd
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -0,0 +1,213 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "drmP.h"
30#include "ttm/ttm_bo_driver.h"
31
32/**
33 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
34 * the number of used descriptors.
35 */
36
37static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
38 struct page *pages[],
39 unsigned long num_pages)
40{
41 struct page *page, *next;
42 struct svga_guest_mem_descriptor *page_virtual = NULL;
43 struct svga_guest_mem_descriptor *desc_virtual = NULL;
44 unsigned int desc_per_page;
45 unsigned long prev_pfn;
46 unsigned long pfn;
47 int ret;
48
49 desc_per_page = PAGE_SIZE /
50 sizeof(struct svga_guest_mem_descriptor) - 1;
51
52 while (likely(num_pages != 0)) {
53 page = alloc_page(__GFP_HIGHMEM);
54 if (unlikely(page == NULL)) {
55 ret = -ENOMEM;
56 goto out_err;
57 }
58
59 list_add_tail(&page->lru, desc_pages);
60
61 /*
62 * Point previous page terminating descriptor to this
63 * page before unmapping it.
64 */
65
66 if (likely(page_virtual != NULL)) {
67 desc_virtual->ppn = page_to_pfn(page);
68 kunmap_atomic(page_virtual, KM_USER0);
69 }
70
71 page_virtual = kmap_atomic(page, KM_USER0);
72 desc_virtual = page_virtual - 1;
73 prev_pfn = ~(0UL);
74
75 while (likely(num_pages != 0)) {
76 pfn = page_to_pfn(*pages);
77
78 if (pfn != prev_pfn + 1) {
79
80 if (desc_virtual - page_virtual ==
81 desc_per_page - 1)
82 break;
83
84 (++desc_virtual)->ppn = cpu_to_le32(pfn);
85 desc_virtual->num_pages = cpu_to_le32(1);
86 } else {
87 uint32_t tmp =
88 le32_to_cpu(desc_virtual->num_pages);
89 desc_virtual->num_pages = cpu_to_le32(tmp + 1);
90 }
91 prev_pfn = pfn;
92 --num_pages;
93 ++pages;
94 }
95
96 (++desc_virtual)->ppn = cpu_to_le32(0);
97 desc_virtual->num_pages = cpu_to_le32(0);
98 }
99
100 if (likely(page_virtual != NULL))
101 kunmap_atomic(page_virtual, KM_USER0);
102
103 return 0;
104out_err:
105 list_for_each_entry_safe(page, next, desc_pages, lru) {
106 list_del_init(&page->lru);
107 __free_page(page);
108 }
109 return ret;
110}
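/*
 * Illustrative encoding: guest pages with pfns 100, 101, 102 and 200
 * compress into two descriptors, { ppn = 100, num_pages = 3 } and
 * { ppn = 200, num_pages = 1 }.  Each descriptor page ends with a
 * { ppn, num_pages = 0 } terminator that either chains to the next
 * descriptor page or, with ppn == 0, ends the list.
 */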
111
112static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
113{
114 struct page *page, *next;
115
116 list_for_each_entry_safe(page, next, desc_pages, lru) {
117 list_del_init(&page->lru);
118 __free_page(page);
119 }
120}
121
122static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
123 int gmr_id, struct list_head *desc_pages)
124{
125 struct page *page;
126
127 if (unlikely(list_empty(desc_pages)))
128 return;
129
130 page = list_entry(desc_pages->next, struct page, lru);
131
132 mutex_lock(&dev_priv->hw_mutex);
133
134 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
135 wmb();
136 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
137 mb();
138
139 mutex_unlock(&dev_priv->hw_mutex);
140
141}
142
143/**
144 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
145 * the number of used descriptors.
146 */
147
148static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
149 unsigned long num_pages)
150{
151 unsigned long prev_pfn = ~(0UL);
152 unsigned long pfn;
153 unsigned long descriptors = 0;
154
155 while (num_pages--) {
156 pfn = page_to_pfn(*pages++);
157 if (prev_pfn + 1 != pfn)
158 ++descriptors;
159 prev_pfn = pfn;
160 }
161
162 return descriptors;
163}
164
165int vmw_gmr_bind(struct vmw_private *dev_priv,
166 struct ttm_buffer_object *bo)
167{
168 struct ttm_tt *ttm = bo->ttm;
169 unsigned long descriptors;
170 int ret;
171 uint32_t id;
172 struct list_head desc_pages;
173
174 if (!(dev_priv->capabilities & SVGA_CAP_GMR))
175 return -EINVAL;
176
177 ret = ttm_tt_populate(ttm);
178 if (unlikely(ret != 0))
179 return ret;
180
181 descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
182 if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
183 return -EINVAL;
184
185 INIT_LIST_HEAD(&desc_pages);
186 ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
187 ttm->num_pages);
188 if (unlikely(ret != 0))
189 return ret;
190
191 ret = vmw_gmr_id_alloc(dev_priv, &id);
192 if (unlikely(ret != 0))
193 goto out_no_id;
194
195 vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
196 vmw_gmr_free_descriptors(&desc_pages);
197 vmw_dmabuf_set_gmr(bo, id);
198 return 0;
199
200out_no_id:
201 vmw_gmr_free_descriptors(&desc_pages);
202 return ret;
203}
204
205void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
206{
207 mutex_lock(&dev_priv->hw_mutex);
208 vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
209 wmb();
210 vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
211 mb();
212 mutex_unlock(&dev_priv->hw_mutex);
213}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
new file mode 100644
index 000000000000..5fa6a4ed238a
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -0,0 +1,81 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30
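/*
 * Ioctl reporting device parameters to user-space: overlay stream
 * counts, 3D capability and the fifo offset.
 */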
31int vmw_getparam_ioctl(struct drm_device *dev, void *data,
32 struct drm_file *file_priv)
33{
34 struct vmw_private *dev_priv = vmw_priv(dev);
35 struct drm_vmw_getparam_arg *param =
36 (struct drm_vmw_getparam_arg *)data;
37
38 switch (param->param) {
39 case DRM_VMW_PARAM_NUM_STREAMS:
40 param->value = vmw_overlay_num_overlays(dev_priv);
41 break;
42 case DRM_VMW_PARAM_NUM_FREE_STREAMS:
43 param->value = vmw_overlay_num_free_overlays(dev_priv);
44 break;
45 case DRM_VMW_PARAM_3D:
46 param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
47 break;
48 case DRM_VMW_PARAM_FIFO_OFFSET:
49 param->value = dev_priv->mmio_start;
50 break;
51 default:
52 DRM_ERROR("Illegal vmwgfx get param request: %d\n",
53 param->param);
54 return -EINVAL;
55 }
56
57 return 0;
58}
59
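/*
 * Debug ioctl copying the last submitted fifo command buffer to
 * user-space, truncating it if the supplied buffer is too small.
 */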
60int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
61 struct drm_file *file_priv)
62{
63 struct vmw_private *dev_priv = vmw_priv(dev);
64 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
65 struct drm_vmw_fifo_debug_arg *arg =
66 (struct drm_vmw_fifo_debug_arg *)data;
67 __le32 __user *buffer = (__le32 __user *)
68 (unsigned long)arg->debug_buffer;
69
70 if (unlikely(fifo_state->last_buffer == NULL))
71 return -EINVAL;
72
73 if (arg->debug_buffer_size < fifo_state->last_data_size) {
74 arg->used_size = arg->debug_buffer_size;
75 arg->did_not_fit = 1;
76 } else {
77 arg->used_size = fifo_state->last_data_size;
78 arg->did_not_fit = 0;
79 }
 80	return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size) ? -EFAULT : 0;
81}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
new file mode 100644
index 000000000000..d40086fc8647
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -0,0 +1,293 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31#define VMW_FENCE_WRAP (1 << 24)
32
33irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
34{
35 struct drm_device *dev = (struct drm_device *)arg;
36 struct vmw_private *dev_priv = vmw_priv(dev);
37 uint32_t status;
38
39 spin_lock(&dev_priv->irq_lock);
40 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
41 spin_unlock(&dev_priv->irq_lock);
42
43 if (status & SVGA_IRQFLAG_ANY_FENCE)
44 wake_up_all(&dev_priv->fence_queue);
45 if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
46 wake_up_all(&dev_priv->fifo_queue);
47
48 if (likely(status)) {
49 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
50 return IRQ_HANDLED;
51 }
52
53 return IRQ_NONE;
54}
55
56static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
57{
58 uint32_t busy;
59
60 mutex_lock(&dev_priv->hw_mutex);
61 busy = vmw_read(dev_priv, SVGA_REG_BUSY);
62 mutex_unlock(&dev_priv->hw_mutex);
63
64 return (busy == 0);
65}
66
67
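/*
 * Check whether a fence sequence has signaled. Comparisons are done
 * modulo 2^32: a sequence no more than VMW_FENCE_WRAP behind the last
 * read value has signaled, and a sequence beyond what was actually
 * emitted is considered stale and signaled as well.
 */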
68bool vmw_fence_signaled(struct vmw_private *dev_priv,
69 uint32_t sequence)
70{
71 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
72 struct vmw_fifo_state *fifo_state;
73 bool ret;
74
75 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
76 return true;
77
78 dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
79 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
80 return true;
81
82 fifo_state = &dev_priv->fifo;
83 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
84 vmw_fifo_idle(dev_priv, sequence))
85 return true;
86
87 /**
 88	 * The code below signals stale fences that have wrapped.
89 * First, block fence submission.
90 */
91
92 down_read(&fifo_state->rwsem);
93
94 /**
 95	 * Then check whether the sequence is higher than what we've
 96	 * actually emitted; if so, the fence is stale and signaled.
97 */
98
99 ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
100 up_read(&fifo_state->rwsem);
101
102 return ret;
103}
104
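/*
 * Polling fallback used when the device lacks fence or irq support:
 * repeatedly test the wait condition until it holds, the timeout
 * expires, or (if interruptible) a signal becomes pending.
 */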
105int vmw_fallback_wait(struct vmw_private *dev_priv,
106 bool lazy,
107 bool fifo_idle,
108 uint32_t sequence,
109 bool interruptible,
110 unsigned long timeout)
111{
112 struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
113
114 uint32_t count = 0;
115 uint32_t signal_seq;
116 int ret;
117 unsigned long end_jiffies = jiffies + timeout;
118 bool (*wait_condition)(struct vmw_private *, uint32_t);
119 DEFINE_WAIT(__wait);
120
121 wait_condition = (fifo_idle) ? &vmw_fifo_idle :
122 &vmw_fence_signaled;
123
124 /**
125 * Block command submission while waiting for idle.
126 */
127
128 if (fifo_idle)
129 down_read(&fifo_state->rwsem);
130 signal_seq = dev_priv->fence_seq;
131 ret = 0;
132
133 for (;;) {
134 prepare_to_wait(&dev_priv->fence_queue, &__wait,
135 (interruptible) ?
136 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
137 if (wait_condition(dev_priv, sequence))
138 break;
139 if (time_after_eq(jiffies, end_jiffies)) {
140 DRM_ERROR("SVGA device lockup.\n");
141 break;
142 }
143 if (lazy)
144 schedule_timeout(1);
145 else if ((++count & 0x0F) == 0) {
146 /**
147			 * FIXME: Use schedule_hrtimeout() here for
148 * newer kernels and lower CPU utilization.
149 */
150
151 __set_current_state(TASK_RUNNING);
152 schedule();
153 __set_current_state((interruptible) ?
154 TASK_INTERRUPTIBLE :
155 TASK_UNINTERRUPTIBLE);
156 }
157 if (interruptible && signal_pending(current)) {
158 ret = -ERESTARTSYS;
159 break;
160 }
161 }
162 finish_wait(&dev_priv->fence_queue, &__wait);
163 if (ret == 0 && fifo_idle) {
164 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
165 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
166 }
167 wake_up_all(&dev_priv->fence_queue);
168 if (fifo_idle)
169 up_read(&fifo_state->rwsem);
170
171 return ret;
172}
173
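/*
 * Wait for a fence sequence, picking the cheapest mechanism available:
 * the polling fallback when fifo fences or irq masking are
 * unsupported, otherwise a wait queue driven by the ANY_FENCE irq.
 */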
174int vmw_wait_fence(struct vmw_private *dev_priv,
175 bool lazy, uint32_t sequence,
176 bool interruptible, unsigned long timeout)
177{
178 long ret;
179 unsigned long irq_flags;
180 struct vmw_fifo_state *fifo = &dev_priv->fifo;
181
182 if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
183 return 0;
184
185 if (likely(vmw_fence_signaled(dev_priv, sequence)))
186 return 0;
187
188 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
189
190 if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
191 return vmw_fallback_wait(dev_priv, lazy, true, sequence,
192 interruptible, timeout);
193
194 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
195 return vmw_fallback_wait(dev_priv, lazy, false, sequence,
196 interruptible, timeout);
197
198 mutex_lock(&dev_priv->hw_mutex);
199 if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
200 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
201 outl(SVGA_IRQFLAG_ANY_FENCE,
202 dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
203 vmw_write(dev_priv, SVGA_REG_IRQMASK,
204 vmw_read(dev_priv, SVGA_REG_IRQMASK) |
205 SVGA_IRQFLAG_ANY_FENCE);
206 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
207 }
208 mutex_unlock(&dev_priv->hw_mutex);
209
210 if (interruptible)
211 ret = wait_event_interruptible_timeout
212 (dev_priv->fence_queue,
213 vmw_fence_signaled(dev_priv, sequence),
214 timeout);
215 else
216 ret = wait_event_timeout
217 (dev_priv->fence_queue,
218 vmw_fence_signaled(dev_priv, sequence),
219 timeout);
220
221 if (unlikely(ret == 0))
222 ret = -EBUSY;
223 else if (likely(ret > 0))
224 ret = 0;
225
226 mutex_lock(&dev_priv->hw_mutex);
227 if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
228 spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
229 vmw_write(dev_priv, SVGA_REG_IRQMASK,
230 vmw_read(dev_priv, SVGA_REG_IRQMASK) &
231 ~SVGA_IRQFLAG_ANY_FENCE);
232 spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
233 }
234 mutex_unlock(&dev_priv->hw_mutex);
235
236 return ret;
237}
238
239void vmw_irq_preinstall(struct drm_device *dev)
240{
241 struct vmw_private *dev_priv = vmw_priv(dev);
242 uint32_t status;
243
244 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
245 return;
246
247 spin_lock_init(&dev_priv->irq_lock);
248 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
249 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
250}
251
252int vmw_irq_postinstall(struct drm_device *dev)
253{
254 return 0;
255}
256
257void vmw_irq_uninstall(struct drm_device *dev)
258{
259 struct vmw_private *dev_priv = vmw_priv(dev);
260 uint32_t status;
261
262 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
263 return;
264
265 mutex_lock(&dev_priv->hw_mutex);
266 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
267 mutex_unlock(&dev_priv->hw_mutex);
268
269 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
270 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
271}
272
273#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
274
275int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
276 struct drm_file *file_priv)
277{
278 struct drm_vmw_fence_wait_arg *arg =
279 (struct drm_vmw_fence_wait_arg *)data;
280 unsigned long timeout;
281
282 if (!arg->cookie_valid) {
283 arg->cookie_valid = 1;
284 arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
285 }
286
287 timeout = jiffies;
288 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
289 return -EBUSY;
290
291 timeout = (unsigned long)arg->kernel_cookie - timeout;
292 return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
293}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 000000000000..b1af76e371c3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,872 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30/* Might need an hrtimer here? */
31#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
32
33
34void vmw_display_unit_cleanup(struct vmw_display_unit *du)
35{
36 if (du->cursor_surface)
37 vmw_surface_unreference(&du->cursor_surface);
38 if (du->cursor_dmabuf)
39 vmw_dmabuf_unreference(&du->cursor_dmabuf);
40 drm_crtc_cleanup(&du->crtc);
41 drm_encoder_cleanup(&du->encoder);
42 drm_connector_cleanup(&du->connector);
43}
44
45/*
46 * Display Unit Cursor functions
47 */
48
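/*
 * Reserve fifo space for a DEFINE_ALPHA_CURSOR command followed by the
 * ARGB cursor image, fill both in and commit them to the device.
 */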
49int vmw_cursor_update_image(struct vmw_private *dev_priv,
50 u32 *image, u32 width, u32 height,
51 u32 hotspotX, u32 hotspotY)
52{
53 struct {
54 u32 cmd;
55 SVGAFifoCmdDefineAlphaCursor cursor;
56 } *cmd;
57 u32 image_size = width * height * 4;
58 u32 cmd_size = sizeof(*cmd) + image_size;
59
60 if (!image)
61 return -EINVAL;
62
63 cmd = vmw_fifo_reserve(dev_priv, cmd_size);
64 if (unlikely(cmd == NULL)) {
65 DRM_ERROR("Fifo reserve failed.\n");
66 return -ENOMEM;
67 }
68
69 memset(cmd, 0, sizeof(*cmd));
70
71 memcpy(&cmd[1], image, image_size);
72
73 cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
74 cmd->cursor.id = cpu_to_le32(0);
75 cmd->cursor.width = cpu_to_le32(width);
76 cmd->cursor.height = cpu_to_le32(height);
77 cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
78 cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
79
80 vmw_fifo_commit(dev_priv, cmd_size);
81
82 return 0;
83}
84
85void vmw_cursor_update_position(struct vmw_private *dev_priv,
86 bool show, int x, int y)
87{
88 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
89 uint32_t count;
90
91 iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
92 iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
93 iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
94 count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
95 iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
96}
97
98int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
99 uint32_t handle, uint32_t width, uint32_t height)
100{
101 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
102 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
103 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
104 struct vmw_surface *surface = NULL;
105 struct vmw_dma_buffer *dmabuf = NULL;
106 int ret;
107
108 if (handle) {
109 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
110 handle, &surface);
111 if (!ret) {
112 if (!surface->snooper.image) {
113 DRM_ERROR("surface not suitable for cursor\n");
114 return -EINVAL;
115 }
116 } else {
117 ret = vmw_user_dmabuf_lookup(tfile,
118 handle, &dmabuf);
119 if (ret) {
120 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
121 return -EINVAL;
122 }
123 }
124 }
125
126 /* takedown old cursor */
127 if (du->cursor_surface) {
128 du->cursor_surface->snooper.crtc = NULL;
129 vmw_surface_unreference(&du->cursor_surface);
130 }
131 if (du->cursor_dmabuf)
132 vmw_dmabuf_unreference(&du->cursor_dmabuf);
133
134 /* setup new image */
135 if (surface) {
136 /* vmw_user_surface_lookup takes one reference */
137 du->cursor_surface = surface;
138
139 du->cursor_surface->snooper.crtc = crtc;
140 du->cursor_age = du->cursor_surface->snooper.age;
141 vmw_cursor_update_image(dev_priv, surface->snooper.image,
142 64, 64, du->hotspot_x, du->hotspot_y);
143 } else if (dmabuf) {
144 struct ttm_bo_kmap_obj map;
145 unsigned long kmap_offset;
146 unsigned long kmap_num;
147 void *virtual;
148 bool dummy;
149
150		/* vmw_user_dmabuf_lookup takes one reference */
151 du->cursor_dmabuf = dmabuf;
152
153 kmap_offset = 0;
154 kmap_num = (64*64*4) >> PAGE_SHIFT;
155
156 ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
157 if (unlikely(ret != 0)) {
158 DRM_ERROR("reserve failed\n");
159 return -EINVAL;
160 }
161
162 ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
163 if (unlikely(ret != 0))
164 goto err_unreserve;
165
166 virtual = ttm_kmap_obj_virtual(&map, &dummy);
167 vmw_cursor_update_image(dev_priv, virtual, 64, 64,
168 du->hotspot_x, du->hotspot_y);
169
170 ttm_bo_kunmap(&map);
171err_unreserve:
172 ttm_bo_unreserve(&dmabuf->base);
173
174 } else {
175 vmw_cursor_update_position(dev_priv, false, 0, 0);
176 return 0;
177 }
178
179 vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
180
181 return 0;
182}
183
184int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
185{
186 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
187 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
188	bool shown = du->cursor_surface || du->cursor_dmabuf;
189
190 du->cursor_x = x + crtc->x;
191 du->cursor_y = y + crtc->y;
192
193 vmw_cursor_update_position(dev_priv, shown,
194 du->cursor_x, du->cursor_y);
195
196 return 0;
197}
198
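/*
 * Snoop surface DMA commands targeting the cursor surface and copy the
 * uploaded image into the snooper, so that the visible cursor can be
 * refreshed once execbuf has finished.
 */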
199void vmw_kms_cursor_snoop(struct vmw_surface *srf,
200 struct ttm_object_file *tfile,
201 struct ttm_buffer_object *bo,
202 SVGA3dCmdHeader *header)
203{
204 struct ttm_bo_kmap_obj map;
205 unsigned long kmap_offset;
206 unsigned long kmap_num;
207 SVGA3dCopyBox *box;
208 unsigned box_count;
209 void *virtual;
210 bool dummy;
211 struct vmw_dma_cmd {
212 SVGA3dCmdHeader header;
213 SVGA3dCmdSurfaceDMA dma;
214 } *cmd;
215 int ret;
216
217 cmd = container_of(header, struct vmw_dma_cmd, header);
218
219 /* No snooper installed */
220 if (!srf->snooper.image)
221 return;
222
223 if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
224		DRM_ERROR("face and mipmap for cursors must always be 0\n");
225 return;
226 }
227
228 if (cmd->header.size < 64) {
229 DRM_ERROR("at least one full copy box must be given\n");
230 return;
231 }
232
233 box = (SVGA3dCopyBox *)&cmd[1];
234 box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
235 sizeof(SVGA3dCopyBox);
236
237 if (cmd->dma.guest.pitch != (64 * 4) ||
238 cmd->dma.guest.ptr.offset % PAGE_SIZE ||
239 box->x != 0 || box->y != 0 || box->z != 0 ||
240 box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
241 box->w != 64 || box->h != 64 || box->d != 1 ||
242 box_count != 1) {
243		/* TODO handle non-page-aligned offsets */
244		/* TODO handle partial uploads and pitch != 256 */
245		/* TODO handle more than one copy (size != 64) */
246		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
247 return;
248 }
249
250 kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
251 kmap_num = (64*64*4) >> PAGE_SHIFT;
252
253 ret = ttm_bo_reserve(bo, true, false, false, 0);
254 if (unlikely(ret != 0)) {
255 DRM_ERROR("reserve failed\n");
256 return;
257 }
258
259 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
260 if (unlikely(ret != 0))
261 goto err_unreserve;
262
263 virtual = ttm_kmap_obj_virtual(&map, &dummy);
264
265 memcpy(srf->snooper.image, virtual, 64*64*4);
266 srf->snooper.age++;
267
268	/* We can't update the cursor image from here, since execbuf
269	 * has already reserved fifo space.
270 *
271 * if (srf->snooper.crtc)
272 * vmw_ldu_crtc_cursor_update_image(dev_priv,
273 * srf->snooper.image, 64, 64,
274 * du->hotspot_x, du->hotspot_y);
275 */
276
277 ttm_bo_kunmap(&map);
278err_unreserve:
279 ttm_bo_unreserve(bo);
280}
281
282void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
283{
284 struct drm_device *dev = dev_priv->dev;
285 struct vmw_display_unit *du;
286 struct drm_crtc *crtc;
287
288 mutex_lock(&dev->mode_config.mutex);
289
290 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
291 du = vmw_crtc_to_du(crtc);
292 if (!du->cursor_surface ||
293 du->cursor_age == du->cursor_surface->snooper.age)
294 continue;
295
296 du->cursor_age = du->cursor_surface->snooper.age;
297 vmw_cursor_update_image(dev_priv,
298 du->cursor_surface->snooper.image,
299 64, 64, du->hotspot_x, du->hotspot_y);
300 }
301
302 mutex_unlock(&dev->mode_config.mutex);
303}
304
305/*
306 * Generic framebuffer code
307 */
308
309int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
310 struct drm_file *file_priv,
311 unsigned int *handle)
312{
313	if (handle)
314		*handle = 0;
315
316 return 0;
317}
318
319/*
320 * Surface framebuffer code
321 */
322
323#define vmw_framebuffer_to_vfbs(x) \
324 container_of(x, struct vmw_framebuffer_surface, base.base)
325
326struct vmw_framebuffer_surface {
327 struct vmw_framebuffer base;
328 struct vmw_surface *surface;
329 struct delayed_work d_work;
330 struct mutex work_lock;
331 bool present_fs;
332};
333
334void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
335{
336 struct vmw_framebuffer_surface *vfb =
337 vmw_framebuffer_to_vfbs(framebuffer);
338
339 cancel_delayed_work_sync(&vfb->d_work);
340 drm_framebuffer_cleanup(framebuffer);
341 vmw_surface_unreference(&vfb->surface);
342
343 kfree(framebuffer);
344}
345
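/*
 * Delayed-work callback emitting a full-screen SVGA_3D_CMD_PRESENT for
 * the surface framebuffer. It re-arms itself after each present, so
 * back-to-back dirties are rate limited to VMWGFX_PRESENT_RATE.
 */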
346static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
347{
348 struct delayed_work *d_work =
349 container_of(work, struct delayed_work, work);
350 struct vmw_framebuffer_surface *vfbs =
351 container_of(d_work, struct vmw_framebuffer_surface, d_work);
352 struct vmw_surface *surf = vfbs->surface;
353 struct drm_framebuffer *framebuffer = &vfbs->base.base;
354 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
355
356 struct {
357 SVGA3dCmdHeader header;
358 SVGA3dCmdPresent body;
359 SVGA3dCopyRect cr;
360 } *cmd;
361
362 mutex_lock(&vfbs->work_lock);
363 if (!vfbs->present_fs)
364 goto out_unlock;
365
366 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
367 if (unlikely(cmd == NULL))
368 goto out_resched;
369
370 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
371 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
372 cmd->body.sid = cpu_to_le32(surf->res.id);
373 cmd->cr.x = cpu_to_le32(0);
374 cmd->cr.y = cpu_to_le32(0);
375 cmd->cr.srcx = cmd->cr.x;
376 cmd->cr.srcy = cmd->cr.y;
377 cmd->cr.w = cpu_to_le32(framebuffer->width);
378 cmd->cr.h = cpu_to_le32(framebuffer->height);
379 vfbs->present_fs = false;
380 vmw_fifo_commit(dev_priv, sizeof(*cmd));
381out_resched:
382 /**
383 * Will not re-add if already pending.
384 */
385 schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
386out_unlock:
387 mutex_unlock(&vfbs->work_lock);
388}
389
390
391int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
392 unsigned flags, unsigned color,
393 struct drm_clip_rect *clips,
394 unsigned num_clips)
395{
396 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
397 struct vmw_framebuffer_surface *vfbs =
398 vmw_framebuffer_to_vfbs(framebuffer);
399 struct vmw_surface *surf = vfbs->surface;
400 struct drm_clip_rect norect;
401 SVGA3dCopyRect *cr;
402 int i, inc = 1;
403
404 struct {
405 SVGA3dCmdHeader header;
406 SVGA3dCmdPresent body;
407 SVGA3dCopyRect cr;
408 } *cmd;
409
410 if (!num_clips ||
411 !(dev_priv->fifo.capabilities &
412 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
413 int ret;
414
415 mutex_lock(&vfbs->work_lock);
416 vfbs->present_fs = true;
417 ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
418 mutex_unlock(&vfbs->work_lock);
419 if (ret) {
420 /**
421			 * No work was pending; force an immediate present.
422 */
423 vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
424 }
425 return 0;
426 }
427
428 if (!num_clips) {
429 num_clips = 1;
430 clips = &norect;
431 norect.x1 = norect.y1 = 0;
432 norect.x2 = framebuffer->width;
433 norect.y2 = framebuffer->height;
434 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
435 num_clips /= 2;
436 inc = 2; /* skip source rects */
437 }
438
439 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
440 if (unlikely(cmd == NULL)) {
441 DRM_ERROR("Fifo reserve failed.\n");
442 return -ENOMEM;
443 }
444
445 memset(cmd, 0, sizeof(*cmd));
446
447 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
448 cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
449 cmd->body.sid = cpu_to_le32(surf->res.id);
450
451 for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
452 cr->x = cpu_to_le16(clips->x1);
453 cr->y = cpu_to_le16(clips->y1);
454 cr->srcx = cr->x;
455 cr->srcy = cr->y;
456 cr->w = cpu_to_le16(clips->x2 - clips->x1);
457 cr->h = cpu_to_le16(clips->y2 - clips->y1);
458 }
459
460 vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
461
462 return 0;
463}
464
465static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
466 .destroy = vmw_framebuffer_surface_destroy,
467 .dirty = vmw_framebuffer_surface_dirty,
468 .create_handle = vmw_framebuffer_create_handle,
469};
470
471int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
472 struct vmw_surface *surface,
473 struct vmw_framebuffer **out,
474 unsigned width, unsigned height)
475
476{
477 struct drm_device *dev = dev_priv->dev;
478 struct vmw_framebuffer_surface *vfbs;
479 int ret;
480
481 vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
482 if (!vfbs) {
483 ret = -ENOMEM;
484 goto out_err1;
485 }
486
487 ret = drm_framebuffer_init(dev, &vfbs->base.base,
488 &vmw_framebuffer_surface_funcs);
489 if (ret)
490 goto out_err2;
491
492 if (!vmw_surface_reference(surface)) {
493 DRM_ERROR("failed to reference surface %p\n", surface);
494 goto out_err3;
495 }
496
497 /* XXX get the first 3 from the surface info */
498 vfbs->base.base.bits_per_pixel = 32;
499	vfbs->base.base.pitch = width * 32 / 8;
500 vfbs->base.base.depth = 24;
501 vfbs->base.base.width = width;
502 vfbs->base.base.height = height;
503 vfbs->base.pin = NULL;
504 vfbs->base.unpin = NULL;
505 vfbs->surface = surface;
506 mutex_init(&vfbs->work_lock);
507 INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
508 *out = &vfbs->base;
509
510 return 0;
511
512out_err3:
513 drm_framebuffer_cleanup(&vfbs->base.base);
514out_err2:
515 kfree(vfbs);
516out_err1:
517 return ret;
518}
519
520/*
521 * Dmabuf framebuffer code
522 */
523
524#define vmw_framebuffer_to_vfbd(x) \
525 container_of(x, struct vmw_framebuffer_dmabuf, base.base)
526
527struct vmw_framebuffer_dmabuf {
528 struct vmw_framebuffer base;
529 struct vmw_dma_buffer *buffer;
530};
531
532void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
533{
534 struct vmw_framebuffer_dmabuf *vfbd =
535 vmw_framebuffer_to_vfbd(framebuffer);
536
537 drm_framebuffer_cleanup(framebuffer);
538 vmw_dmabuf_unreference(&vfbd->buffer);
539
540 kfree(vfbd);
541}
542
543int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
544 unsigned flags, unsigned color,
545 struct drm_clip_rect *clips,
546 unsigned num_clips)
547{
548 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
549 struct drm_clip_rect norect;
550 struct {
551 uint32_t header;
552 SVGAFifoCmdUpdate body;
553 } *cmd;
554 int i, increment = 1;
555
556 if (!num_clips ||
557 !(dev_priv->fifo.capabilities &
558 SVGA_FIFO_CAP_SCREEN_OBJECT)) {
559 num_clips = 1;
560 clips = &norect;
561 norect.x1 = norect.y1 = 0;
562 norect.x2 = framebuffer->width;
563 norect.y2 = framebuffer->height;
564 } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
565 num_clips /= 2;
566 increment = 2;
567 }
568
569 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
570 if (unlikely(cmd == NULL)) {
571 DRM_ERROR("Fifo reserve failed.\n");
572 return -ENOMEM;
573 }
574
575 for (i = 0; i < num_clips; i++, clips += increment) {
576 cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
577 cmd[i].body.x = cpu_to_le32(clips[i].x1);
578 cmd[i].body.y = cpu_to_le32(clips[i].y1);
579 cmd[i].body.width = cpu_to_le32(clips[i].x2 - clips[i].x1);
580 cmd[i].body.height = cpu_to_le32(clips[i].y2 - clips[i].y1);
581 }
582
583 vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
584
585 return 0;
586}
587
588static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
589 .destroy = vmw_framebuffer_dmabuf_destroy,
590 .dirty = vmw_framebuffer_dmabuf_dirty,
591 .create_handle = vmw_framebuffer_create_handle,
592};
593
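/*
 * Pin the dmabuf framebuffer at the start of VRAM and program the
 * legacy multimon registers for a single primary display, pausing all
 * overlays around the mode switch.
 */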
594static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
595{
596 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
597 struct vmw_framebuffer_dmabuf *vfbd =
598 vmw_framebuffer_to_vfbd(&vfb->base);
599 int ret;
600
601 vmw_overlay_pause_all(dev_priv);
602
603 ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
604
605 if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
606 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
607 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
608 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
609 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
610 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
611 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
612 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
613 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
614
615 vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
616 vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
617 vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
618 vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
619 vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
620 vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
621 vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
622 vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
623 } else
624 WARN_ON(true);
625
626 vmw_overlay_resume_all(dev_priv);
627
628 return 0;
629}
630
631static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
632{
633 struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
634 struct vmw_framebuffer_dmabuf *vfbd =
635 vmw_framebuffer_to_vfbd(&vfb->base);
636
637 if (!vfbd->buffer) {
638 WARN_ON(!vfbd->buffer);
639 return 0;
640 }
641
642 return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
643}
644
645int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
646 struct vmw_dma_buffer *dmabuf,
647 struct vmw_framebuffer **out,
648 unsigned width, unsigned height)
649
650{
651 struct drm_device *dev = dev_priv->dev;
652 struct vmw_framebuffer_dmabuf *vfbd;
653 int ret;
654
655 vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
656 if (!vfbd) {
657 ret = -ENOMEM;
658 goto out_err1;
659 }
660
661 ret = drm_framebuffer_init(dev, &vfbd->base.base,
662 &vmw_framebuffer_dmabuf_funcs);
663 if (ret)
664 goto out_err2;
665
666 if (!vmw_dmabuf_reference(dmabuf)) {
667 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
668 goto out_err3;
669 }
670
671 /* XXX get the first 3 from the surface info */
672 vfbd->base.base.bits_per_pixel = 32;
673	vfbd->base.base.pitch = width * 32 / 8;
674 vfbd->base.base.depth = 24;
675 vfbd->base.base.width = width;
676 vfbd->base.base.height = height;
677 vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
678 vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
679 vfbd->buffer = dmabuf;
680 *out = &vfbd->base;
681
682 return 0;
683
684out_err3:
685 drm_framebuffer_cleanup(&vfbd->base.base);
686out_err2:
687 kfree(vfbd);
688out_err1:
689 return ret;
690}
691
692/*
693 * Generic Kernel modesetting functions
694 */
695
696static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
697 struct drm_file *file_priv,
698 struct drm_mode_fb_cmd *mode_cmd)
699{
700 struct vmw_private *dev_priv = vmw_priv(dev);
701 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
702 struct vmw_framebuffer *vfb = NULL;
703 struct vmw_surface *surface = NULL;
704 struct vmw_dma_buffer *bo = NULL;
705 int ret;
706
707 ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
708 mode_cmd->handle, &surface);
709 if (ret)
710 goto try_dmabuf;
711
712 ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
713 mode_cmd->width, mode_cmd->height);
714
715 /* vmw_user_surface_lookup takes one ref so does new_fb */
716 vmw_surface_unreference(&surface);
717
718 if (ret) {
719 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
720 return NULL;
721 }
722 return &vfb->base;
723
724try_dmabuf:
725 DRM_INFO("%s: trying buffer\n", __func__);
726
727 ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
728 if (ret) {
729 DRM_ERROR("failed to find buffer: %i\n", ret);
730 return NULL;
731 }
732
733 ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
734 mode_cmd->width, mode_cmd->height);
735
736 /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
737 vmw_dmabuf_unreference(&bo);
738
739 if (ret) {
740 DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
741 return NULL;
742 }
743
744 return &vfb->base;
745}
746
747static int vmw_kms_fb_changed(struct drm_device *dev)
748{
749 return 0;
750}
751
752static struct drm_mode_config_funcs vmw_kms_funcs = {
753 .fb_create = vmw_kms_fb_create,
754 .fb_changed = vmw_kms_fb_changed,
755};
756
757int vmw_kms_init(struct vmw_private *dev_priv)
758{
759 struct drm_device *dev = dev_priv->dev;
760 int ret;
761
762 drm_mode_config_init(dev);
763 dev->mode_config.funcs = &vmw_kms_funcs;
764 dev->mode_config.min_width = 640;
765 dev->mode_config.min_height = 480;
766 dev->mode_config.max_width = 2048;
767 dev->mode_config.max_height = 2048;
768
769 ret = vmw_kms_init_legacy_display_system(dev_priv);
770
771	return ret;
772}
773
774int vmw_kms_close(struct vmw_private *dev_priv)
775{
776 /*
777	 * Docs say we should take the lock before calling this function,
778	 * but since it destroys encoders, and our destructor calls
779	 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
780 */
781 drm_mode_config_cleanup(dev_priv->dev);
782 vmw_kms_close_legacy_display_system(dev_priv);
783 return 0;
784}
785
786int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
787 struct drm_file *file_priv)
788{
789 struct drm_vmw_cursor_bypass_arg *arg = data;
790 struct vmw_display_unit *du;
791 struct drm_mode_object *obj;
792 struct drm_crtc *crtc;
793 int ret = 0;
794
795
796 mutex_lock(&dev->mode_config.mutex);
797 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
798
799 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
800 du = vmw_crtc_to_du(crtc);
801 du->hotspot_x = arg->xhot;
802 du->hotspot_y = arg->yhot;
803 }
804
805 mutex_unlock(&dev->mode_config.mutex);
806 return 0;
807 }
808
809 obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
810 if (!obj) {
811 ret = -EINVAL;
812 goto out;
813 }
814
815 crtc = obj_to_crtc(obj);
816 du = vmw_crtc_to_du(crtc);
817
818 du->hotspot_x = arg->xhot;
819 du->hotspot_y = arg->yhot;
820
821out:
822 mutex_unlock(&dev->mode_config.mutex);
823
824 return ret;
825}
826
827int vmw_kms_save_vga(struct vmw_private *vmw_priv)
828{
829 /*
830	 * Set up a single multimon monitor with a size of 0x0;
831	 * this stops the UI from resizing when we change the
832	 * framebuffer size.
833 */
834 if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
835 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
836 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
837 vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
838 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
839 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
840 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
841 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
842 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
843 }
844
845 vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
846 vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
847 vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
848 vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
849 vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
850 vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
851 vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
852 vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
853
854 return 0;
855}
856
857int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
858{
859 vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
860 vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
861 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
862 vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
863 vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
864 vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
865 vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
866 vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
867
868 /* TODO check for multimon */
869 vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
870
871 return 0;
872}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
new file mode 100644
index 000000000000..8b95249f0531
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -0,0 +1,102 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef VMWGFX_KMS_H_
29#define VMWGFX_KMS_H_
30
31#include "drmP.h"
32#include "vmwgfx_drv.h"
33
34
35#define vmw_framebuffer_to_vfb(x) \
36 container_of(x, struct vmw_framebuffer, base)
37
38/**
39 * Base class for framebuffers
40 *
 41 * @pin is called whenever a crtc starts using this framebuffer
 42 * @unpin is called when no crtc uses it any longer
43 */
44struct vmw_framebuffer {
45 struct drm_framebuffer base;
46 int (*pin)(struct vmw_framebuffer *fb);
47 int (*unpin)(struct vmw_framebuffer *fb);
48};
49
50
51#define vmw_crtc_to_du(x) \
52 container_of(x, struct vmw_display_unit, crtc)
53
54/*
55 * Basic cursor manipulation
56 */
57int vmw_cursor_update_image(struct vmw_private *dev_priv,
58 u32 *image, u32 width, u32 height,
59 u32 hotspotX, u32 hotspotY);
60void vmw_cursor_update_position(struct vmw_private *dev_priv,
61 bool show, int x, int y);
62
63/**
64 * Base class display unit.
65 *
 66 * Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
 67 * the display unit is all of them at the same time. This is true for both
68 * legacy multimon and screen objects.
69 */
70struct vmw_display_unit {
71 struct drm_crtc crtc;
72 struct drm_encoder encoder;
73 struct drm_connector connector;
74
75 struct vmw_surface *cursor_surface;
76 struct vmw_dma_buffer *cursor_dmabuf;
77 size_t cursor_age;
78
79 int cursor_x;
80 int cursor_y;
81
82 int hotspot_x;
83 int hotspot_y;
84
85 unsigned unit;
86};
87
88/*
89 * Shared display unit functions - vmwgfx_kms.c
90 */
91void vmw_display_unit_cleanup(struct vmw_display_unit *du);
92int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
93 uint32_t handle, uint32_t width, uint32_t height);
94int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
95
96/*
97 * Legacy display unit functions - vmwgfx_ldu.h
98 */
99int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
100int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
101
102#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
new file mode 100644
index 000000000000..90891593bf6c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -0,0 +1,516 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_kms.h"
29
30#define vmw_crtc_to_ldu(x) \
31 container_of(x, struct vmw_legacy_display_unit, base.crtc)
32#define vmw_encoder_to_ldu(x) \
33 container_of(x, struct vmw_legacy_display_unit, base.encoder)
34#define vmw_connector_to_ldu(x) \
35 container_of(x, struct vmw_legacy_display_unit, base.connector)
36
37struct vmw_legacy_display {
38 struct list_head active;
39
40 unsigned num_active;
41
42 struct vmw_framebuffer *fb;
43};
44
45/**
46 * Display unit using the legacy register interface.
47 */
48struct vmw_legacy_display_unit {
49 struct vmw_display_unit base;
50
51 struct list_head active;
52
53 unsigned unit;
54};
55
56static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
57{
58 list_del_init(&ldu->active);
59 vmw_display_unit_cleanup(&ldu->base);
60 kfree(ldu);
61}
62
63
64/*
65 * Legacy Display Unit CRTC functions
66 */
67
68static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
69{
70}
71
72static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
73{
74}
75
76static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
77 u16 *r, u16 *g, u16 *b,
78 uint32_t size)
79{
80}
81
82static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
83{
84 vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
85}
86
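/*
 * Write the list of active display units to the multimon registers:
 * first zero every slot so the host UI doesn't resize, then program
 * position and size for each active unit in turn.
 */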
87static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
88{
89 struct vmw_legacy_display *lds = dev_priv->ldu_priv;
90 struct vmw_legacy_display_unit *entry;
91 struct drm_crtc *crtc;
92 int i = 0;
93
94 /* to stop the screen from changing size on resize */
95 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
96 for (i = 0; i < lds->num_active; i++) {
97 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
98 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
99 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
100 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
101 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
102 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
103 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
104 }
105
106 /* Now set the mode */
107 vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
108 i = 0;
109 list_for_each_entry(entry, &lds->active, active) {
110 crtc = &entry->base.crtc;
111
112 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
113 vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
114 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
115 vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
116 vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
117 vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
118 vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
119
120 i++;
121 }
122
123 return 0;
124}
125
126static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
127 struct vmw_legacy_display_unit *ldu)
128{
129 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
130 if (list_empty(&ldu->active))
131 return 0;
132
133 list_del_init(&ldu->active);
134 if (--(ld->num_active) == 0) {
135 BUG_ON(!ld->fb);
136 if (ld->fb->unpin)
137 ld->fb->unpin(ld->fb);
138 ld->fb = NULL;
139 }
140
141 return 0;
142}
143
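/*
 * Insert the unit into the active list, which is kept sorted by unit
 * number. The framebuffer is pinned when the first unit goes active;
 * vmw_ldu_del_active() unpins it again when the last one is removed.
 */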
144static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
145 struct vmw_legacy_display_unit *ldu,
146 struct vmw_framebuffer *vfb)
147{
148 struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
149 struct vmw_legacy_display_unit *entry;
150 struct list_head *at;
151
152 if (!list_empty(&ldu->active))
153 return 0;
154
155 at = &ld->active;
156 list_for_each_entry(entry, &ld->active, active) {
157 if (entry->unit > ldu->unit)
158 break;
159
160 at = &entry->active;
161 }
162
163 list_add(&ldu->active, at);
164 if (ld->num_active++ == 0) {
165 BUG_ON(ld->fb);
166 if (vfb->pin)
167 vfb->pin(vfb);
168 ld->fb = vfb;
169 }
170
171 return 0;
172}
173
174static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
175{
176 struct vmw_private *dev_priv;
177 struct vmw_legacy_display_unit *ldu;
178 struct drm_connector *connector;
179 struct drm_display_mode *mode;
180 struct drm_encoder *encoder;
181 struct vmw_framebuffer *vfb;
182 struct drm_framebuffer *fb;
183 struct drm_crtc *crtc;
184
185 if (!set)
186 return -EINVAL;
187
188 if (!set->crtc)
189 return -EINVAL;
190
191 /* get the ldu */
192 crtc = set->crtc;
193 ldu = vmw_crtc_to_ldu(crtc);
194 vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
195 dev_priv = vmw_priv(crtc->dev);
196
197 if (set->num_connectors > 1) {
198		DRM_ERROR("too many connectors\n");
199 return -EINVAL;
200 }
201
202 if (set->num_connectors == 1 &&
203 set->connectors[0] != &ldu->base.connector) {
204 DRM_ERROR("connector doesn't match %p %p\n",
205 set->connectors[0], &ldu->base.connector);
206 return -EINVAL;
207 }
208
209	/* ldu only supports one active fb at a time */
210 if (dev_priv->ldu_priv->fb && vfb &&
211 dev_priv->ldu_priv->fb != vfb) {
212 DRM_ERROR("Multiple framebuffers not supported\n");
213 return -EINVAL;
214 }
215
216 /* since they always map one to one these are safe */
217 connector = &ldu->base.connector;
218 encoder = &ldu->base.encoder;
219
220 /* should we turn the crtc off? */
221 if (set->num_connectors == 0 || !set->mode || !set->fb) {
222
223 connector->encoder = NULL;
224 encoder->crtc = NULL;
225 crtc->fb = NULL;
226
227 vmw_ldu_del_active(dev_priv, ldu);
228
229 vmw_ldu_commit_list(dev_priv);
230
231 return 0;
232 }
233
234
235 /* we now know we want to set a mode */
236 mode = set->mode;
237 fb = set->fb;
238
239 if (set->x + mode->hdisplay > fb->width ||
240 set->y + mode->vdisplay > fb->height) {
241 DRM_ERROR("set outside of framebuffer\n");
242 return -EINVAL;
243 }
244
245 vmw_fb_off(dev_priv);
246
247 crtc->fb = fb;
248 encoder->crtc = crtc;
249 connector->encoder = encoder;
250 crtc->x = set->x;
251 crtc->y = set->y;
252 crtc->mode = *mode;
253
254 vmw_ldu_add_active(dev_priv, ldu, vfb);
255
256 vmw_ldu_commit_list(dev_priv);
257
258 return 0;
259}
260
261static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
262 .save = vmw_ldu_crtc_save,
263 .restore = vmw_ldu_crtc_restore,
264 .cursor_set = vmw_du_crtc_cursor_set,
265 .cursor_move = vmw_du_crtc_cursor_move,
266 .gamma_set = vmw_ldu_crtc_gamma_set,
267 .destroy = vmw_ldu_crtc_destroy,
268 .set_config = vmw_ldu_crtc_set_config,
269};
270
271/*
272 * Legacy Display Unit encoder functions
273 */
274
275static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
276{
277 vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
278}
279
280static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
281 .destroy = vmw_ldu_encoder_destroy,
282};
283
284/*
285 * Legacy Display Unit connector functions
286 */
287
288static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
289{
290}
291
292static void vmw_ldu_connector_save(struct drm_connector *connector)
293{
294}
295
296static void vmw_ldu_connector_restore(struct drm_connector *connector)
297{
298}
299
300static enum drm_connector_status
301 vmw_ldu_connector_detect(struct drm_connector *connector)
302{
303 /* XXX vmwctrl should control connection status */
304 if (vmw_connector_to_ldu(connector)->base.unit == 0)
305 return connector_status_connected;
306 return connector_status_disconnected;
307}
308
309static struct drm_display_mode vmw_ldu_connector_builtin[] = {
310 /* 640x480@60Hz */
311 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
312 752, 800, 0, 480, 489, 492, 525, 0,
313 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
314 /* 800x600@60Hz */
315 { DRM_MODE("800x600",
316 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
317 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628,
318 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
319 /* 1024x768@60Hz */
320 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
321 1184, 1344, 0, 768, 771, 777, 806, 0,
322 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
323 /* 1152x864@75Hz */
324 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
325 1344, 1600, 0, 864, 865, 868, 900, 0,
326 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
327 /* 1280x768@60Hz */
328 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
329 1472, 1664, 0, 768, 771, 778, 798, 0,
330 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
331 /* 1280x800@60Hz */
332 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
333 1480, 1680, 0, 800, 803, 809, 831, 0,
334 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
335 /* 1280x960@60Hz */
336 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
337 1488, 1800, 0, 960, 961, 964, 1000, 0,
338 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
339 /* 1280x1024@60Hz */
340 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
341 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
342 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
343 /* 1360x768@60Hz */
344 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
345 1536, 1792, 0, 768, 771, 777, 795, 0,
346 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
347	/* 1400x1050@60Hz */
348 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
349 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
350 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
351 /* 1440x900@60Hz */
352 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
353 1672, 1904, 0, 900, 903, 909, 934, 0,
354 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
355 /* 1600x1200@60Hz */
356 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
357 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
358 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
359 /* 1680x1050@60Hz */
360 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
361 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
362 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
363 /* 1792x1344@60Hz */
364 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
365 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
366 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
367	/* 1856x1392@60Hz */
368 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
369 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
370 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
371 /* 1920x1200@60Hz */
372 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
373 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
374 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
375 /* 1920x1440@60Hz */
376 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
377 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
378 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
379 /* 2560x1600@60Hz */
380 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
381 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
382 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
383 /* Terminate */
384 { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
385};
386
387static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
388 uint32_t max_width, uint32_t max_height)
389{
390 struct drm_device *dev = connector->dev;
391 struct drm_display_mode *mode = NULL;
392 int i;
393
394 for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
395 if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
396 vmw_ldu_connector_builtin[i].vdisplay > max_height)
397 continue;
398
399 mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
400 if (!mode)
401 return 0;
402 mode->vrefresh = drm_mode_vrefresh(mode);
403
404 drm_mode_probed_add(connector, mode);
405 }
406
407 drm_mode_connector_list_update(connector);
408
409 return 1;
410}
411
412static int vmw_ldu_connector_set_property(struct drm_connector *connector,
413 struct drm_property *property,
414 uint64_t val)
415{
416 return 0;
417}
418
419static void vmw_ldu_connector_destroy(struct drm_connector *connector)
420{
421 vmw_ldu_destroy(vmw_connector_to_ldu(connector));
422}
423
424static struct drm_connector_funcs vmw_legacy_connector_funcs = {
425 .dpms = vmw_ldu_connector_dpms,
426 .save = vmw_ldu_connector_save,
427 .restore = vmw_ldu_connector_restore,
428 .detect = vmw_ldu_connector_detect,
429 .fill_modes = vmw_ldu_connector_fill_modes,
430 .set_property = vmw_ldu_connector_set_property,
431 .destroy = vmw_ldu_connector_destroy,
432};
433
434static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
435{
436 struct vmw_legacy_display_unit *ldu;
437 struct drm_device *dev = dev_priv->dev;
438 struct drm_connector *connector;
439 struct drm_encoder *encoder;
440 struct drm_crtc *crtc;
441
442 ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
443 if (!ldu)
444 return -ENOMEM;
445
446 ldu->unit = unit;
447 crtc = &ldu->base.crtc;
448 encoder = &ldu->base.encoder;
449 connector = &ldu->base.connector;
450
451 drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
452 DRM_MODE_CONNECTOR_LVDS);
453 /* Initial status */
454 if (unit == 0)
455 connector->status = connector_status_connected;
456 else
457 connector->status = connector_status_disconnected;
458
459 drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
460 DRM_MODE_ENCODER_LVDS);
461 drm_mode_connector_attach_encoder(connector, encoder);
462 encoder->possible_crtcs = (1 << unit);
463 encoder->possible_clones = 0;
464
465 INIT_LIST_HEAD(&ldu->active);
466
467 drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
468
469 drm_connector_attach_property(connector,
470 dev->mode_config.dirty_info_property,
471 1);
472
473 return 0;
474}
475
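/*
 * Set up the legacy display unit system: allocate the ldu bookkeeping
 * structure, create the dirty-info property and initialize one display
 * unit per supported output.
 */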
476int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
477{
478 if (dev_priv->ldu_priv) {
479 DRM_INFO("ldu system already on\n");
480 return -EINVAL;
481 }
482
483	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
484
485 if (!dev_priv->ldu_priv)
486 return -ENOMEM;
487
488 INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
489 dev_priv->ldu_priv->num_active = 0;
490 dev_priv->ldu_priv->fb = NULL;
491
492 drm_mode_create_dirty_info_property(dev_priv->dev);
493
494 vmw_ldu_init(dev_priv, 0);
495 vmw_ldu_init(dev_priv, 1);
496 vmw_ldu_init(dev_priv, 2);
497 vmw_ldu_init(dev_priv, 3);
498 vmw_ldu_init(dev_priv, 4);
499 vmw_ldu_init(dev_priv, 5);
500 vmw_ldu_init(dev_priv, 6);
501 vmw_ldu_init(dev_priv, 7);
502
503 return 0;
504}
505
506int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
507{
508 if (!dev_priv->ldu_priv)
509 return -ENOSYS;
510
511 BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
512
513 kfree(dev_priv->ldu_priv);
514
515 return 0;
516}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
new file mode 100644
index 000000000000..bb6e6a096d25
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -0,0 +1,634 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28
29#include "drmP.h"
30#include "vmwgfx_drv.h"
31
32#include "ttm/ttm_placement.h"
33
34#include "svga_overlay.h"
35#include "svga_escape.h"
36
37#define VMW_MAX_NUM_STREAMS 1
38
39struct vmw_stream {
40 struct vmw_dma_buffer *buf;
41 bool claimed;
42 bool paused;
43 struct drm_vmw_control_stream_arg saved;
44};
45
46/**
47 * Overlay control
48 */
49struct vmw_overlay {
50 /*
51 * Each stream is a single overlay. In Xv these are called ports.
52 */
53 struct mutex mutex;
54 struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
55};
56
57static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
58{
59 struct vmw_private *dev_priv = vmw_priv(dev);
60 return dev_priv ? dev_priv->overlay_priv : NULL;
61}
62
63struct vmw_escape_header {
64 uint32_t cmd;
65 SVGAFifoCmdEscape body;
66};
67
68struct vmw_escape_video_flush {
69 struct vmw_escape_header escape;
70 SVGAEscapeVideoFlush flush;
71};
72
73static inline void fill_escape(struct vmw_escape_header *header,
74 uint32_t size)
75{
76 header->cmd = SVGA_CMD_ESCAPE;
77 header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
78 header->body.size = size;
79}
80
81static inline void fill_flush(struct vmw_escape_video_flush *cmd,
82 uint32_t stream_id)
83{
84 fill_escape(&cmd->escape, sizeof(cmd->flush));
85 cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
86 cmd->flush.streamId = stream_id;
87}
88
89/**
90 * Pin or unpin a buffer in vram.
91 *
92 * @dev_priv: Driver private.
93 * @buf: DMA buffer to pin or unpin.
94 * @pin: Pin buffer in vram if true.
95 * @interruptible: Use interruptible wait.
96 *
97 * Takes the current master's TTM lock in read mode.
98 *
99 * Returns
100 * -ERESTARTSYS if interrupted by a signal.
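 *
 * A sketch of a hypothetical caller (names are illustrative):
 *	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, true);
 *	if (unlikely(ret == -ERESTARTSYS))
 *		return ret;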
101 */
102static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
103 struct vmw_dma_buffer *buf,
104 bool pin, bool interruptible)
105{
106 struct ttm_buffer_object *bo = &buf->base;
107 struct ttm_bo_global *glob = bo->glob;
108 struct ttm_placement *overlay_placement = &vmw_vram_placement;
109 int ret;
110
111 ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
112 if (unlikely(ret != 0))
113 return ret;
114
115 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
116 if (unlikely(ret != 0))
117 goto err;
118
119 if (buf->gmr_bound) {
120 vmw_gmr_unbind(dev_priv, buf->gmr_id);
121 spin_lock(&glob->lru_lock);
122 ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
123 spin_unlock(&glob->lru_lock);
124		buf->gmr_bound = false;
125 }
126
127 if (pin)
128 overlay_placement = &vmw_vram_ne_placement;
129
130 ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
131
132 ttm_bo_unreserve(bo);
133
134err:
135 ttm_read_unlock(&dev_priv->active_master->lock);
136
137 return ret;
138}
139
140/**
141 * Send put command to hw.
142 *
143 * Returns
144 * -ERESTARTSYS if interrupted by a signal.
145 */
146static int vmw_overlay_send_put(struct vmw_private *dev_priv,
147 struct vmw_dma_buffer *buf,
148 struct drm_vmw_control_stream_arg *arg,
149 bool interruptible)
150{
151 struct {
152 struct vmw_escape_header escape;
153 struct {
154 struct {
155 uint32_t cmdType;
156 uint32_t streamId;
157 } header;
158 struct {
159 uint32_t registerId;
160 uint32_t value;
161 } items[SVGA_VIDEO_PITCH_3 + 1];
162 } body;
163 struct vmw_escape_video_flush flush;
164 } *cmds;
165 uint32_t offset;
166 int i, ret;
167
168 for (;;) {
169 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
170 if (cmds)
171 break;
172
173 ret = vmw_fallback_wait(dev_priv, false, true, 0,
174 interruptible, 3*HZ);
175 if (interruptible && ret == -ERESTARTSYS)
176 return ret;
177 else
178 BUG_ON(ret != 0);
179 }
180
181 fill_escape(&cmds->escape, sizeof(cmds->body));
182 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
183 cmds->body.header.streamId = arg->stream_id;
184
185 for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
186 cmds->body.items[i].registerId = i;
187
188 offset = buf->base.offset + arg->offset;
189
190 cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
191 cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
192 cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
193 cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
194 cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
195 cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
196 cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
197 cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
198 cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
199 cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
200 cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
201 cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
202 cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
203 cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
204 cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
205 cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
206 cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
207 cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
208 cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];
209
210 fill_flush(&cmds->flush, arg->stream_id);
211
212 vmw_fifo_commit(dev_priv, sizeof(*cmds));
213
214 return 0;
215}
216
217/**
218 * Send stop command to hw.
219 *
220 * Returns
221 * -ERESTARTSYS if interrupted by a signal.
222 */
223static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
224 uint32_t stream_id,
225 bool interruptible)
226{
227 struct {
228 struct vmw_escape_header escape;
229 SVGAEscapeVideoSetRegs body;
230 struct vmw_escape_video_flush flush;
231 } *cmds;
232 int ret;
233
234 for (;;) {
235 cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
236 if (cmds)
237 break;
238
239 ret = vmw_fallback_wait(dev_priv, false, true, 0,
240 interruptible, 3*HZ);
241 if (interruptible && ret == -ERESTARTSYS)
242 return ret;
243 else
244 BUG_ON(ret != 0);
245 }
246
247 fill_escape(&cmds->escape, sizeof(cmds->body));
248 cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
249 cmds->body.header.streamId = stream_id;
250 cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
251 cmds->body.items[0].value = false;
252 fill_flush(&cmds->flush, stream_id);
253
254 vmw_fifo_commit(dev_priv, sizeof(*cmds));
255
256 return 0;
257}
258
259/**
260 * Stop or pause a stream.
261 *
262 * If the stream is paused, the no-evict flag is removed from the
263 * buffer, but the buffer itself is left in VRAM. This allows, for
264 * instance, mode_set to evict it should it need to.
265 *
266 * The caller must hold the overlay lock.
267 *
268 * @stream_id which stream to stop/pause.
269 * @pause true to pause, false to stop completely.
270 */
271static int vmw_overlay_stop(struct vmw_private *dev_priv,
272 uint32_t stream_id, bool pause,
273 bool interruptible)
274{
275 struct vmw_overlay *overlay = dev_priv->overlay_priv;
276 struct vmw_stream *stream = &overlay->stream[stream_id];
277 int ret;
278
279	/* No buffer attached: the stream is already completely stopped. */
280 if (!stream->buf)
281 return 0;
282
283 /* If the stream is paused this is already done */
284 if (!stream->paused) {
285 ret = vmw_overlay_send_stop(dev_priv, stream_id,
286 interruptible);
287 if (ret)
288 return ret;
289
290		/* Unpinning only clears the NO_EVICT flag, so this cannot fail with -ENOMEM. */
291 ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
292 interruptible);
293 if (interruptible && ret == -ERESTARTSYS)
294 return ret;
295 else
296 BUG_ON(ret != 0);
297 }
298
299 if (!pause) {
300 vmw_dmabuf_unreference(&stream->buf);
301 stream->paused = false;
302 } else {
303 stream->paused = true;
304 }
305
306 return 0;
307}
308
309/**
310 * Update a stream and send any put or stop fifo commands needed.
311 *
312 * The caller must hold the overlay lock.
313 *
314 * Returns
315 * -ENOMEM if buffer doesn't fit in vram.
316 * -ERESTARTSYS if interrupted.
317 */
318static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
319 struct vmw_dma_buffer *buf,
320 struct drm_vmw_control_stream_arg *arg,
321 bool interruptible)
322{
323 struct vmw_overlay *overlay = dev_priv->overlay_priv;
324 struct vmw_stream *stream = &overlay->stream[arg->stream_id];
325 int ret = 0;
326
327 if (!buf)
328 return -EINVAL;
329
330 DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
331 stream->buf, buf, stream->paused ? "" : "not ");
332
333 if (stream->buf != buf) {
334 ret = vmw_overlay_stop(dev_priv, arg->stream_id,
335 false, interruptible);
336 if (ret)
337 return ret;
338 } else if (!stream->paused) {
339 /* If the buffers match and not paused then just send
340 * the put command, no need to do anything else.
341 */
342 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
343 if (ret == 0)
344 stream->saved = *arg;
345 else
346 BUG_ON(!interruptible);
347
348 return ret;
349 }
350
351 /* We don't start the old stream if we are interrupted.
352 * Might return -ENOMEM if it can't fit the buffer in vram.
353 */
354 ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
355 if (ret)
356 return ret;
357
358 ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
359 if (ret) {
360 /* This one needs to happen no matter what. We only remove
361 * the NO_EVICT flag so this is safe from -ENOMEM.
362 */
363 BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
364 return ret;
365 }
366
367 if (stream->buf != buf)
368 stream->buf = vmw_dmabuf_reference(buf);
369 stream->saved = *arg;
370
371 return 0;
372}
373
374/**
375 * Stop all streams.
376 *
377 * Used by the fb code when starting.
378 *
379 * Takes the overlay lock.
380 */
381int vmw_overlay_stop_all(struct vmw_private *dev_priv)
382{
383 struct vmw_overlay *overlay = dev_priv->overlay_priv;
384 int i, ret;
385
386 if (!overlay)
387 return 0;
388
389 mutex_lock(&overlay->mutex);
390
391 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
392 struct vmw_stream *stream = &overlay->stream[i];
393 if (!stream->buf)
394 continue;
395
396 ret = vmw_overlay_stop(dev_priv, i, false, false);
397 WARN_ON(ret != 0);
398 }
399
400 mutex_unlock(&overlay->mutex);
401
402 return 0;
403}
404
405/**
406 * Try to resume all paused streams.
407 *
408 * Used by the kms code after moving a new scanout buffer to vram.
409 *
410 * Takes the overlay lock.
411 */
412int vmw_overlay_resume_all(struct vmw_private *dev_priv)
413{
414 struct vmw_overlay *overlay = dev_priv->overlay_priv;
415 int i, ret;
416
417 if (!overlay)
418 return 0;
419
420 mutex_lock(&overlay->mutex);
421
422 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
423 struct vmw_stream *stream = &overlay->stream[i];
424 if (!stream->paused)
425 continue;
426
427 ret = vmw_overlay_update_stream(dev_priv, stream->buf,
428 &stream->saved, false);
429 if (ret != 0)
430 DRM_INFO("%s: *warning* failed to resume stream %i\n",
431 __func__, i);
432 }
433
434 mutex_unlock(&overlay->mutex);
435
436 return 0;
437}
438
439/**
440 * Pauses all active streams.
441 *
442 * Used by the kms code when moving a new scanout buffer to vram.
443 *
444 * Takes the overlay lock.
445 */
446int vmw_overlay_pause_all(struct vmw_private *dev_priv)
447{
448 struct vmw_overlay *overlay = dev_priv->overlay_priv;
449 int i, ret;
450
451 if (!overlay)
452 return 0;
453
454 mutex_lock(&overlay->mutex);
455
456 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
457 if (overlay->stream[i].paused)
458 DRM_INFO("%s: *warning* stream %i already paused\n",
459 __func__, i);
460 ret = vmw_overlay_stop(dev_priv, i, true, false);
461 WARN_ON(ret != 0);
462 }
463
464 mutex_unlock(&overlay->mutex);
465
466 return 0;
467}
468
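/*
 * Ioctl entry point for userspace overlay control: look up the stream,
 * then either stop it (arg->enabled == 0) or update it with the
 * supplied buffer and parameters.
 */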
469int vmw_overlay_ioctl(struct drm_device *dev, void *data,
470 struct drm_file *file_priv)
471{
472 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
473 struct vmw_private *dev_priv = vmw_priv(dev);
474 struct vmw_overlay *overlay = dev_priv->overlay_priv;
475 struct drm_vmw_control_stream_arg *arg =
476 (struct drm_vmw_control_stream_arg *)data;
477 struct vmw_dma_buffer *buf;
478 struct vmw_resource *res;
479 int ret;
480
481 if (!overlay)
482 return -ENOSYS;
483
484 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
485 if (ret)
486 return ret;
487
488 mutex_lock(&overlay->mutex);
489
490 if (!arg->enabled) {
491 ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
492 goto out_unlock;
493 }
494
495 ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
496 if (ret)
497 goto out_unlock;
498
499 ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
500
501 vmw_dmabuf_unreference(&buf);
502
503out_unlock:
504 mutex_unlock(&overlay->mutex);
505 vmw_resource_unreference(&res);
506
507 return ret;
508}
509
510int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
511{
512 if (!dev_priv->overlay_priv)
513 return 0;
514
515 return VMW_MAX_NUM_STREAMS;
516}
517
518int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
519{
520 struct vmw_overlay *overlay = dev_priv->overlay_priv;
521 int i, k;
522
523 if (!overlay)
524 return 0;
525
526 mutex_lock(&overlay->mutex);
527
528 for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
529 if (!overlay->stream[i].claimed)
530 k++;
531
532 mutex_unlock(&overlay->mutex);
533
534 return k;
535}
536
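/*
 * Claim the first unclaimed stream and return its id in @out, or
 * -ESRCH if all streams are already taken.
 */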
537int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
538{
539 struct vmw_overlay *overlay = dev_priv->overlay_priv;
540 int i;
541
542 if (!overlay)
543 return -ENOSYS;
544
545 mutex_lock(&overlay->mutex);
546
547 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
548
549 if (overlay->stream[i].claimed)
550 continue;
551
552 overlay->stream[i].claimed = true;
553 *out = i;
554 mutex_unlock(&overlay->mutex);
555 return 0;
556 }
557
558 mutex_unlock(&overlay->mutex);
559 return -ESRCH;
560}
561
562int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
563{
564 struct vmw_overlay *overlay = dev_priv->overlay_priv;
565
566 BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);
567
568 if (!overlay)
569 return -ENOSYS;
570
571 mutex_lock(&overlay->mutex);
572
573 WARN_ON(!overlay->stream[stream_id].claimed);
574 vmw_overlay_stop(dev_priv, stream_id, false, false);
575 overlay->stream[stream_id].claimed = false;
576
577 mutex_unlock(&overlay->mutex);
578 return 0;
579}
580
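/*
 * Allocate and initialize the overlay private data. Requires both the
 * VIDEO and ESCAPE FIFO capabilities.
 */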
581int vmw_overlay_init(struct vmw_private *dev_priv)
582{
583 struct vmw_overlay *overlay;
584 int i;
585
586 if (dev_priv->overlay_priv)
587 return -EINVAL;
588
589	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) ||
590	    !(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
591 DRM_INFO("hardware doesn't support overlays\n");
592 return -ENOSYS;
593 }
594
595	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
596 if (!overlay)
597 return -ENOMEM;
598
599 memset(overlay, 0, sizeof(*overlay));
600 mutex_init(&overlay->mutex);
601 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
602 overlay->stream[i].buf = NULL;
603 overlay->stream[i].paused = false;
604 overlay->stream[i].claimed = false;
605 }
606
607 dev_priv->overlay_priv = overlay;
608
609 return 0;
610}
611
612int vmw_overlay_close(struct vmw_private *dev_priv)
613{
614 struct vmw_overlay *overlay = dev_priv->overlay_priv;
615 bool forgotten_buffer = false;
616 int i;
617
618 if (!overlay)
619 return -ENOSYS;
620
621 for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
622 if (overlay->stream[i].buf) {
623 forgotten_buffer = true;
624 vmw_overlay_stop(dev_priv, i, false, false);
625 }
626 }
627
628 WARN_ON(forgotten_buffer);
629
630 dev_priv->overlay_priv = NULL;
631 kfree(overlay);
632
633 return 0;
634}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
new file mode 100644
index 000000000000..9d0dd3a342eb
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -0,0 +1,57 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28/**
29 * This file contains virtual hardware defines for kernel space.
30 */
31
32#ifndef _VMWGFX_REG_H_
33#define _VMWGFX_REG_H_
34
35#include <linux/types.h>
36
37#define VMWGFX_INDEX_PORT 0x0
38#define VMWGFX_VALUE_PORT 0x1
39#define VMWGFX_IRQSTATUS_PORT 0x8
40
41struct svga_guest_mem_descriptor {
42 __le32 ppn;
43 __le32 num_pages;
44};
45
46struct svga_fifo_cmd_fence {
47 __le32 fence;
48};
49
50#define SVGA_SYNC_GENERIC 1
51#define SVGA_SYNC_FIFOFULL 2
52
53#include "svga_types.h"
54
55#include "svga3d_reg.h"
56
57#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
new file mode 100644
index 000000000000..c012d5927f65
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -0,0 +1,1183 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_drm.h"
30#include "ttm/ttm_object.h"
31#include "ttm/ttm_placement.h"
32#include "drmP.h"
33
34#define VMW_RES_CONTEXT ttm_driver_type0
35#define VMW_RES_SURFACE ttm_driver_type1
36#define VMW_RES_STREAM ttm_driver_type2
37
38struct vmw_user_context {
39 struct ttm_base_object base;
40 struct vmw_resource res;
41};
42
43struct vmw_user_surface {
44 struct ttm_base_object base;
45 struct vmw_surface srf;
46};
47
48struct vmw_user_dma_buffer {
49 struct ttm_base_object base;
50 struct vmw_dma_buffer dma;
51};
52
53struct vmw_bo_user_rep {
54 uint32_t handle;
55 uint64_t map_handle;
56};
57
58struct vmw_stream {
59 struct vmw_resource res;
60 uint32_t stream_id;
61};
62
63struct vmw_user_stream {
64 struct ttm_base_object base;
65 struct vmw_stream stream;
66};
67
68static inline struct vmw_dma_buffer *
69vmw_dma_buffer(struct ttm_buffer_object *bo)
70{
71 return container_of(bo, struct vmw_dma_buffer, base);
72}
73
74static inline struct vmw_user_dma_buffer *
75vmw_user_dma_buffer(struct ttm_buffer_object *bo)
76{
77 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
78 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
79}
80
81struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
82{
83 kref_get(&res->kref);
84 return res;
85}
86
87static void vmw_resource_release(struct kref *kref)
88{
89 struct vmw_resource *res =
90 container_of(kref, struct vmw_resource, kref);
91 struct vmw_private *dev_priv = res->dev_priv;
92
93 idr_remove(res->idr, res->id);
94 write_unlock(&dev_priv->resource_lock);
95
96 if (likely(res->hw_destroy != NULL))
97 res->hw_destroy(res);
98
99 if (res->res_free != NULL)
100 res->res_free(res);
101 else
102 kfree(res);
103
104 write_lock(&dev_priv->resource_lock);
105}
106
107void vmw_resource_unreference(struct vmw_resource **p_res)
108{
109 struct vmw_resource *res = *p_res;
110 struct vmw_private *dev_priv = res->dev_priv;
111
112 *p_res = NULL;
113 write_lock(&dev_priv->resource_lock);
114 kref_put(&res->kref, vmw_resource_release);
115 write_unlock(&dev_priv->resource_lock);
116}
117
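/*
 * Initialize a resource and allocate its id from @idr. The resource is
 * not visible to vmw_resource_lookup() until vmw_resource_activate()
 * marks it available.
 */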
118static int vmw_resource_init(struct vmw_private *dev_priv,
119 struct vmw_resource *res,
120 struct idr *idr,
121 enum ttm_object_type obj_type,
122 void (*res_free) (struct vmw_resource *res))
123{
124 int ret;
125
126 kref_init(&res->kref);
127 res->hw_destroy = NULL;
128 res->res_free = res_free;
129 res->res_type = obj_type;
130 res->idr = idr;
131 res->avail = false;
132 res->dev_priv = dev_priv;
133
134 do {
135 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
136 return -ENOMEM;
137
138 write_lock(&dev_priv->resource_lock);
139 ret = idr_get_new_above(idr, res, 1, &res->id);
140 write_unlock(&dev_priv->resource_lock);
141
142 } while (ret == -EAGAIN);
143
144 return ret;
145}
146
147/**
148 * vmw_resource_activate
149 *
150 * @res: Pointer to the newly created resource
151 * @hw_destroy: Destroy function. NULL if none.
152 *
153 * Activate a resource after the hardware has been made aware of it.
154 * Set the destroy function to @hw_destroy. Typically this frees the
155 * resource and destroys the hardware resources associated with it.
156 * Activate basically means that the function vmw_resource_lookup will
157 * find it.
158 */
159
160static void vmw_resource_activate(struct vmw_resource *res,
161 void (*hw_destroy) (struct vmw_resource *))
162{
163 struct vmw_private *dev_priv = res->dev_priv;
164
165 write_lock(&dev_priv->resource_lock);
166 res->avail = true;
167 res->hw_destroy = hw_destroy;
168 write_unlock(&dev_priv->resource_lock);
169}
170
171struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
172 struct idr *idr, int id)
173{
174 struct vmw_resource *res;
175
176 read_lock(&dev_priv->resource_lock);
177 res = idr_find(idr, id);
178 if (res && res->avail)
179 kref_get(&res->kref);
180 else
181 res = NULL;
182 read_unlock(&dev_priv->resource_lock);
183
184 if (unlikely(res == NULL))
185 return NULL;
186
187 return res;
188}
189
190/**
191 * Context management:
192 */
193
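/*
 * Tell the device to destroy the context by sending a
 * SVGA_3D_CMD_CONTEXT_DESTROY command through the FIFO.
 */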
194static void vmw_hw_context_destroy(struct vmw_resource *res)
195{
196
197 struct vmw_private *dev_priv = res->dev_priv;
198 struct {
199 SVGA3dCmdHeader header;
200 SVGA3dCmdDestroyContext body;
201 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
202
203 if (unlikely(cmd == NULL)) {
204		DRM_ERROR("Failed reserving FIFO space for context "
205 "destruction.\n");
206 return;
207 }
208
209 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
210 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
211 cmd->body.cid = cpu_to_le32(res->id);
212
213 vmw_fifo_commit(dev_priv, sizeof(*cmd));
214}
215
216static int vmw_context_init(struct vmw_private *dev_priv,
217 struct vmw_resource *res,
218 void (*res_free) (struct vmw_resource *res))
219{
220 int ret;
221
222 struct {
223 SVGA3dCmdHeader header;
224 SVGA3dCmdDefineContext body;
225 } *cmd;
226
227 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
228 VMW_RES_CONTEXT, res_free);
229
230 if (unlikely(ret != 0)) {
231 if (res_free == NULL)
232 kfree(res);
233 else
234 res_free(res);
235 return ret;
236 }
237
238 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
239 if (unlikely(cmd == NULL)) {
240 DRM_ERROR("Fifo reserve failed.\n");
241 vmw_resource_unreference(&res);
242 return -ENOMEM;
243 }
244
245 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
246 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
247 cmd->body.cid = cpu_to_le32(res->id);
248
249 vmw_fifo_commit(dev_priv, sizeof(*cmd));
250 vmw_resource_activate(res, vmw_hw_context_destroy);
251 return 0;
252}
253
254struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
255{
256 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
257 int ret;
258
259 if (unlikely(res == NULL))
260 return NULL;
261
262 ret = vmw_context_init(dev_priv, res, NULL);
263 return (ret == 0) ? res : NULL;
264}
265
266/**
267 * User-space context management:
268 */
269
270static void vmw_user_context_free(struct vmw_resource *res)
271{
272 struct vmw_user_context *ctx =
273 container_of(res, struct vmw_user_context, res);
274
275 kfree(ctx);
276}
277
278/**
279 * This function is called when user space has no more references on the
280 * base object. It releases the base-object's reference on the resource object.
281 */
282
283static void vmw_user_context_base_release(struct ttm_base_object **p_base)
284{
285 struct ttm_base_object *base = *p_base;
286 struct vmw_user_context *ctx =
287 container_of(base, struct vmw_user_context, base);
288 struct vmw_resource *res = &ctx->res;
289
290 *p_base = NULL;
291 vmw_resource_unreference(&res);
292}
293
294int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
295 struct drm_file *file_priv)
296{
297 struct vmw_private *dev_priv = vmw_priv(dev);
298 struct vmw_resource *res;
299 struct vmw_user_context *ctx;
300 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
301 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
302 int ret = 0;
303
304 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
305 if (unlikely(res == NULL))
306 return -EINVAL;
307
308 if (res->res_free != &vmw_user_context_free) {
309 ret = -EINVAL;
310 goto out;
311 }
312
313 ctx = container_of(res, struct vmw_user_context, res);
314 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
315 ret = -EPERM;
316 goto out;
317 }
318
319 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
320out:
321 vmw_resource_unreference(&res);
322 return ret;
323}
324
325int vmw_context_define_ioctl(struct drm_device *dev, void *data,
326 struct drm_file *file_priv)
327{
328 struct vmw_private *dev_priv = vmw_priv(dev);
329 struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
330 struct vmw_resource *res;
331 struct vmw_resource *tmp;
332 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
333 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
334 int ret;
335
336 if (unlikely(ctx == NULL))
337 return -ENOMEM;
338
339 res = &ctx->res;
340 ctx->base.shareable = false;
341 ctx->base.tfile = NULL;
342
343 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
344 if (unlikely(ret != 0))
345 return ret;
346
347 tmp = vmw_resource_reference(&ctx->res);
348 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
349 &vmw_user_context_base_release, NULL);
350
351 if (unlikely(ret != 0)) {
352 vmw_resource_unreference(&tmp);
353 goto out_err;
354 }
355
356 arg->cid = res->id;
357out_err:
358 vmw_resource_unreference(&res);
359 return ret;
360
361}
362
363int vmw_context_check(struct vmw_private *dev_priv,
364 struct ttm_object_file *tfile,
365 int id)
366{
367 struct vmw_resource *res;
368 int ret = 0;
369
370 read_lock(&dev_priv->resource_lock);
371 res = idr_find(&dev_priv->context_idr, id);
372 if (res && res->avail) {
373 struct vmw_user_context *ctx =
374 container_of(res, struct vmw_user_context, res);
375 if (ctx->base.tfile != tfile && !ctx->base.shareable)
376 ret = -EPERM;
377 } else
378 ret = -EINVAL;
379 read_unlock(&dev_priv->resource_lock);
380
381 return ret;
382}
383
384
385/**
386 * Surface management.
387 */
388
389static void vmw_hw_surface_destroy(struct vmw_resource *res)
390{
391
392 struct vmw_private *dev_priv = res->dev_priv;
393 struct {
394 SVGA3dCmdHeader header;
395 SVGA3dCmdDestroySurface body;
396 } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
397
398 if (unlikely(cmd == NULL)) {
399 DRM_ERROR("Failed reserving FIFO space for surface "
400 "destruction.\n");
401 return;
402 }
403
404 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
405 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
406 cmd->body.sid = cpu_to_le32(res->id);
407
408 vmw_fifo_commit(dev_priv, sizeof(*cmd));
409}
410
411void vmw_surface_res_free(struct vmw_resource *res)
412{
413 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
414
415 kfree(srf->sizes);
416 kfree(srf->snooper.image);
417 kfree(srf);
418}
419
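/*
 * Create the hardware surface: register the resource, then submit a
 * SVGA_3D_CMD_SURFACE_DEFINE command followed by one SVGA3dSize per
 * mip level and face.
 */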
420int vmw_surface_init(struct vmw_private *dev_priv,
421 struct vmw_surface *srf,
422 void (*res_free) (struct vmw_resource *res))
423{
424 int ret;
425 struct {
426 SVGA3dCmdHeader header;
427 SVGA3dCmdDefineSurface body;
428 } *cmd;
429 SVGA3dSize *cmd_size;
430 struct vmw_resource *res = &srf->res;
431 struct drm_vmw_size *src_size;
432 size_t submit_size;
433 uint32_t cmd_len;
434 int i;
435
436 BUG_ON(res_free == NULL);
437 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
438 VMW_RES_SURFACE, res_free);
439
440 if (unlikely(ret != 0)) {
441 res_free(res);
442 return ret;
443 }
444
445 submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
446 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
447
448 cmd = vmw_fifo_reserve(dev_priv, submit_size);
449 if (unlikely(cmd == NULL)) {
450 DRM_ERROR("Fifo reserve failed for create surface.\n");
451 vmw_resource_unreference(&res);
452 return -ENOMEM;
453 }
454
455 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
456 cmd->header.size = cpu_to_le32(cmd_len);
457 cmd->body.sid = cpu_to_le32(res->id);
458 cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
459 cmd->body.format = cpu_to_le32(srf->format);
460 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
461 cmd->body.face[i].numMipLevels =
462 cpu_to_le32(srf->mip_levels[i]);
463 }
464
465 cmd += 1;
466 cmd_size = (SVGA3dSize *) cmd;
467 src_size = srf->sizes;
468
469 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
470 cmd_size->width = cpu_to_le32(src_size->width);
471 cmd_size->height = cpu_to_le32(src_size->height);
472 cmd_size->depth = cpu_to_le32(src_size->depth);
473 }
474
475 vmw_fifo_commit(dev_priv, submit_size);
476 vmw_resource_activate(res, vmw_hw_surface_destroy);
477 return 0;
478}
479
480static void vmw_user_surface_free(struct vmw_resource *res)
481{
482 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
483 struct vmw_user_surface *user_srf =
484 container_of(srf, struct vmw_user_surface, srf);
485
486 kfree(srf->sizes);
487 kfree(srf->snooper.image);
488 kfree(user_srf);
489}
490
491int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
492 struct ttm_object_file *tfile,
493 uint32_t handle, struct vmw_surface **out)
494{
495 struct vmw_resource *res;
496 struct vmw_surface *srf;
497 struct vmw_user_surface *user_srf;
498 struct ttm_base_object *base;
499 int ret = -EINVAL;
500
501 base = ttm_base_object_lookup(tfile, handle);
502 if (unlikely(base == NULL))
503 return -EINVAL;
504
505 if (unlikely(base->object_type != VMW_RES_SURFACE))
506 goto out_bad_resource;
507
508 user_srf = container_of(base, struct vmw_user_surface, base);
509 srf = &user_srf->srf;
510 res = &srf->res;
511
512 read_lock(&dev_priv->resource_lock);
513
514 if (!res->avail || res->res_free != &vmw_user_surface_free) {
515 read_unlock(&dev_priv->resource_lock);
516 goto out_bad_resource;
517 }
518
519 kref_get(&res->kref);
520 read_unlock(&dev_priv->resource_lock);
521
522 *out = srf;
523 ret = 0;
524
525out_bad_resource:
526 ttm_base_object_unref(&base);
527
528 return ret;
529}
530
531static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
532{
533 struct ttm_base_object *base = *p_base;
534 struct vmw_user_surface *user_srf =
535 container_of(base, struct vmw_user_surface, base);
536 struct vmw_resource *res = &user_srf->srf.res;
537
538 *p_base = NULL;
539 vmw_resource_unreference(&res);
540}
541
542int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
543 struct drm_file *file_priv)
544{
545 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
546 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
547
548 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
549}
550
551int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
552 struct drm_file *file_priv)
553{
554 struct vmw_private *dev_priv = vmw_priv(dev);
555 struct vmw_user_surface *user_srf =
556 kmalloc(sizeof(*user_srf), GFP_KERNEL);
557 struct vmw_surface *srf;
558 struct vmw_resource *res;
559 struct vmw_resource *tmp;
560 union drm_vmw_surface_create_arg *arg =
561 (union drm_vmw_surface_create_arg *)data;
562 struct drm_vmw_surface_create_req *req = &arg->req;
563 struct drm_vmw_surface_arg *rep = &arg->rep;
564 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
565 struct drm_vmw_size __user *user_sizes;
566 int ret;
567 int i;
568
569 if (unlikely(user_srf == NULL))
570 return -ENOMEM;
571
572 srf = &user_srf->srf;
573 res = &srf->res;
574
575 srf->flags = req->flags;
576 srf->format = req->format;
577 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
578 srf->num_sizes = 0;
579 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
580 srf->num_sizes += srf->mip_levels[i];
581
582 if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
583 DRM_VMW_MAX_MIP_LEVELS) {
584 ret = -EINVAL;
585 goto out_err0;
586 }
587
588 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
589 if (unlikely(srf->sizes == NULL)) {
590 ret = -ENOMEM;
591 goto out_err0;
592 }
593
594 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
595 req->size_addr;
596
597	if (copy_from_user(srf->sizes, user_sizes,
598			   srf->num_sizes * sizeof(*srf->sizes)) != 0) {
599		ret = -EFAULT;	/* copy_from_user() returns bytes not copied */
600		goto out_err1;
601	}
601
602 user_srf->base.shareable = false;
603 user_srf->base.tfile = NULL;
604
605 /**
606 * From this point, the generic resource management functions
607 * destroy the object on failure.
608 */
609
610 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
611 if (unlikely(ret != 0))
612 return ret;
613
614 tmp = vmw_resource_reference(&srf->res);
615 ret = ttm_base_object_init(tfile, &user_srf->base,
616 req->shareable, VMW_RES_SURFACE,
617 &vmw_user_surface_base_release, NULL);
618
619 if (unlikely(ret != 0)) {
620 vmw_resource_unreference(&tmp);
621 vmw_resource_unreference(&res);
622 return ret;
623 }
624
625	if (srf->flags & (1 << 9) && /* presumably a cursor-surface hint */
626 srf->num_sizes == 1 &&
627 srf->sizes[0].width == 64 &&
628 srf->sizes[0].height == 64 &&
629 srf->format == SVGA3D_A8R8G8B8) {
630
631 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
632 /* clear the image */
633 if (srf->snooper.image)
634 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
635 else
636 DRM_ERROR("Failed to allocate cursor_image\n");
637
638 } else {
639 srf->snooper.image = NULL;
640 }
641 srf->snooper.crtc = NULL;
642
643 rep->sid = user_srf->base.hash.key;
644 if (rep->sid == SVGA3D_INVALID_ID)
645 DRM_ERROR("Created bad Surface ID.\n");
646
647 vmw_resource_unreference(&res);
648 return 0;
649out_err1:
650 kfree(srf->sizes);
651out_err0:
652 kfree(user_srf);
653 return ret;
654}
655
656int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
657 struct drm_file *file_priv)
658{
659 union drm_vmw_surface_reference_arg *arg =
660 (union drm_vmw_surface_reference_arg *)data;
661 struct drm_vmw_surface_arg *req = &arg->req;
662 struct drm_vmw_surface_create_req *rep = &arg->rep;
663 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
664 struct vmw_surface *srf;
665 struct vmw_user_surface *user_srf;
666 struct drm_vmw_size __user *user_sizes;
667 struct ttm_base_object *base;
668 int ret = -EINVAL;
669
670 base = ttm_base_object_lookup(tfile, req->sid);
671 if (unlikely(base == NULL)) {
672 DRM_ERROR("Could not find surface to reference.\n");
673 return -EINVAL;
674 }
675
676 if (unlikely(base->object_type != VMW_RES_SURFACE))
677 goto out_bad_resource;
678
679 user_srf = container_of(base, struct vmw_user_surface, base);
680 srf = &user_srf->srf;
681
682 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
683 if (unlikely(ret != 0)) {
684 DRM_ERROR("Could not add a reference to a surface.\n");
685 goto out_no_reference;
686 }
687
688 rep->flags = srf->flags;
689 rep->format = srf->format;
690 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
691 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
692 rep->size_addr;
693
694	if (user_sizes)
695		ret = copy_to_user(user_sizes, srf->sizes,
696				   srf->num_sizes * sizeof(*srf->sizes));
697	if (unlikely(ret != 0)) {
698		DRM_ERROR("copy_to_user failed %p %u\n",
699			  user_sizes, srf->num_sizes);
700		ret = -EFAULT;	/* copy_to_user() returns bytes not copied */
701	}
700out_bad_resource:
701out_no_reference:
702 ttm_base_object_unref(&base);
703
704 return ret;
705}
706
707int vmw_surface_check(struct vmw_private *dev_priv,
708 struct ttm_object_file *tfile,
709 uint32_t handle, int *id)
710{
711 struct ttm_base_object *base;
712 struct vmw_user_surface *user_srf;
713
714 int ret = -EPERM;
715
716 base = ttm_base_object_lookup(tfile, handle);
717 if (unlikely(base == NULL))
718 return -EINVAL;
719
720 if (unlikely(base->object_type != VMW_RES_SURFACE))
721 goto out_bad_surface;
722
723 user_srf = container_of(base, struct vmw_user_surface, base);
724 *id = user_srf->srf.res.id;
725 ret = 0;
726
727out_bad_surface:
728 /**
729 * FIXME: May deadlock here when called from the
730 * command parsing code.
731 */
732
733 ttm_base_object_unref(&base);
734 return ret;
735}
736
737/**
738 * Buffer management.
739 */
740
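/*
 * Estimate the TTM accounting size of a buffer object: the fixed
 * per-object overhead plus the page pointer array.
 */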
741static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
742 unsigned long num_pages)
743{
744 static size_t bo_user_size = ~0;
745
746 size_t page_array_size =
747 (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
748
749 if (unlikely(bo_user_size == ~0)) {
750 bo_user_size = glob->ttm_bo_extra_size +
751 ttm_round_pot(sizeof(struct vmw_dma_buffer));
752 }
753
754 return bo_user_size + page_array_size;
755}
756
757void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
758{
759 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
760 struct ttm_bo_global *glob = bo->glob;
761 struct vmw_private *dev_priv =
762 container_of(bo->bdev, struct vmw_private, bdev);
763
764 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
765 if (vmw_bo->gmr_bound) {
766 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
767 spin_lock(&glob->lru_lock);
768 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
769 spin_unlock(&glob->lru_lock);
770 }
771 kfree(vmw_bo);
772}
773
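/*
 * Initialize a vmw_dma_buffer: reserve accounting space with the TTM
 * memory global, then hand the embedded ttm_buffer_object to
 * ttm_bo_init() with the requested placement.
 */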
774int vmw_dmabuf_init(struct vmw_private *dev_priv,
775 struct vmw_dma_buffer *vmw_bo,
776 size_t size, struct ttm_placement *placement,
777 bool interruptible,
778 void (*bo_free) (struct ttm_buffer_object *bo))
779{
780 struct ttm_bo_device *bdev = &dev_priv->bdev;
781 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
782 size_t acc_size;
783 int ret;
784
785 BUG_ON(!bo_free);
786
787 acc_size =
788 vmw_dmabuf_acc_size(bdev->glob,
789 (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
790
791 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
792 if (unlikely(ret != 0)) {
793		/* We must free the bo ourselves here; on failure
794		 * ttm_bo_init() below would have done it for us. */
795 bo_free(&vmw_bo->base);
796 return ret;
797 }
798
799 memset(vmw_bo, 0, sizeof(*vmw_bo));
800
801 INIT_LIST_HEAD(&vmw_bo->gmr_lru);
802 INIT_LIST_HEAD(&vmw_bo->validate_list);
803 vmw_bo->gmr_id = 0;
804 vmw_bo->gmr_bound = false;
805
806 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
807 ttm_bo_type_device, placement,
808 0, 0, interruptible,
809 NULL, acc_size, bo_free);
810 return ret;
811}
812
813static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
814{
815 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
816 struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
817 struct ttm_bo_global *glob = bo->glob;
818 struct vmw_private *dev_priv =
819 container_of(bo->bdev, struct vmw_private, bdev);
820
821 ttm_mem_global_free(glob->mem_glob, bo->acc_size);
822 if (vmw_bo->gmr_bound) {
823 vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
824 spin_lock(&glob->lru_lock);
825 ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
826 spin_unlock(&glob->lru_lock);
827 }
828 kfree(vmw_user_bo);
829}
830
831static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
832{
833 struct vmw_user_dma_buffer *vmw_user_bo;
834 struct ttm_base_object *base = *p_base;
835 struct ttm_buffer_object *bo;
836
837 *p_base = NULL;
838
839 if (unlikely(base == NULL))
840 return;
841
842 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
843 bo = &vmw_user_bo->dma.base;
844 ttm_bo_unref(&bo);
845}
846
847int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
848 struct drm_file *file_priv)
849{
850 struct vmw_private *dev_priv = vmw_priv(dev);
851 union drm_vmw_alloc_dmabuf_arg *arg =
852 (union drm_vmw_alloc_dmabuf_arg *)data;
853 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
854 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
855 struct vmw_user_dma_buffer *vmw_user_bo;
856 struct ttm_buffer_object *tmp;
857 struct vmw_master *vmaster = vmw_master(file_priv->master);
858 int ret;
859
860 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
861 if (unlikely(vmw_user_bo == NULL))
862 return -ENOMEM;
863
864 ret = ttm_read_lock(&vmaster->lock, true);
865 if (unlikely(ret != 0)) {
866 kfree(vmw_user_bo);
867 return ret;
868 }
869
870 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
871 &vmw_vram_placement, true,
872 &vmw_user_dmabuf_destroy);
873	if (unlikely(ret != 0)) {
874		ttm_read_unlock(&vmaster->lock);	/* don't leak the read lock */
875		return ret;
876	}
875
876 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
877 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
878 &vmw_user_bo->base,
879 false,
880 ttm_buffer_type,
881 &vmw_user_dmabuf_release, NULL);
882	if (likely(ret == 0)) {
883		rep->handle = vmw_user_bo->base.hash.key;
884		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
885		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
886		rep->cur_gmr_offset = 0;
887	}
888	/* Drop the local reference in both paths; ttm_bo_unref() NULLs its
889	 * argument, so unreferencing tmp a second time would oops. */
890	ttm_bo_unref(&tmp);
891
892 ttm_read_unlock(&vmaster->lock);
893
894	return ret;
895}
896
897int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
898 struct drm_file *file_priv)
899{
900 struct drm_vmw_unref_dmabuf_arg *arg =
901 (struct drm_vmw_unref_dmabuf_arg *)data;
902
903 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
904 arg->handle,
905 TTM_REF_USAGE);
906}
907
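/*
 * Return the buffer's slot in the current validate list, assigning
 * @cur_validate_node the first time the buffer is seen.
 */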
908uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
909 uint32_t cur_validate_node)
910{
911 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
912
913 if (likely(vmw_bo->on_validate_list))
914 return vmw_bo->cur_validate_node;
915
916 vmw_bo->cur_validate_node = cur_validate_node;
917 vmw_bo->on_validate_list = true;
918
919 return cur_validate_node;
920}
921
922void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
923{
924 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
925
926 vmw_bo->on_validate_list = false;
927}
928
929uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
930{
931 struct vmw_dma_buffer *vmw_bo;
932
933 if (bo->mem.mem_type == TTM_PL_VRAM)
934 return SVGA_GMR_FRAMEBUFFER;
935
936 vmw_bo = vmw_dma_buffer(bo);
937
938 return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
939}
940
941void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
942{
943 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
944 vmw_bo->gmr_bound = true;
945 vmw_bo->gmr_id = id;
946}
947
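/*
 * Look up a user dma buffer by handle and return it in @out with a
 * ttm_bo reference held for the caller.
 */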
948int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
949 uint32_t handle, struct vmw_dma_buffer **out)
950{
951 struct vmw_user_dma_buffer *vmw_user_bo;
952 struct ttm_base_object *base;
953
954 base = ttm_base_object_lookup(tfile, handle);
955 if (unlikely(base == NULL)) {
956 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
957 (unsigned long)handle);
958 return -ESRCH;
959 }
960
961 if (unlikely(base->object_type != ttm_buffer_type)) {
962 ttm_base_object_unref(&base);
963 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
964 (unsigned long)handle);
965 return -EINVAL;
966 }
967
968 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
969 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
970 ttm_base_object_unref(&base);
971 *out = &vmw_user_bo->dma;
972
973 return 0;
974}
975
976/**
977 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
978 * when we're out of ids, causing GMR space to be allocated
979 * out of VRAM.
980 */
981
982int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
983{
984 struct ttm_bo_global *glob = dev_priv->bdev.glob;
985 int id;
986 int ret;
987
988 do {
989 if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
990 return -ENOMEM;
991
992 spin_lock(&glob->lru_lock);
993 ret = ida_get_new(&dev_priv->gmr_ida, &id);
994 spin_unlock(&glob->lru_lock);
995 } while (ret == -EAGAIN);
996
997 if (unlikely(ret != 0))
998 return ret;
999
1000 if (unlikely(id >= dev_priv->max_gmr_ids)) {
1001 spin_lock(&glob->lru_lock);
1002 ida_remove(&dev_priv->gmr_ida, id);
1003 spin_unlock(&glob->lru_lock);
1004 return -EBUSY;
1005 }
1006
1007 *p_id = (uint32_t) id;
1008 return 0;
1009}
1010
1011/*
1012 * Stream management
1013 */
1014
1015static void vmw_stream_destroy(struct vmw_resource *res)
1016{
1017 struct vmw_private *dev_priv = res->dev_priv;
1018 struct vmw_stream *stream;
1019 int ret;
1020
1021 DRM_INFO("%s: unref\n", __func__);
1022 stream = container_of(res, struct vmw_stream, res);
1023
1024 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1025 WARN_ON(ret != 0);
1026}
1027
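/*
 * Register the stream resource and claim an overlay stream id for it.
 * On failure the stream is freed, mirroring vmw_context_init().
 */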
1028static int vmw_stream_init(struct vmw_private *dev_priv,
1029 struct vmw_stream *stream,
1030 void (*res_free) (struct vmw_resource *res))
1031{
1032 struct vmw_resource *res = &stream->res;
1033 int ret;
1034
1035 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1036 VMW_RES_STREAM, res_free);
1037
1038 if (unlikely(ret != 0)) {
1039 if (res_free == NULL)
1040 kfree(stream);
1041 else
1042 res_free(&stream->res);
1043 return ret;
1044 }
1045
1046 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1047 if (ret) {
1048 vmw_resource_unreference(&res);
1049 return ret;
1050 }
1051
1052 DRM_INFO("%s: claimed\n", __func__);
1053
1054 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1055 return 0;
1056}
1057
1058/**
1059 * User-space stream management:
1060 */
1061
1062static void vmw_user_stream_free(struct vmw_resource *res)
1063{
1064 struct vmw_user_stream *stream =
1065 container_of(res, struct vmw_user_stream, stream.res);
1066
1067 kfree(stream);
1068}
1069
1070/**
1071 * This function is called when user space has no more references on the
1072 * base object. It releases the base-object's reference on the resource object.
1073 */
1074
1075static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1076{
1077 struct ttm_base_object *base = *p_base;
1078 struct vmw_user_stream *stream =
1079 container_of(base, struct vmw_user_stream, base);
1080 struct vmw_resource *res = &stream->stream.res;
1081
1082 *p_base = NULL;
1083 vmw_resource_unreference(&res);
1084}
1085
1086int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1087 struct drm_file *file_priv)
1088{
1089 struct vmw_private *dev_priv = vmw_priv(dev);
1090 struct vmw_resource *res;
1091 struct vmw_user_stream *stream;
1092 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1093 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1094 int ret = 0;
1095
1096 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1097 if (unlikely(res == NULL))
1098 return -EINVAL;
1099
1100 if (res->res_free != &vmw_user_stream_free) {
1101 ret = -EINVAL;
1102 goto out;
1103 }
1104
1105 stream = container_of(res, struct vmw_user_stream, stream.res);
1106 if (stream->base.tfile != tfile) {
1107 ret = -EINVAL;
1108 goto out;
1109 }
1110
1111 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1112out:
1113 vmw_resource_unreference(&res);
1114 return ret;
1115}
1116
1117int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1118 struct drm_file *file_priv)
1119{
1120 struct vmw_private *dev_priv = vmw_priv(dev);
1121 struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1122 struct vmw_resource *res;
1123 struct vmw_resource *tmp;
1124 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1125 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1126 int ret;
1127
1128 if (unlikely(stream == NULL))
1129 return -ENOMEM;
1130
1131 res = &stream->stream.res;
1132 stream->base.shareable = false;
1133 stream->base.tfile = NULL;
1134
1135 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1136 if (unlikely(ret != 0))
1137 return ret;
1138
1139 tmp = vmw_resource_reference(res);
1140 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1141 &vmw_user_stream_base_release, NULL);
1142
1143 if (unlikely(ret != 0)) {
1144 vmw_resource_unreference(&tmp);
1145 goto out_err;
1146 }
1147
1148 arg->stream_id = res->id;
1149out_err:
1150 vmw_resource_unreference(&res);
1151 return ret;
1152}
1153
1154int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1155 struct ttm_object_file *tfile,
1156 uint32_t *inout_id, struct vmw_resource **out)
1157{
1158 struct vmw_user_stream *stream;
1159 struct vmw_resource *res;
1160 int ret;
1161
1162 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1163 if (unlikely(res == NULL))
1164 return -EINVAL;
1165
1166 if (res->res_free != &vmw_user_stream_free) {
1167 ret = -EINVAL;
1168 goto err_ref;
1169 }
1170
1171 stream = container_of(res, struct vmw_user_stream, stream.res);
1172 if (stream->base.tfile != tfile) {
1173 ret = -EPERM;
1174 goto err_ref;
1175 }
1176
1177 *inout_id = stream->stream.stream_id;
1178 *out = res;
1179 return 0;
1180err_ref:
1181 vmw_resource_unreference(&res);
1182 return ret;
1183}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
new file mode 100644
index 000000000000..e3df4adfb4d8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -0,0 +1,99 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
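/*
 * mmap entry point: offsets below VMWGFX_FILE_PAGE_OFFSET are tried as
 * fifo or legacy drm mappings; everything else is handed to TTM.
 */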
31int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
32{
33 struct drm_file *file_priv;
34 struct vmw_private *dev_priv;
35
36 if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
37 if (vmw_fifo_mmap(filp, vma) == 0)
38 return 0;
39 return drm_mmap(filp, vma);
40 }
41
42 file_priv = (struct drm_file *)filp->private_data;
43 dev_priv = vmw_priv(file_priv->minor->dev);
44 return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
45}
46
47static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
48{
49 DRM_INFO("global init.\n");
50 return ttm_mem_global_init(ref->object);
51}
52
53static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
54{
55 ttm_mem_global_release(ref->object);
56}
57
58int vmw_ttm_global_init(struct vmw_private *dev_priv)
59{
60 struct ttm_global_reference *global_ref;
61 int ret;
62
63 global_ref = &dev_priv->mem_global_ref;
64 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
65 global_ref->size = sizeof(struct ttm_mem_global);
66 global_ref->init = &vmw_ttm_mem_global_init;
67 global_ref->release = &vmw_ttm_mem_global_release;
68
69 ret = ttm_global_item_ref(global_ref);
70 if (unlikely(ret != 0)) {
71 DRM_ERROR("Failed setting up TTM memory accounting.\n");
72 return ret;
73 }
74
75 dev_priv->bo_global_ref.mem_glob =
76 dev_priv->mem_global_ref.object;
77 global_ref = &dev_priv->bo_global_ref.ref;
78 global_ref->global_type = TTM_GLOBAL_TTM_BO;
79 global_ref->size = sizeof(struct ttm_bo_global);
80 global_ref->init = &ttm_bo_global_init;
81 global_ref->release = &ttm_bo_global_release;
82 ret = ttm_global_item_ref(global_ref);
83
84 if (unlikely(ret != 0)) {
85 DRM_ERROR("Failed setting up TTM buffer objects.\n");
86 goto out_no_bo;
87 }
88
89 return 0;
90out_no_bo:
91 ttm_global_item_unref(&dev_priv->mem_global_ref);
92 return ret;
93}
94
95void vmw_ttm_global_release(struct vmw_private *dev_priv)
96{
97 ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
98 ttm_global_item_unref(&dev_priv->mem_global_ref);
99}
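
The memory-accounting item is referenced before the buffer-object item and released after it. A minimal sketch of the expected pairing in a hypothetical load/unload path (function names illustrative):

/* Hypothetical pairing of the TTM global init/release helpers. */
static int example_load(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		return ret;
	/* ... ttm_bo_device_init() on &dev_priv->bdev would follow ... */
	return 0;
}

static void example_unload(struct vmw_private *dev_priv)
{
	/* ... ttm_bo_device_release() on &dev_priv->bdev comes first ... */
	vmw_ttm_global_release(dev_priv);
}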
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index da74e216b71d..9533ab60cdb5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -101,6 +101,8 @@ source "drivers/staging/p9auth/Kconfig"
 
 source "drivers/staging/line6/Kconfig"
 
+source "drivers/gpu/drm/vmwgfx/Kconfig"
+
 source "drivers/gpu/drm/radeon/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index cfa6af43c9ea..bd3a1c2fbdb4 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -7,5 +7,6 @@ unifdef-y += r128_drm.h
 unifdef-y += radeon_drm.h
 unifdef-y += sis_drm.h
 unifdef-y += savage_drm.h
+unifdef-y += vmwgfx_drm.h
 unifdef-y += via_drm.h
 unifdef-y += nouveau_drm.h
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 19ef8ebdc662..71dafb69cfeb 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -296,6 +296,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
 #define DRM_MASTER 0x2
 #define DRM_ROOT_ONLY 0x4
 #define DRM_CONTROL_ALLOW 0x8
+#define DRM_UNLOCKED 0x10
 
 struct drm_ioctl_desc {
 	unsigned int cmd;
@@ -1128,8 +1129,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
 /* Driver support (drm_drv.h) */
 extern int drm_init(struct drm_driver *driver);
 extern void drm_exit(struct drm_driver *driver);
-extern int drm_ioctl(struct inode *inode, struct file *filp,
+extern long drm_ioctl(struct file *filp,
 	      unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
 	      unsigned int cmd, unsigned long arg);
 extern int drm_lastclose(struct drm_device *dev);
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index 703ca4db0a29..0d9db099978b 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -77,7 +77,11 @@ enum ttm_object_type {
 	ttm_buffer_type,
 	ttm_lock_type,
 	ttm_driver_type0 = 256,
-	ttm_driver_type1
+	ttm_driver_type1,
+	ttm_driver_type2,
+	ttm_driver_type3,
+	ttm_driver_type4,
+	ttm_driver_type5
 };
 
 struct ttm_object_file;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
new file mode 100644
index 000000000000..2be7e1249b6f
--- /dev/null
+++ b/include/drm/vmwgfx_drm.h
@@ -0,0 +1,574 @@
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef __VMWGFX_DRM_H__
29#define __VMWGFX_DRM_H__
30
31#define DRM_VMW_MAX_SURFACE_FACES 6
32#define DRM_VMW_MAX_MIP_LEVELS 24
33
34#define DRM_VMW_EXT_NAME_LEN 128
35
36#define DRM_VMW_GET_PARAM 0
37#define DRM_VMW_ALLOC_DMABUF 1
38#define DRM_VMW_UNREF_DMABUF 2
39#define DRM_VMW_CURSOR_BYPASS 3
40/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
41#define DRM_VMW_CONTROL_STREAM 4
42#define DRM_VMW_CLAIM_STREAM 5
43#define DRM_VMW_UNREF_STREAM 6
44/* guarded by DRM_VMW_PARAM_3D == 1 */
45#define DRM_VMW_CREATE_CONTEXT 7
46#define DRM_VMW_UNREF_CONTEXT 8
47#define DRM_VMW_CREATE_SURFACE 9
48#define DRM_VMW_UNREF_SURFACE 10
49#define DRM_VMW_REF_SURFACE 11
50#define DRM_VMW_EXECBUF 12
51#define DRM_VMW_FIFO_DEBUG 13
52#define DRM_VMW_FENCE_WAIT 14
53
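
These are indices relative to DRM_COMMAND_BASE rather than complete ioctl numbers. A hedged sketch of how the driver presumably composes the full numbers (the DRM_IOCTL_VMW_GET_PARAM name below is illustrative and not part of this header):

#include <drm/drm.h>

/* Illustrative only: a full ioctl number built from an index above. */
#define DRM_IOCTL_VMW_GET_PARAM					\
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,		\
		 struct drm_vmw_getparam_arg)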
54
55/*************************************************************************/
56/**
57 * DRM_VMW_GET_PARAM - get device information.
58 *
59 * DRM_VMW_PARAM_FIFO_OFFSET:
60 * Offset to use to map the first page of the FIFO read-only.
61 * The fifo is mapped using the mmap() system call on the drm device.
62 *
63 * DRM_VMW_PARAM_OVERLAY_IOCTL:
64 * Whether the driver supports the overlay ioctl.
65 */
66
67#define DRM_VMW_PARAM_NUM_STREAMS 0
68#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
69#define DRM_VMW_PARAM_3D 2
70#define DRM_VMW_PARAM_FIFO_OFFSET 3
71
72
73/**
74 * struct drm_vmw_getparam_arg
75 *
76 * @value: Returned value. //Out
77 * @param: Parameter to query. //In.
78 *
79 * Argument to the DRM_VMW_GET_PARAM Ioctl.
80 */
81
82struct drm_vmw_getparam_arg {
83 uint64_t value;
84 uint32_t param;
85 uint32_t pad64;
86};
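
A minimal user-space sketch, assuming libdrm's drmCommandWriteRead(), of querying 3D support through this argument:

#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: ask the device whether 3D commands are available. */
static int vmw_have_3d(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) != 0)
		return 0;
	return arg.value == 1;
}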
87
88/*************************************************************************/
89/**
90 * DRM_VMW_EXTENSION - Query device extensions.
91 */
92
93/**
94 * struct drm_vmw_extension_rep
95 *
96 * @exists: The queried extension exists.
97 * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension.
98 * @driver_sarea_offset: Offset to any space in the DRI SAREA
99 * used by the extension.
100 * @major: Major version number of the extension.
101 * @minor: Minor version number of the extension.
102 * @pl: Patch level version number of the extension.
103 *
104 * Output argument to the DRM_VMW_EXTENSION Ioctl.
105 */
106
107struct drm_vmw_extension_rep {
108 int32_t exists;
109 uint32_t driver_ioctl_offset;
110 uint32_t driver_sarea_offset;
111 uint32_t major;
112 uint32_t minor;
113 uint32_t pl;
114 uint32_t pad64;
115};
116
117/**
118 * union drm_vmw_extension_arg
119 *
120 * @extension - ASCII name of the extension to be queried. //In
121 * @rep - Reply as defined above. //Out
122 *
123 * Argument to the DRM_VMW_EXTENSION Ioctl.
124 */
125
126union drm_vmw_extension_arg {
127 char extension[DRM_VMW_EXT_NAME_LEN];
128 struct drm_vmw_extension_rep rep;
129};
130
131/*************************************************************************/
132/**
133 * DRM_VMW_CREATE_CONTEXT - Create a host context.
134 *
135 * Allocates a device unique context id, and queues a create context command
136 * for the host. Does not wait for host completion.
137 */
138
139/**
140 * struct drm_vmw_context_arg
141 *
142 * @cid: Device unique context ID.
143 *
144 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
145 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
146 */
147
148struct drm_vmw_context_arg {
149 int32_t cid;
150 uint32_t pad64;
151};
152
153/*************************************************************************/
154/**
155 * DRM_VMW_UNREF_CONTEXT - Free a host context.
156 *
157 * Frees a global context id, and queues a context destroy command for the host.
158 * Does not wait for host completion. The context ID can be used directly
159 * in the command stream and shows up as the same context ID on the host.
160 */
161
162/*************************************************************************/
163/**
164 * DRM_VMW_CREATE_SURFACE - Create a host surface.
165 *
166 * Allocates a device unique surface id, and queues a create surface command
167 * for the host. Does not wait for host completion. The surface ID can be
168 * used directly in the command stream and shows up as the same surface
169 * ID on the host.
170 */
171
172/**
173 * struct drm_vmw_surface_create_req
174 *
175 * @flags: Surface flags as understood by the host.
176 * @format: Surface format as understood by the host.
177 * @mip_levels: Number of mip levels for each face.
178 * An unused face should have 0 encoded.
179 * @size_addr: Address of a user-space array of struct drm_vmw_size
180 * cast to a uint64_t for 32-64 bit compatibility.
181 * The size of the array should equal the total number of mipmap levels.
182 * @shareable: Boolean indicating whether other clients (as identified by
183 * file descriptors) may reference this surface.
184 *
185 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
186 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
187 */
188
189struct drm_vmw_surface_create_req {
190 uint32_t flags;
191 uint32_t format;
192 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
193 uint64_t size_addr;
194 int32_t shareable;
195 uint32_t pad64;
196};
197
198/**
199 * struct drm_vmw_surface_arg
200 *
201 * @sid: Surface id of the created surface, or of a surface to reference or unreference.
202 *
203 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
204 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
205 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
206 */
207
208struct drm_vmw_surface_arg {
209 int32_t sid;
210 uint32_t pad64;
211};
212
213/**
214 * struct drm_vmw_size
215 *
216 * @width - mip level width
217 * @height - mip level height
218 * @depth - mip level depth
219 *
220 * Description of a mip level.
221 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
222 */
223
224struct drm_vmw_size {
225 uint32_t width;
226 uint32_t height;
227 uint32_t depth;
228 uint32_t pad64;
229};
230
231/**
232 * union drm_vmw_surface_create_arg
233 *
234 * @rep: Output data as described above.
235 * @req: Input data as described above.
236 *
237 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
238 */
239
240union drm_vmw_surface_create_arg {
241 struct drm_vmw_surface_arg rep;
242 struct drm_vmw_surface_create_req req;
243};
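
A user-space sketch of the pointer-to-uint64_t convention above, creating a single-face, single-mip surface (libdrm assumed; the format parameter is a placeholder for a host-defined code):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: create a 256x256 surface with one face and one mip level. */
static int create_surface(int fd, uint32_t format, int32_t *sid)
{
	struct drm_vmw_size size = { .width = 256, .height = 256, .depth = 1 };
	union drm_vmw_surface_create_arg arg;
	int ret;

	memset(&arg, 0, sizeof(arg));
	arg.req.format = format;
	arg.req.mip_levels[0] = 1;	/* unused faces keep 0 */
	arg.req.size_addr = (uint64_t)(unsigned long)&size;

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
	if (ret == 0)
		*sid = arg.rep.sid;
	return ret;
}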
244
245/*************************************************************************/
246/**
247 * DRM_VMW_REF_SURFACE - Reference a host surface.
248 *
249 * Puts a reference on a host surface with a given sid, as previously
250 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
251 * A reference will make sure the surface isn't destroyed while we hold
252 * it and will allow the calling client to use the surface ID in the command
253 * stream.
254 *
255 * On successful return, the Ioctl returns the surface information given
256 * in the DRM_VMW_CREATE_SURFACE ioctl.
257 */
258
259/**
260 * union drm_vmw_surface_reference_arg
261 *
262 * @rep: Output data as described above.
263 * @req: Input data as described above.
264 *
265 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
266 */
267
268union drm_vmw_surface_reference_arg {
269 struct drm_vmw_surface_create_req rep;
270 struct drm_vmw_surface_arg req;
271};
272
273/*************************************************************************/
274/**
275 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
276 *
277 * Clear a reference previously put on a host surface.
278 * When all references are gone, including the one implicitly placed
279 * on creation, a destroy surface command will be queued for the host.
280 * Does not wait for completion.
282 */
283
284/*************************************************************************/
285/**
286 * DRM_VMW_EXECBUF
287 *
288 * Submit a command buffer for execution on the host, and return a
289 * fence sequence that when signaled, indicates that the command buffer has
290 * executed.
291 */
292
293/**
294 * struct drm_vmw_execbuf_arg
295 *
296 * @commands: User-space address of a command buffer cast to a uint64_t.
297 * @command_size: Size in bytes of the command buffer.
298 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to a
299 * uint64_t.
300 *
301 * Argument to the DRM_VMW_EXECBUF Ioctl.
302 */
303
304struct drm_vmw_execbuf_arg {
305 uint64_t commands;
306 uint32_t command_size;
307 uint32_t pad64;
308 uint64_t fence_rep;
309};
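
A user-space sketch of a submission (libdrm assumed), pre-setting the fence error field to -EFAULT as the struct drm_vmw_fence_rep documentation below requires:

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: submit cmd_bytes of commands and collect the returned fence. */
static int submit(int fd, void *cmds, uint32_t cmd_bytes,
		  struct drm_vmw_fence_rep *fence_rep)
{
	struct drm_vmw_execbuf_arg arg;

	memset(fence_rep, 0, sizeof(*fence_rep));
	fence_rep->error = -EFAULT;	/* see drm_vmw_fence_rep below */

	memset(&arg, 0, sizeof(arg));
	arg.commands = (uint64_t)(unsigned long)cmds;
	arg.command_size = cmd_bytes;
	arg.fence_rep = (uint64_t)(unsigned long)fence_rep;

	return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
}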
310
311/**
312 * struct drm_vmw_fence_rep
313 *
314 * @fence_seq: Fence sequence associated with a command submission.
315 * @error: This member should have been set to -EFAULT by user-space on
316 * submission. The following actions should be taken on completion:
317 * error == -EFAULT: Fence communication failed. The host is synchronized.
318 * Use the last fence id read from the FIFO fence register.
319 * error != 0 && error != -EFAULT:
320 * Fence submission failed. The host is synchronized. Use the fence_seq member.
321 * error == 0: All is OK. The host may not be synchronized.
322 * Use the fence_seq member.
323 *
324 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
325 */
326
327struct drm_vmw_fence_rep {
328 uint64_t fence_seq;
329 int32_t error;
330 uint32_t pad64;
331};
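
The three completion cases above reduce to the following decision; read_fence_register() is a placeholder for however the FIFO fence register is actually read:

#include <errno.h>
#include <stdint.h>
#include <drm/vmwgfx_drm.h>

extern uint64_t read_fence_register(void);	/* placeholder */

/* Sketch of the completion rules for struct drm_vmw_fence_rep. */
static uint64_t pick_sequence(const struct drm_vmw_fence_rep *rep,
			      int *need_wait)
{
	if (rep->error == -EFAULT) {
		*need_wait = 0;			/* host is synchronized */
		return read_fence_register();
	}
	*need_wait = (rep->error == 0);		/* only then may the host lag */
	return rep->fence_seq;
}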
332
333/*************************************************************************/
334/**
335 * DRM_VMW_ALLOC_DMABUF
336 *
337 * Allocate a DMA buffer that is visible also to the host.
338 * NOTE: The buffer is identified by a handle and an offset, which are
339 * private to the guest, but usable in the command stream. The guest kernel
340 * may translate these
341 * and patch up the command stream accordingly. In the future, the offset may
342 * be zero at all times, or it may disappear from the interface before it is
343 * fixed.
344 *
345 * The DMA buffer may stay user-space mapped in the guest at all times,
346 * and is thus suitable for sub-allocation.
347 *
348 * DMA buffers are mapped using the mmap() syscall on the drm device.
349 */
350
351/**
352 * struct drm_vmw_alloc_dmabuf_req
353 *
354 * @size: Required minimum size of the buffer.
355 *
356 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
357 */
358
359struct drm_vmw_alloc_dmabuf_req {
360 uint32_t size;
361 uint32_t pad64;
362};
363
364/**
365 * struct drm_vmw_dmabuf_rep
366 *
367 * @map_handle: Offset to use in the mmap() call used to map the buffer.
368 * @handle: Handle unique to this buffer. Used for unreferencing.
369 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
370 * referenced. See note above.
371 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
372 * referenced. See note above.
373 *
374 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
375 */
376
377struct drm_vmw_dmabuf_rep {
378 uint64_t map_handle;
379 uint32_t handle;
380 uint32_t cur_gmr_id;
381 uint32_t cur_gmr_offset;
382 uint32_t pad64;
383};
384
385/**
386 * union drm_vmw_alloc_dmabuf_arg
387 *
388 * @req: Input data as described above.
389 * @rep: Output data as described above.
390 *
391 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
392 */
393
394union drm_vmw_alloc_dmabuf_arg {
395 struct drm_vmw_alloc_dmabuf_req req;
396 struct drm_vmw_dmabuf_rep rep;
397};
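
A user-space sketch (libdrm assumed) pairing the allocation with the mmap() mapping described above:

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: allocate a 64 KiB DMA buffer and map it via rep.map_handle. */
static void *alloc_and_map(int fd, uint32_t *handle)
{
	union drm_vmw_alloc_dmabuf_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.req.size = 65536;

	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)))
		return MAP_FAILED;
	*handle = arg.rep.handle;
	return mmap(NULL, 65536, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)arg.rep.map_handle);
}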
398
399/*************************************************************************/
400/**
401 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
402 *
403 */
404
405/**
406 * struct drm_vmw_unref_dmabuf_arg
407 *
408 * @handle: Handle indicating what buffer to free. Obtained from the
409 * DRM_VMW_ALLOC_DMABUF Ioctl.
410 *
411 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
412 */
413
414struct drm_vmw_unref_dmabuf_arg {
415 uint32_t handle;
416 uint32_t pad64;
417};
418
419/*************************************************************************/
420/**
421 * DRM_VMW_FIFO_DEBUG - Get last FIFO submission.
422 *
423 * This IOCTL copies the last FIFO submission directly out of the FIFO buffer.
424 */
425
426/**
427 * struct drm_vmw_fifo_debug_arg
428 *
429 * @debug_buffer: User space address of a debug buffer cast to a uint64_t //In
430 * @debug_buffer_size: Size in bytes of debug buffer //In
431 * @used_size: Number of bytes copied to the buffer // Out
432 * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out
433 *
434 * Argument to the DRM_VMW_FIFO_DEBUG Ioctl.
435 */
436
437struct drm_vmw_fifo_debug_arg {
438 uint64_t debug_buffer;
439 uint32_t debug_buffer_size;
440 uint32_t used_size;
441 int32_t did_not_fit;
442 uint32_t pad64;
443};
444
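
The wait argument below is otherwise undocumented; the gloss that follows is inferred from the surrounding definitions and should be read as an assumption, not as part of the original interface description:

/*
 * struct drm_vmw_fence_wait_arg (inferred documentation)
 *
 * @sequence: Fence sequence to wait for, presumably the value returned in
 * struct drm_vmw_fence_rep::fence_seq.
 * @kernel_cookie: Opaque state maintained by the kernel across repeated
 * calls for the same wait.
 * @cookie_valid: Whether @kernel_cookie holds a valid value.
 *
 * Argument to the DRM_VMW_FENCE_WAIT Ioctl.
 */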
445struct drm_vmw_fence_wait_arg {
446 uint64_t sequence;
447 uint64_t kernel_cookie;
448 int32_t cookie_valid;
449 int32_t pad64;
450};
451
452/*************************************************************************/
453/**
454 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
455 *
456 * This IOCTL controls the overlay units of the svga device.
457 * The SVGA overlay units do not work like regular hardware units in
458 * that they do not automatically read back the contents of the given dma
459 * buffer. Instead they read back only on each call to this ioctl, and
460 * at any point between this call being made and a following call that
461 * either changes the buffer or disables the stream.
462 */
463
464/**
465 * struct drm_vmw_rect
466 *
467 * Defines a rectangle. Used in the overlay ioctl to define
468 * source and destination rectangle.
469 */
470
471struct drm_vmw_rect {
472 int32_t x;
473 int32_t y;
474 uint32_t w;
475 uint32_t h;
476};
477
478/**
479 * struct drm_vmw_control_stream_arg
480 *
481 * @stream_id: Stream to control.
482 * @enabled: If false all following arguments are ignored.
483 * @handle: Handle to buffer for getting data from.
484 * @format: Format of the overlay as understood by the host.
485 * @width: Width of the overlay.
486 * @height: Height of the overlay.
487 * @size: Size of the overlay in bytes.
488 * @pitch: Array of pitches; the last two are only used for YUV12 formats.
489 * @offset: Offset from start of dma buffer to overlay.
490 * @src: Source rect, must be within the defined area above.
491 * @dst: Destination rect, x and y may be negative.
492 *
493 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
494 */
495
496struct drm_vmw_control_stream_arg {
497 uint32_t stream_id;
498 uint32_t enabled;
499
500 uint32_t flags;
501 uint32_t color_key;
502
503 uint32_t handle;
504 uint32_t offset;
505 int32_t format;
506 uint32_t size;
507 uint32_t width;
508 uint32_t height;
509 uint32_t pitch[3];
510
511 uint32_t pad64;
512 struct drm_vmw_rect src;
513 struct drm_vmw_rect dst;
514};
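
A user-space sketch (libdrm assumed) of pointing a claimed stream at a DMA buffer; the sizes are illustrative, format is a host-defined code, and handle comes from DRM_VMW_ALLOC_DMABUF:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: enable a 320x240 overlay sourced from a DMA buffer. */
static int show_overlay(int fd, uint32_t stream_id, uint32_t handle,
			int32_t format, uint32_t size)
{
	struct drm_vmw_control_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.stream_id = stream_id;	/* from DRM_VMW_CLAIM_STREAM */
	arg.enabled = 1;
	arg.handle = handle;
	arg.format = format;
	arg.width = 320;
	arg.height = 240;
	arg.size = size;
	arg.pitch[0] = 320 * 2;		/* e.g. a packed 16-bpp format */
	arg.src = (struct drm_vmw_rect){ .w = 320, .h = 240 };
	arg.dst = (struct drm_vmw_rect){ .w = 320, .h = 240 };

	return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
}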
515
516/*************************************************************************/
517/**
518 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
519 *
520 */
521
522#define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0)
523#define DRM_VMW_CURSOR_BYPASS_FLAGS (1)
524
525/**
526 * struct drm_vmw_cursor_bypass_arg
527 *
528 * @flags: Flags.
529 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
530 * @xpos: X position of cursor.
531 * @ypos: Y position of cursor.
532 * @xhot: X hotspot.
533 * @yhot: Y hotspot.
534 *
535 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
536 */
537
538struct drm_vmw_cursor_bypass_arg {
539 uint32_t flags;
540 uint32_t crtc_id;
541 int32_t xpos;
542 int32_t ypos;
543 int32_t xhot;
544 int32_t yhot;
545};
546
547/*************************************************************************/
548/**
549 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
550 */
551
552/**
553 * struct drm_vmw_stream_arg
554 *
555 * @stream_id: Device unique stream ID.
556 *
557 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
558 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
559 */
560
561struct drm_vmw_stream_arg {
562 uint32_t stream_id;
563 uint32_t pad64;
564};
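
A user-space sketch (libdrm assumed) of the claim/unref pairing:

#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Sketch: claim a stream, use it, then return it to the kernel. */
static void with_stream(int fd)
{
	struct drm_vmw_stream_arg arg;

	memset(&arg, 0, sizeof(arg));
	if (drmCommandWriteRead(fd, DRM_VMW_CLAIM_STREAM, &arg, sizeof(arg)))
		return;
	/* ... arg.stream_id is valid for DRM_VMW_CONTROL_STREAM ... */
	drmCommandWrite(fd, DRM_VMW_UNREF_STREAM, &arg, sizeof(arg));
}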
565
566/*************************************************************************/
567/**
568 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
569 *
570 * Return a single stream that was claimed by this process. Also makes
571 * sure that the stream has been stopped.
572 */
573
574#endif